reponame: string (lengths 2-39)
files: list
median_score: float64 (0-11.5)
Maannniii
[ { "content": "'''**************************************** LIBRARIES ******************************************'''\nimport os\nfrom argparse import ArgumentParser\nfrom Queue import Queue\nfrom sys import argv,setrecursionlimit\n\n'''*************************************** GLOBAL VARIABLES ************************************'''\nsetrecursionlimit(3000) #if you get \"maximum recursion depth exceeded\" error enable this and set value > 1000 default value is 1000\nq=Queue()\nfile_dict={}\n\n'''*************************************** FUNCTIONS *******************************************'''\n# returns a list of files in given location\ndef files(pat):\n files=[x for x in os.listdir(pat) if not x.startswith(\".\") and os.path.isfile(os.path.join(pat,x))]\n return files\n\n# returns a list of directories in given location\ndef directories(pat):\n dirs=[x for x in os.listdir(pat) if not x.startswith(\".\") and os.path.isdir(os.path.join(pat,x))]\n return dirs\n\n# returns a list of hidden files in given location\ndef hid_fil(pat):\n hidden_files=[x for x in os.listdir(pat) if x.startswith(\".\") and os.path.isdir(os.path.join(pat,x))]\n return len(hidden_files)\n\n# returns a list of hidden directories in given location\ndef hid_dir(pat):\n hidden_dirs=[x for x in os.listdir(pat) if x.startswith(\".\") and os.path.isfile(os.path.join(pat,x))]\n return len(hidden_dirs)\n\n# main function to get files,dirs,hidden files,hidden dirs\ndef main(location):\n #global file_dict\n b=directories(location)\n for item in b:\n q.put(os.path.join(location,item))\n for fil in files(location):\n if fil not in file_dict.keys():\n file_dict.update({fil:os.path.join(location,fil)})\n elif type(file_dict[fil]).__name__ == \"str\":\n fi=[file_dict[fil]]\n fi.append(os.path.join(location,fil))\n file_dict.update({fil:set(fi)})\n else:\n fi=list(file_dict[fil])\n fi.append(os.path.join(location,fil))\n file_dict.update({fil:set(fi)})\n while not q.empty():\n main(q.get())\n\n# MAIN FUNCTION\nif __name__==\"__main__\":\n parser = ArgumentParser()\n parser.add_argument('-s','--search',help=\"File name to search for duplication\",required=False)\n parser.add_argument('-p','--path',help=\"Path to search for files defaults to PWD\",required=False,default=\".\")\n args=parser.parse_args()\n path=args.path\n if path == \".\":\n path=os.path.abspath(\".\")\n if path[0] ==\"/\" or path[0:2] == \"./\" or path[0:3]==\"../\" or path[0].isalpha():\n main(path)\n if args.search:\n print args.search\n for i in file_dict[args.search]:\n print \"\\t\",i\n exit(0)\n for i in file_dict.keys():\n if type(file_dict[i]).__name__ == \"set\":\n print i\n for j in file_dict[i]:\n print \"\\t\",j\n exit(0)\n print \"Congrats no duplicate files found\"\n", "id": "3459526", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "duplicate-finder.py" } ]
0
simone-romei
[ { "content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport datetime\nfrom scrapy.loader import ItemLoader\nfrom scrapy.spiders import Rule, CrawlSpider\nfrom scrapy.linkextractors import LinkExtractor\nfrom event.items import EventItem\n\nclass SalarazzmatazzSpider(scrapy.Spider):\n name = 'salarazzmatazz'\n\n allowed_domains = ['www.salarazzmatazz.com']\n\n start_urls = ['http://www.salarazzmatazz.com']\n\n def start_requests(self):\n date = datetime.date.today()\n \n # Search for SHOWs\n yield scrapy.Request(\n url=self.start_urls[0]+'/op/conciertos/%02d,%04d' % (date.month, date.year), \n callback=self.parseShows\n )\n\n def parseShows(self, response):\n self.logger.info('parse of shows %s', response.url)\n # Loop On events\n items = []\n events = response.selector.css('table.evento')\n for event in events:\n item = EventItem()\n item['name'] = \"\".join(event.css('td.sala a::text').extract()).encode('utf-8')\n item['url'] = self.start_urls[0] + \"\".join(event.css('td.sala a::attr(href)').extract()).encode('utf-8')\n item['imgList'] = self.start_urls[0] + \"\".join(event.css('td.inicial img::attr(src)').extract()).encode('utf-8')\n #item['zone'] = \"\".join(event.css('td.sala span:first-child ::text').extract()).encode('utf-8')\n item['dateStart'] = \"\".join(event.css('span.fecha ::text').extract()).encode(\"utf-8\")\n items.append(item)\n return items", "id": "3792018", "language": "Python", "matching_score": 2.033872365951538, "max_stars_count": 0, "path": "event-spider-scrapy/event/event/spiders/salarazzmatazz.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass EventItem(scrapy.Item): \n\t# Structure\n\tid = scrapy.Field()\n\t# What \n\tname = scrapy.Field()\n\turl = scrapy.Field()\n\timgList = scrapy.Field()\n\timgDetail = scrapy.Field()\n\t# When\n\tdateStart = scrapy.Field()\n\tdateEnd = scrapy.Field()\n\t# Where\n\tcountry = scrapy.Field()\n\tcity = scrapy.Field()\n\tlocality = scrapy.Field()\n\taddress = scrapy.Field()\n\tcoordinate = scrapy.Field()\n\tplace = scrapy.Field()\n\tzone = scrapy.Field()\n\t# Spider\n\tspiderName = scrapy.Field()\n\tspiderSource = scrapy.Field()\n\t# Extra\n\textras = scrapy.Field()\n", "id": "3241497", "language": "Python", "matching_score": 2.9248979091644287, "max_stars_count": 0, "path": "event-spider-scrapy/event/event/items.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport pysolr\nfrom scrapy.exceptions import DropItem\n\n\nclass EventSpider(object):\n def process_item(self, item, spider):\n \titem['spiderName'] = spider.name\n item['spiderSource'] = spider.start_urls[0] \n return item\n\nclass EventValidator(object):\n def process_item(self, item, spider):\n \t# URL\n \tif 'url' not in item or not item['name'] or not item['name'].strip():\n \t\traise DropItem(\"Missing URL in %s\" % item)\n \t# Name\n \tif 'name' in item and item['name'] and item['name'].strip():\n \t\titem['name'] = item['name'].strip().lower()\n \telse:\n \t\traise DropItem(\"Missing name in %s\" % item)\n \t# Zone\n \tif 'zone' in item and item['zone'] and item['zone'].strip():\n \t\titem['zone'] = item['zone'].strip().lower()\n \treturn item\n\nclass EventSolrWriter(object):\n\tdef open_spider(self, spider):\n\t\tself.client = 
pysolr.Solr('http://192.168.99.100:8983/solr/eventCore', timeout=10)\n\n\tdef process_item(self, item, spider):\n\t\tself.client.add([{\n\t\t\t'id':item['url'],\n 'name_s':item['name'],\n 'url_s':item['url'],\n 'zone_s':item['zone']\n\t\t\t}])\n\n\n", "id": "9309931", "language": "Python", "matching_score": 1.962659478187561, "max_stars_count": 0, "path": "event-spider-scrapy/event/event/pipelines.py" } ]
2.033872
absoluteweapons
[ { "content": "import sys\nimport scrapy\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom urllib.parse import urlparse\nfrom urllib.parse import unquote\nimport re\nimport json\n\n# regex find query in body\ndef find_text(body, query):\n sQuery = re.escape(' '.join(unquote(query).split()))\n sBody = ' '.join(body.split())\n result = [r.start() for r in re.finditer(sQuery, sBody, re.IGNORECASE)]\n return result\n\nclass SearchTheSourceCode(scrapy.Spider):\n name = 'searchthesourcecode'\n\n def __init__(self, **kw):\n super(SearchTheSourceCode, self).__init__(**kw)\n url = kw.get('start')\n if not url.startswith('http://') and not url.startswith('https://'):\n url = 'http://%s/' % url\n self.url = url\n self.allowed_domains = [re.sub(r'^www\\.', '', urlparse(url).hostname)]\n\n def start_requests(self):\n yield scrapy.Request(self.url, callback=self.parse, errback=self.error)\n\n # error callback to handle inaccessible sites\n def error(self, failure):\n response = {\n \"error\": failure.value.response.status,\n \"body\": failure.value.response.body.decode(\"utf-8\")\n }\n print(json.dumps(response), file=sys.stderr)\n\n # parse page, search for query, output to json\n def parse(self, response):\n # build json entry\n url = response.url\n url_no_host = re.match(\n \"^(https?:\\/\\/[^:\\/\\s]+)(\\/.*)$\", response.url, re.IGNORECASE)\n body = ' '.join(response.text.split())\n matches = find_text(body, self.query)\n count = len(matches)\n if count > 0:\n snippet_start = max(0, matches[0] - 50)\n snippet_end = snippet_start + 100\n snippet = body[snippet_start:snippet_end]\n else:\n snippet = \"\"\n\n output = {\n 'url': url,\n 'hostless': url_no_host.group(2),\n 'matches': json.dumps(matches),\n 'count': count,\n 'snippet': snippet\n }\n\n # send to json file\n yield output\n\n # find links\n trending_links = LxmlLinkExtractor(\n allow=r'^http', deny=r'\\?', unique=True).extract_links(response)\n\n # follow links to next pages\n for next_page in trending_links:\n yield response.follow(next_page, self.parse)\n", "id": "388195", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "stsc/stsc/spiders/searchthesourcecode.py" } ]
0
sc1341
[ { "content": "#!/usr/bin/env python3\n# \n# Venmo-OSINT Tool\n# Created by sc1341\n\nimport argparse\nimport random\nimport requests\nimport os\nimport json\n\nfrom banner import banner\nfrom bs4 import BeautifulSoup\nfrom useragents import user_agents\n\n\nclass VenmoOSINT:\n\n\tdef __init__(self, username):\n\t\tself.username = username\n\t\tself.profile_data = {}\n\n\tdef scan_profile(self):\n\t\t\"\"\"Scans the target profile and returns the data\"\"\"\n\t\ttry:\n\t\t\tr = requests.get(f\"https://venmo.com/{self.username}\", \n\t\t\t\theaders={\"User-Agent\":random.choice(user_agents)})\n\t\texcept requests.exceptions.ConnectionError:\n\t\t\tprint(\"Error, unable to connect to host... check your network connection\")\n\t\t\treturn 1\n\n\t\tsoup = BeautifulSoup(r.text, \"html.parser\")\n\t\ttransactions = soup.find_all(\"div\", attrs={\"class\":\"single-payment content-wrap\"})\n\t\tprint(f\"{self.username} has {len(transactions)} public transactions\")\n\t\tfor i, transaction in enumerate(transactions):\n\t\t\tsend, recv = transaction.find_all(\"a\")\n\t\t\tsend, recv = send.getText(), recv.getText()\n\t\t\tmessage = transaction.find_all(\"div\", attrs={\"class\":\"paymentpage-text m_five_t\"})[0].getText()\n\t\t\tdate = transaction.find_all(\"div\", attrs={\"class\":\"date\"})[0].getText()\n\t\t\texport_message = f\"{send} paid {recv}{date} for {message}\"\n\t\t\tprint(export_message)\n\t\t\t# assign values in dictionary for output\n\t\t\tself.profile_data[str(i)] = {\"sender\":send,\n\t\t\t\t\t\t\t\t\t\"recipient\":recv,\n\t\t\t\t\t\t\t\t\t\"date\":date,\n\t\t\t\t\t\t\t\t\t\"exportMessage\":export_message\n\t\t\t\t\t\t\t\t\t}\n\tdef save_data(self, filename: str):\n\t\t\"\"\"Saves the data from the scan into a file\n\t\t:params: filename\n\t\t:return: none\n\t\t\"\"\"\n\t\ti = 0\n\t\twhile True:\n\t\t\tif (not os.path.exists(filename + str(i))):\n\t\t\t\twith open(f\"{filename}{i}.txt\", \"w\") as f:\n\t\t\t\t\tf.write(json.dumps(self.profile_data))\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ti += 1\n\n\ndef parse_args():\n\tparser = argparse.ArgumentParser(description=\"Venmo-OSINT Tool, created by sc1341\")\n\tparser.add_argument(\"--username\", help=\"Username\", required=True, nargs=1)\n\tparser.add_argument(\"--filename\", help=\"Output file name\", required=True, nargs=1)\n\treturn parser.parse_args()\n\ndef main():\n\targs = parse_args()\n\tprint(banner)\n\ta = VenmoOSINT(args.username[0])\n\ta.scan_profile()\n\ta.save_data(args.filename[0])\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n", "id": "802665", "language": "Python", "matching_score": 0, "max_stars_count": 24, "path": "main.py" }, { "content": "#!/usr/bin/env python3\n# Password analysis tool\n# https://github.com/sc1341\n\nimport matplotlib.pyplot as plt\nimport collections, statistics, argparse, re\n\n\ndef load_passwords(file: str, sep: str):\n\t\"\"\"\n\tReturns a list of passwords from a file\n\t\"\"\"\n\tpasswords = []\n\tfor cred in open(file, \"r\"):\n\t\tcred = cred.strip(\"\\n\")\n\t\t# Check to make sure sep exists so it doesn't split at the nothingth char\n\t\tif sep != '':\n\t\t\tcred = cred.split(sep)\n\t\t\tpasswords.append(cred[1])\n\t\telse:\n\t\t\tpasswords.append(cred)\n\treturn passwords\n\n\ndef in_list(creds:list, word_list: str):\n\t\"\"\"\n\tDetermines what passwords are found in the wordlist file, and the \n\tnumber of occurences. \n\n\tThis feature really isn't great. 
Takes absolute ages with rockyou.txt\n\t\"\"\"\n\trockyou = []\n\tfor word in creds:\n\t\tfor word2 in open(word_list):\n\t\t\tif word == word2.strip(\"\\n\"):\n\t\t\t\trockyou.append(word)\n\treturn collections.Counter(rockyou)\n\n\ndef in_list_graph(creds: dict, wordlist_name:str, title:str):\n\t\"\"\"\n\tCreates a bar graph of what password and how many times is it used in a wordlist\n\t\"\"\"\n\tdata = {x[0]:x[1] for x in c.most_common(10)} \n\tb = plt.bar(data.keys(), data.values())\n\tplt.xlabel(\"Password\")\n\tplt.ylabel(\"Number of occurences\")\n\tplt.title(f\"{title}\\nTop 10 common passwords found in {wordlist_name}\")\n\tplt.show()\n\ndef most_common_passwords(creds: list, num: int):\n\t\"\"\"\n\tReturns the top num most common passwords\n\t\"\"\"\n\treturn collections.Counter(creds).most_common(num)\n\ndef most_common_passwords_graph(creds: list, num: int):\n\t\"\"\"\n\tCreates a graph from the most common passwords\n\t\"\"\"\n\tc = collections.Counter(creds)\n\tdata = {x[0]:x[1] for x in c.most_common(num)} \n\t# I am not sure this really makes a difference or not with spacing... will check back on this\n\tb = plt.bar([' ' + x + ' ' for x in data.keys()], data.values(), align='center')\n\tplt.title(f\"Top {num} most common passwords\")\n\tplt.xlabel(\"Password\")\n\tplt.ylabel(\"Number of occurances\")\n\tplt.show()\n\ndef get_password_lengths(creds: list):\n\t\"\"\"\n\tDetermine how many passwords have what lengths\n\t\"\"\"\n\tlengths = {}\n\ts = 0\n\tfor p in creds:\n\t\tif len(p) not in lengths.keys():\n\t\t\tlengths[len(p)] = 1\n\t\telse:\n\t\t\tlengths[len(p)] += 1\n\t\ts += len(p)\n\t\n\t# The reason I didn't make this an orderdict or use Collections.Counter is I wanted a direct way to\n\t# find the average and median lengths\n\tdata = {\"average_length\": s/len(creds), \"median_length\": statistics.median([len(x) for x in creds]), \"lengths\":lengths}\n\treturn data\n\ndef graph_password_lengths(lengths: dict, show_median: bool, title:str):\n\t\"\"\"\n\tCreates and displays a bar graph showing password lengths and number of occurences. \n\t\"\"\"\n\tod = collections.OrderedDict(sorted(lengths['lengths'].items()))\n\tb = plt.bar(od.keys(), od.values())\n\t# Get start to end for graph ranges. 
Cannot use indexing on OD object :-(\n\tstart, *_, end = od.keys()\n\tplt.xticks([x for x in range(start, end+1)])\n\tplt.xlabel(\"Password length\")\n\tplt.ylabel(\"Number of passwords\")\n\tif show_median == True:\n\t\tplt.title(f\"{title}\\nTotal passwords cracked: {sum(lengths['lengths'].values())}\\nMedian password length: {lengths['median_length']}\")\n\tplt.show()\n\ndef pattern_detection(creds: list):\n\tpatterns = {\n\t\"Capitalized\":\"^[A-Z].*\",\n\t\"All uppercase\":\"[A-Z]*\",\n\t\"All lowercase\":\"[a-z]*\",\n\t\"Contains at least 1 special character\":'''.*[!@#$%^&*(),.?\":{}|<>; ].*''',\n\t\"Only digits\":\"[0-9]*\",\n\t\"4 characters\":\".{4,4}\",\n\t\"5 characters\":\".{5,5}\",\n\t\"6 characters\":\".{6,6}\",\n\t\"7 characters\":\".{7,7}\",\n\t\"8 characters\":\".{8,8}\",\n\t\"9 characters\":\".{9,9}\",\n\t\"10 characters\":\".{10,10}\",\n\t\"11 characters\":\".{11,11}\",\n\t\"12 characters and above\":\".{12,}\",\n\t\"Total\":\".*\",\n\t}\n\tfound = {x:0 for x in patterns.keys()}\n\tfor pattern, regex in patterns.items():\n\t\tfor word in creds:\n\t\t\tif re.fullmatch(regex, word):\n\t\t\t\tfound[pattern] += 1\n\treturn found\n\ndef format_output(data: dict, latex):\n\tif latex:\n\t\tfor key, value in data.items():\n\t\t\tprint(f\"{key}&{value}\\\\\\\\\")\n\telse:\n\t\tfor key, value in data.items():\n\t\t\tprint(f\"{key} : {value}\")\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Password analyzer\")\n parser.add_argument(\"--passwordfile\", help=\"File containing passwords to be analyzed\", required=True, nargs=1)\n parser.add_argument(\"--mostcommon\", help=\"Find the n most common passwords\", required=False, nargs=1, type=int)\n parser.add_argument(\"--lengths\", help=\"Displays a graph and prints out statistics about password lengths\", required=False, action='store_true')\n #parser.add_argument(\"-a\", help=\"Runs all analysis programs on the password list.\", required=False, action='store_true')\n #parser.add_argument(\"--wordlist\", help=\"Specify a commmon wordlist to be compared to password file to find weak passwords. 
Rockyou.txt is the default\", required=False, type=str, nargs=1)\n parser.add_argument(\"--showstats\", help=\"Show statistics such as median on the graphs if it applies\", required=False, action='store_true')\n parser.add_argument(\"--pattern\", help=\"Prints out pattern detection from the wordlist\", required=False, action=\"store_true\")\n parser.add_argument(\"--organization\", help=\"Specity an organization name for the title of each graph\", required=False, type=str)\n parser.add_argument(\"--latex\", help=\"Outputs in latex format in a table\", required=False, action='store_true')\n return parser.parse_args()\n\ndef main():\n\targs = parse_args()\n\tif args.organization == None:\n\t\targs.organization = ''\n\tpasswords = load_passwords(args.passwordfile[0], '')\n\tif args.lengths == True:\n\t\tdata = get_password_lengths(passwords)\n\t\tif args.showstats:\n\t\t\tgraph_password_lengths(data, True, args.organization)\n\t\telse:\n\t\t\tgraph_password_lengths(data, False, args.organization)\n\telif args.mostcommon != None:\n\t\tprint(most_common_passwords(passwords, args.mostcommon[0]))\n\t\tmost_common_passwords_graph(passwords, args.mostcommon[0], args.organization)\n\telif args.pattern != None:\n\t\tdata = pattern_detection(passwords)\n\t\tformat_output(data, args.latex)\n\nif __name__ == \"__main__\":\n\tmain()\n", "id": "1967682", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "pwd_analysis.py" } ]
0
chenzhuomit
[ { "content": "import math\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom .resnet import ResNet\n\nclass NoShareConv2d(nn.Module):\n def __init__(self, input_size, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):\n super().__init__()\n if type(stride) == int:\n stride = (stride, stride)\n if type(padding) == int:\n padding = (padding, padding)\n if type(dilation) == int:\n dilation = (dilation, dilation)\n assert stride == (1, 1), 'only implemented stride is 1 case'\n assert dilation == (1, 1), 'only implemented dilation is 1 case'\n assert groups == 1, 'only implemented group is 1 case'\n assert padding_mode == 'zeros', 'only implemented zero padding'\n self.input_size = input_size\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.groups = groups\n self.bias = bias\n self.padding_mode = padding_mode\n \n self.in_dim = in_channels * kernel_size**2\n self.length = (input_size + 2 * padding[0] - (kernel_size-1))\n self.out_dim = out_channels\n \n self.unfoldlayer = nn.Unfold(kernel_size, dilation, padding, stride)\n self.weight = nn.Parameter(torch.Tensor(self.length**2, self.in_dim, self.out_dim))\n if bias:\n self.bias = nn.Parameter(torch.Tensor(self.length**2, self.out_dim))\n else:\n self.bias = None\n self.reset_parameters()\n # self.fflayers = nn.ModuleList([\n # nn.Linear(self.in_dim, self.out_dim, bias) for i in range(self.length**2)\n # ])\n \n def forward(self, x):\n y = self.unfoldlayer(x).permute(2, 0, 1).contiguous()\n y = torch.bmm(y, self.weight)\n if self.bias is not None:\n y = y + self.bias.unsqueeze(1)\n y = y.permute(1, 2, 0).contiguous()\n return y.view(y.shape[0], self.out_channels, self.length, self.length)\n \n def reset_parameters(self):\n for i in range(self.length**2):\n nn.init.kaiming_uniform_(self.weight[i], a=math.sqrt(5))\n if self.bias is not None:\n fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[i])\n bound = 1 / math.sqrt(fan_in)\n nn.init.uniform_(self.bias[i], -bound, bound)\n \n def extra_repr(self):\n s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'\n ', stride={stride}')\n if self.padding != (0,) * len(self.padding):\n s += ', padding={padding}'\n if self.dilation != (1,) * len(self.dilation):\n s += ', dilation={dilation}'\n if self.groups != 1:\n s += ', groups={groups}'\n if self.bias is None:\n s += ', bias=False'\n if self.padding_mode != 'zeros':\n s += ', padding_mode={padding_mode}'\n return s.format(**self.__dict__)\n \n \nclass NoShareResBlock(nn.Module):\n def __init__(self, input_size, hidden_channels, kernel_size, batch_norm=True, resnet=True, dropout=0.):\n super().__init__()\n self.input_size = input_size\n self.hidden_channels = hidden_channels\n self.kernel_size = kernel_size\n self.padding = kernel_size // 2 if kernel_size % 2 == 1 else (kernel_size // 2, kernel_size // 2 - 1)\n self.batch_norm = batch_norm\n self.resnet = resnet\n self.dropout = dropout\n layers = []\n layers.append(NoShareConv2d(input_size, hidden_channels, hidden_channels, kernel_size, padding=self.padding))\n if batch_norm:\n layers.append(nn.BatchNorm2d(hidden_channels))\n layers.append(nn.Dropout(dropout))\n layers.append(nn.LeakyReLU())\n layers.append(NoShareConv2d(input_size, hidden_channels, hidden_channels, kernel_size, padding=self.padding))\n if batch_norm:\n 
layers.append(nn.BatchNorm2d(hidden_channels))\n layers.append(nn.Dropout(dropout))\n layers.append(nn.LeakyReLU())\n layers.append(NoShareConv2d(input_size, hidden_channels, hidden_channels, kernel_size, padding=self.padding))\n if batch_norm:\n layers.append(nn.BatchNorm2d(hidden_channels))\n layers.append(nn.Dropout(dropout))\n self.resblock = ResNet(nn.Sequential(*layers), resnet=resnet)\n \n def forward(self, x):\n return F.leaky_relu(self.resblock(x))\n \n def count_params(self):\n return sum(p.numel() for p in self.parameters())\n\nclass NoShareConvLayer(nn.Module):\n def __init__(self, in_channels, hidden_channels, out_channels, kernel_size1, kernel_size2, kernel_size3, batch_norm=True, pooling=True, resnet=True, dropout=0.):\n super().__init__()\n self.input_size1 = 32\n self.input_size2 = 16 if pooling else 32\n self.input_size3 = 8 if pooling else 32\n self.in_channels = in_channels\n self.hidden_channels = hidden_channels\n self.out_channels = out_channels\n self.kernel_size1 = kernel_size1\n self.kernel_size2 = kernel_size2\n self.kernel_size3 = kernel_size3\n self.padding1 = kernel_size1 // 2\n self.padding3 = kernel_size3 // 2\n self.batch_norm = batch_norm\n self.pooling = pooling\n self.resnet = resnet\n self.dropout = dropout\n layers = []\n layers.append(NoShareConv2d(self.input_size1, in_channels, hidden_channels, self.kernel_size1, padding=self.padding1))\n if batch_norm:\n layers.append(nn.BatchNorm2d(hidden_channels))\n layers.append(nn.Dropout(dropout))\n layers.append(nn.LeakyReLU())\n if pooling:\n layers.append(nn.MaxPool2d(2))\n layers.append(NoShareResBlock(self.input_size2, hidden_channels, self.kernel_size2, batch_norm, resnet, dropout))\n if pooling:\n layers.append(nn.MaxPool2d(2))\n layers.append(NoShareConv2d(self.input_size3, hidden_channels, out_channels, self.kernel_size3, padding=self.padding3))\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n layers.append(nn.Dropout(dropout))\n layers.append(nn.LeakyReLU())\n if pooling:\n layers.append(nn.MaxPool2d(2))\n self.net = nn.Sequential(*layers)\n \n def forward(self, x):\n return self.net(x)\n \n def count_params(self):\n return sum(p.numel() for p in self.parameters())\n \n \nclass NoShareConvNet(nn.Module):\n def __init__(self, in_channels, hidden_channels, out_channels, kernel_size1, kernel_size2, kernel_size3, intermediate_dim, nb_classes, batch_norm=True, pooling=True, resnet=True, dropout=0., convdropout=0.):\n super().__init__()\n self.in_channels = in_channels\n self.hidden_channels = hidden_channels\n self.intermidiate_dim = intermediate_dim\n self.nb_classes = nb_classes\n self.batch_norm = batch_norm\n self.pooling = pooling\n self.resnet = resnet\n self.dropout = dropout\n self.convdropout = convdropout\n self.convlayer = NoShareConvLayer(in_channels, hidden_channels, out_channels, kernel_size1, kernel_size2, kernel_size3, batch_norm, pooling, resnet, convdropout)\n if pooling:\n self.conv_out_dim = 4 * 4 * out_channels\n else:\n self.conv_out_dim = 32 * 32 * out_channels\n fflayer = []\n fflayer.append(nn.Linear(self.conv_out_dim, intermediate_dim))\n fflayer.append(nn.Dropout(dropout))\n fflayer.append(nn.LeakyReLU())\n fflayer.append(nn.Linear(intermediate_dim, nb_classes))\n self.fflayer = nn.Sequential(*fflayer)\n \n \n def forward(self, x):\n y = self.convlayer(x)\n y = y.view(y.shape[0], -1)\n return self.fflayer(y)\n \n def count_params(self):\n return sum(p.numel() for p in self.parameters())\n \n ", "id": "11861583", "language": "Python", "matching_score": 
4.826777935028076, "max_stars_count": 0, "path": "model/no_share_conv.py" }, { "content": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom .resnet import ResNet\n\nclass SharedResBlock(nn.Module):\n def __init__(self, hidden_channels, kernel_size, batch_norm=True, resnet=True, dropout=0.):\n super().__init__()\n self.hidden_channels = hidden_channels\n self.kernel_size = kernel_size\n self.padding = kernel_size // 2 if kernel_size % 2 == 1 else (kernel_size // 2, kernel_size // 2 - 1)\n self.batch_norm = batch_norm\n self.resnet = resnet\n self.dropout = dropout\n layers = []\n layers.append(nn.Conv2d(hidden_channels, hidden_channels, kernel_size, padding=self.padding))\n if batch_norm:\n layers.append(nn.BatchNorm2d(hidden_channels))\n layers.append(nn.Dropout(dropout))\n layers.append(nn.LeakyReLU())\n layers.append(nn.Conv2d(hidden_channels, hidden_channels, kernel_size, padding=self.padding))\n if batch_norm:\n layers.append(nn.BatchNorm2d(hidden_channels))\n layers.append(nn.Dropout(dropout))\n layers.append(nn.LeakyReLU())\n layers.append(nn.Conv2d(hidden_channels, hidden_channels, kernel_size, padding=self.padding))\n if batch_norm:\n layers.append(nn.BatchNorm2d(hidden_channels))\n layers.append(nn.Dropout(dropout))\n self.resblock = ResNet(nn.Sequential(*layers), resnet=resnet)\n \n def forward(self, x):\n return F.leaky_relu(self.resblock(x))\n \n def count_params(self):\n return sum(p.numel() for p in self.parameters())\n\nclass SharedConvLayer(nn.Module):\n def __init__(self, in_channels, hidden_channels, out_channels, kernel_size1, kernel_size2, kernel_size3, batch_norm=True, pooling=True, resnet=True, dropout=0.):\n super().__init__()\n self.in_channels = in_channels\n self.hidden_channels = hidden_channels\n self.kernel_size1 = kernel_size1\n self.kernel_size2 = kernel_size2\n self.kernel_size3 = kernel_size3\n self.padding1 = kernel_size1 // 2 if kernel_size1 % 2 == 1 else (kernel_size1 // 2, kernel_size1 // 2 - 1)\n self.padding3 = kernel_size3 // 2 if kernel_size1 % 2 == 1 else (kernel_size3 // 2, kernel_size3 // 2 - 1)\n self.batch_norm = batch_norm\n self.pooling = pooling\n self.resnet = resnet\n self.dropout = dropout\n layers = []\n layers.append(nn.Conv2d(in_channels, hidden_channels, self.kernel_size1, padding=self.padding1))\n if batch_norm:\n layers.append(nn.BatchNorm2d(hidden_channels))\n layers.append(nn.Dropout(dropout))\n layers.append(nn.LeakyReLU())\n if pooling:\n layers.append(nn.MaxPool2d(2))\n layers.append(SharedResBlock(hidden_channels, self.kernel_size2, batch_norm, resnet, dropout))\n if pooling:\n layers.append(nn.MaxPool2d(2))\n layers.append(nn.Conv2d(hidden_channels, out_channels, self.kernel_size3, padding=self.padding3))\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n layers.append(nn.Dropout(dropout))\n layers.append(nn.LeakyReLU())\n if pooling:\n layers.append(nn.MaxPool2d(2))\n self.net = nn.Sequential(*layers)\n \n def forward(self, x):\n return self.net(x)\n \n def count_params(self):\n return sum(p.numel() for p in self.parameters())\n \n \nclass SharedConvNet(nn.Module):\n def __init__(self, in_channels, hidden_channels, out_channels, kernel_size1, kernel_size2, kernel_size3, intermediate_dim, nb_classes, batch_norm=True, pooling=True, resnet=True, dropout=0., convdropout=0.):\n super().__init__()\n self.in_channels = in_channels\n self.hidden_channels = hidden_channels\n self.intermidiate_dim = intermediate_dim\n self.nb_classes = nb_classes\n self.batch_norm = batch_norm\n 
self.pooling = pooling\n self.resnet = resnet\n self.dropout = dropout\n self.convdropout = convdropout\n self.convlayer = SharedConvLayer(in_channels, hidden_channels, out_channels, kernel_size1, kernel_size2, kernel_size3, batch_norm, pooling, resnet, convdropout)\n if pooling:\n self.conv_out_dim = 4 * 4 * out_channels\n else:\n self.conv_out_dim = 32 * 32 * out_channels\n fflayer = []\n fflayer.append(nn.Linear(self.conv_out_dim, intermediate_dim))\n fflayer.append(nn.Dropout(dropout))\n fflayer.append(nn.LeakyReLU())\n fflayer.append(nn.Linear(intermediate_dim, nb_classes))\n self.fflayer = nn.Sequential(*fflayer)\n \n \n def forward(self, x):\n y = self.convlayer(x)\n y = y.view(y.shape[0], -1)\n return self.fflayer(y)\n \n def count_params(self):\n return sum(p.numel() for p in self.parameters())\n \n ", "id": "3852414", "language": "Python", "matching_score": 3.718604564666748, "max_stars_count": 0, "path": "model/shared_conv.py" }, { "content": "from argparse import ArgumentParser\nimport time\nfrom datetime import datetime\nimport os\nimport shutil\nimport logging\nimport random\nimport json\nimport mkl\nimport multiprocessing\nimport copy\n\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\nimport torch\nfrom torch import nn\nfrom torch import optim\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom model import DataLoader, SharedConvNet, NoShareConvNet\n\ndef get_config():\n parser = ArgumentParser()\n parser.add_argument(\"--hidden_channels\", type=int, default=101)\n parser.add_argument(\"--out_channels\", type=int, default=225)\n parser.add_argument(\"--kernel_size1\", type=int, default=3)\n parser.add_argument(\"--kernel_size2\", type=int, default=3)\n parser.add_argument(\"--kernel_size3\", type=int, default=3)\n parser.add_argument(\"--intermediate_dim\", type=int, default=512)\n parser.add_argument(\"--batch_norm\", type=int, default=1)\n parser.add_argument(\"--pooling\", type=int, default=1)\n parser.add_argument(\"--resnet\", type=int, default=1)\n parser.add_argument(\"--dropout\", type=float, default=0.5)\n parser.add_argument(\"--convdropout\", type=float, default=0.0)\n parser.add_argument(\"--model_name\", type=str, default='SharedConvNet')\n \n parser.add_argument(\"--batch_size\", type=int, default=200)\n parser.add_argument(\"--nb_epochs\", type=int, default=100)\n parser.add_argument(\"--lr\", type=float, default=1e-3)\n parser.add_argument(\"--gamma\", type=float, default=0.5)\n parser.add_argument(\"--lr_milestones\", type=str, default=\"800,1200,1800,2500,4000,6000,8000\")\n parser.add_argument(\"--save_dir\", type=str, default='none')\n parser.add_argument(\"--seed\", type=int, default=1) \n parser.add_argument(\"--float64\", type=int, default=0)\n parser.add_argument(\"--device\", type=str, default=\"cuda\")\n\n args = parser.parse_args()\n if args.save_dir.lower() == \"none\":\n args.save_dir='./results/'+args.model_name+'/'+now.strftime(\"%m-%d-%Y-%H-%M-%S\")+\\\n f'{args.hidden_channels}_{args.out_channels}_{args.kernel_size1}_{args.kernel_size2}_{args.kernel_size3}_{args.intermediate_dim}_{args.batch_norm}_{args.pooling}_{args.resnet}_{args.dropout}_{args.convdropout}'\n else:\n args.save_dir = \"./results/\" + args.save_dir\n\n if args.float64:\n torch.set_default_dtype(torch.float64)\n else:\n torch.set_default_dtype(torch.float32)\n\n os.makedirs(args.save_dir, exist_ok=True)\n with open(args.save_dir + '/config.json', 'w') as f:\n json.dump(args.__dict__, f, indent=2)\n file_handler = 
logging.FileHandler(os.path.join(args.save_dir, \"log.txt\"), mode=\"w\")\n if args.save_dir:\n for filename in os.listdir('./'):\n if '.sh' in filename or \\\n '.swb' in filename or \\\n '.py' in filename:\n if filename == '.pylint.d':\n continue\n if '__pycache__' in filename:\n continue\n shutil.copy(filename, args.save_dir)\n shutil.copytree('./model', args.save_dir+'/model', dirs_exist_ok=True, ignore=shutil.ignore_patterns('*__pycache__*'))\n logger.addHandler(file_handler)\n\n return args\n\ndef set_seed(seed):\n \"\"\"set random seed\n \"\"\"\n logging.info(f'random {seed=}')\n torch.backends.cudnn.deterministic = False\n torch.backends.cudnn.benchmark = True\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n\nif __name__ == '__main__':\n\n logger = logging.getLogger()\n logging.basicConfig(level=logging.INFO, format='%(message)s')\n now = datetime.now()\n \n config = get_config()\n set_seed(config.seed)\n \n device = torch.device(config.device) if torch.cuda.is_available() else torch.device('cpu')\n \n loss_writer = open(config.save_dir+\"/loss.txt\", 'a+')\n accu_writer = open(config.save_dir+\"/accuracy.txt\", 'a+')\n \n dataloader = DataLoader(config.batch_size)\n trainloader, testloader, classes = dataloader()\n \n model = globals()[config.model_name](3, config.hidden_channels, config.out_channels,\n config.kernel_size1, config.kernel_size2, config.kernel_size3,\n config.intermediate_dim, 10, config.batch_norm, \n config.pooling, config.resnet, config.dropout, config.convdropout)\n logging.info(f'number of parameters: {model.count_params()}')\n model = model.to(device)\n model.train()\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.AdamW(model.parameters(), config.lr)\n scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=list(map(int, config.lr_milestones.split(','))), gamma=config.gamma)\n \n for epoch in range(config.nb_epochs):\n model.train()\n running_loss = 0.\n for i, data in enumerate(trainloader):\n inputs, labels = data\n optimizer.zero_grad()\n outputs = model(inputs.to(device))\n loss = criterion(outputs, labels.to(device))\n loss.backward()\n optimizer.step()\n scheduler.step()\n running_loss += loss.item()\n if (i+1) % 25 == 0:\n logging.info(f'epoch={epoch+1}, i={i+1}, loss={running_loss/25}')\n for item in [epoch+1, i+1, running_loss]:\n loss_writer.write(\"%s \" % item)\n loss_writer.write('\\n')\n running_loss = 0.0\n \n model.eval()\n \n train_correct = 0\n train_total = 0.\n \n with torch.no_grad():\n for i, data in enumerate(trainloader):\n if i == 10000:\n break\n images, labels = data\n outputs = model(images.to(device))\n _, predicted = torch.max(outputs.data, 1)\n train_total += labels.shape[0]\n train_correct += (predicted == labels.to(device)).sum().item()\n train_accu = train_correct / train_total\n logging.info(f'epoch={epoch+1}, train accuracy={train_accu * 100}%')\n \n test_correct = 0\n test_total = 0.\n \n with torch.no_grad():\n for data in testloader:\n images, labels = data\n outputs = model(images.to(device))\n _, predicted = torch.max(outputs.data, 1)\n test_total += labels.shape[0]\n test_correct += (predicted == labels.to(device)).sum().item()\n test_accu = test_correct / test_total\n logging.info(f'epoch={epoch+1}, test accuracy={test_accu * 100}%')\n \n for item in [epoch+1, train_accu, test_accu]:\n accu_writer.write(\"%s \" % item)\n accu_writer.write('\\n')\n \n loss_writer.flush()\n accu_writer.flush()\n \n torch.save(model.state_dict(), 
config.save_dir+\"/model.pt\")\n \n \n\n", "id": "5364902", "language": "Python", "matching_score": 2.9445323944091797, "max_stars_count": 0, "path": "main.py" }, { "content": "from collections import namedtuple\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\n\nclass DataLoader:\n def __init__(self, batch_size):\n self.batch_size = batch_size\n \n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n \n self.trainset = torchvision.datasets.CIFAR10(root='./data', train=True,\n download=True, transform=transform)\n self.trainloader = torch.utils.data.DataLoader(self.trainset, batch_size=batch_size,\n shuffle=True, num_workers=2)\n \n self.testset = torchvision.datasets.CIFAR10(root='./data', train=False,\n download=True, transform=transform)\n self.testloader = torch.utils.data.DataLoader(self.testset, batch_size=batch_size,\n shuffle=False, num_workers=2)\n \n self.classes = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n \n def __call__(self):\n dataloader = namedtuple('dataloader', ['trainloader', 'testloader', 'classes'])\n return dataloader(self.trainloader, self.testloader, self.classes)", "id": "3240300", "language": "Python", "matching_score": 0.38710907101631165, "max_stars_count": 0, "path": "model/dataloader.py" }, { "content": "from .dataloader import *\nfrom .resnet import *\nfrom .shared_conv import *\nfrom .no_share_conv import *", "id": "8804971", "language": "Python", "matching_score": 0.24618752300739288, "max_stars_count": 0, "path": "model/__init__.py" }, { "content": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass ResNet(nn.Module):\n def __init__(self, net, resnet=True):\n super().__init__()\n self.net = net\n self.resnet = resnet\n def forward(self, x):\n if self.resnet:\n identity = x\n else:\n identity = 0.\n return self.net(x) + identity", "id": "1010123", "language": "Python", "matching_score": 0.04947603493928909, "max_stars_count": 0, "path": "model/resnet.py" } ]
1.665821
aryan0078
[ { "content": "import requests\nimport wget\nimport pafy as pa\nfrom bs4 import BeautifulSoup as soup\ndef name_converter(x):\n\tp=requests.get('https://www.youtube.com/results?search_query={}'.format('+'.join(x.split())))\n\tsoupp=soup(p.text,'html.parser')\n\tlis = soupp.findAll('a', attrs={'class':'yt-uix-tile-link yt-ui-ellipsis yt-ui-ellipsis-2 yt-uix-sessionlink spf-link'})\n\to=str(lis)[220:280]\n\timport re\n\ta = re.search(r'\\b(watch)\\b',o)\n\treturn 'https://www.youtube.com/watch?v={}'.format(o[a.start()+8:a.start()+19])\n\ndef downloadlinks(x,mp3=False):\n\tv= pa.new(x)\n\t\n\tif mp3==True:\n\t\ts=v.getbestaudio()\n\t\treturn s.url\n\ts = v.getbestvideo()\n\t#p={}\n\t#for s in streams:\n\t\t#p.update({s.resolution:s.url})\n\treturn s.url\n\n", "id": "6772709", "language": "Python", "matching_score": 3.8131790161132812, "max_stars_count": 2, "path": "downloader.py" }, { "content": "import os\nfrom downloader import *\nimport pafy as p\nimport os\nclass Mtube:\n def __init__(self,name):\n try:\n os.chdir('./songs')\n if len(os.listdir())>10:\n self.cleaner()\n self.name=name\n self.songurl=self.name_converter(name)\n self.filename=self.namel()\n except:\n if len(os.listdir())>10:\n self.cleaner()\n self.name=name\n self.songurl=self.name_converter(name)\n self.filename=self.namel()\n #print(self.filename)\n def name_converter(self,x):\n p=requests.get('https://www.youtube.com/results?search_query={}'.format('+'.join(x.split())))\n soupp=soup(p.text,'html.parser')\n lis = soupp.findAll('a', attrs={'class':'yt-uix-tile-link yt-ui-ellipsis yt-ui-ellipsis-2 yt-uix-sessionlink spf-link'})\n o=str(lis)[220:280]\n import re\n a = re.search(r'\\b(watch)\\b',o)\n return 'https://www.youtube.com/watch?v={}'.format(o[a.start()+8:a.start()+19])\n def converter(self):\n from pydub import AudioSegment\n \n for i in os.listdir():\n if i.endswith('webm'):\n AudioSegment.from_file(i).export(i[:-5]+'.mp3', format=\"mp3\")\n os.remove(i)\n def d(self):\n video=p.new(self.songurl)\n best=video.getbestaudio()\n print(\"########DOWNLOADING SONG#########\")\n best.download()\n return video.title\n def m(self):\n if self.check():\n return self.filename\n self.d()\n self.converter()\n return self.filename\n def namel(self):\n video=p.new(self.songurl)\n best=video.getbestaudio()\n return video.title+'.mp3'\n def stream(self):\n video=p.new(self.songurl)\n best=video.getbestaudio()\n return best.url\n def cleaner(self):\n for i in os.listdir():\n if i.endswith('mp3'):\n os.remove(i)\n def check(self):\n for i in os.listdir():\n #print(i)\n if i==self.filename:\n return True\n\n \n ", "id": "11392190", "language": "Python", "matching_score": 1.809112787246704, "max_stars_count": 2, "path": "converter.py" }, { "content": "from flask import Flask,redirect, url_for, request, render_template,send_file\nfrom downloader import *\nfrom converter import *\napp=Flask(__name__)\nimport os\[email protected]('/')\ndef main():\n\t\n\treturn render_template('index.html')\[email protected]('/download',methods=['POST','GET'])\ndef download():\n\tif request.method=='POST':\n\t\tname=request.form['name']\n\t\tquality=request.form['service']\n\t\t#print(name)\n\t\t\n\t\tif quality=='mp3':\n\t\t\tmtube=Mtube(name)\n\t\t\tfilename=mtube.m()\n\t\t\treturn render_template(\"download.html\",sr=mtube.stream(),fn=filename)\n\t\telse:\n\t\t\tlink=downloadlinks(name_converter(name))\n\t\t\treturn redirect(link)\[email protected]('/multi')\ndef multi():\n\tif 
request.method=='POST':\n\t\tname=request.form['name']\n\t\t#print(name)\n\t\treturn render_template(\"download.html\",sr=stream(name))\n\n@app.route('/return-files/<name>')\ndef return_files_tut(name):\n\ttry:\n\t\tf=os.getcwd()+'/'+name\n\t\treturn send_file(f, attachment_filename=f,as_attachment=True)\n\texcept Exception as e:\n\t\treturn str(e)\n\napp.run()", "id": "1633727", "language": "Python", "matching_score": 0.6323533058166504, "max_stars_count": 2, "path": "main.py" } ]
1.809113
schavali02
[ { "content": "'''\nCreated on Aug 4, 2020\n\n@author: shashankchavali\n'''\nimport textanalysismodule as s\n\nprint(s.sentiment(\"This movie was great.\"))\nprint(s.sentiment(\"This was a horrible movie.\"))", "id": "4625939", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "textanalysis.py" }, { "content": "#File: textanalysismodule.py\n\n\nimport nltk\nimport random\nfrom nltk.corpus import movie_reviews\nfrom nltk.classify.scikitlearn import SklearnClassifier\nimport pickle\n\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\n\nfrom nltk.classify import ClassifierI\nfrom statistics import mode\n\nfrom nltk.tokenize import word_tokenize\n\n\nclass VoteClassifier(ClassifierI):\n def __init__(self, *classifiers):\n self._classifiers = classifiers\n\n def classify(self, features):\n votes = []\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n return mode(votes)\n\n def confidence(self, features):\n votes = []\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n\n choice_votes = votes.count(mode(votes))\n conf = choice_votes / len(votes)\n return conf\n \nshort_pos = open(\"/Users/shashankchavali/Documents/positive.txt\",\"r\", encoding='ISO-8859-1').read()\nshort_neg = open(\"/Users/shashankchavali/Documents/negative.txt\",\"r\", encoding='ISO-8859-1').read()\n\ndocuments = []\nall_words = []\n\nallowed_word_types = [\"J\"]\n\nfor p in short_pos.split('\\n'):\n documents.append( (p, \"pos\") )\n words = word_tokenize(p)\n pos = nltk.pos_tag(words)\n for w in pos:\n if w[1][0] in allowed_word_types:\n all_words.append(w[0].lower())\n\n \nfor p in short_neg.split('\\n'):\n documents.append( (p, \"neg\") )\n words = word_tokenize(p)\n pos = nltk.pos_tag(words)\n for w in pos:\n if w[1][0] in allowed_word_types:\n all_words.append(w[0].lower())\n\n\ndocument_pickle = open(\"documents.pickle\", \"rb\")\ndocuments = pickle.load(document_pickle)\ndocument_pickle.close()\n\nall_words = nltk.FreqDist(all_words)\n\nword_features = list(all_words.keys())[:5000]\n\n\nwf_pickle = open(\"wf.pickle\", \"rb\")\nword_features = pickle.load(wf_pickle)\nwf_pickle.close()\n\ndef find_features(document):\n words = word_tokenize(document)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features\n\n#print((find_features(movie_reviews.words('neg/cv000_29416.txt'))))\n\nfeaturesets = [(find_features(rev), category) for (rev, category) in documents]\n\nrandom.shuffle(featuresets)\n\ntraining_set = featuresets[:10000]\ntesting_set = featuresets[10000:]\n\n\n\n\nclassifier = nltk.NaiveBayesClassifier.train(training_set)\nclassifier1 = open(\"naivebayes.pickle\", \"rb\")\nclassifier = pickle.load(classifier1)\nclassifier1.close()\n\n\n\nMNB_classifier = SklearnClassifier(MultinomialNB())\nMNB_classifier.train(training_set)\nclassifier2 = open(\"MNB.pickle\", \"rb\")\nMNB_classifier = pickle.load(classifier2)\nclassifier2.close()\n\n\nBernoulliNB_classifier = SklearnClassifier(BernoulliNB())\nBernoulliNB_classifier.train(training_set)\nclassifier3 = open(\"Bernoulli.pickle\", \"rb\")\nBernoulliNB_classifier = pickle.load(classifier3)\nclassifier3.close()\n\n\nLogisticRegression_classifier = SklearnClassifier(LogisticRegression())\nLogisticRegression_classifier.train(training_set)\nclassifier4 = open(\"LogReg.pickle\", \"rb\")\nLogisticRegression_classifier = 
pickle.load(classifier4)\nclassifier4.close()\n\n\nLinearSVC_classifier = SklearnClassifier(LinearSVC())\nLinearSVC_classifier.train(training_set)\nclassifier6 = open(\"LinearSVC.pickle\", \"rb\")\nLinearSVC_classifier = pickle.load(classifier6)\nclassifier6.close()\n\n\nvoted_classifier = VoteClassifier(\n LinearSVC_classifier,\n MNB_classifier,\n BernoulliNB_classifier,\n LogisticRegression_classifier)\n\n\ndef sentiment(text):\n feats = find_features(text)\n return voted_classifier(feats),voted_classifier.confidence(feats)", "id": "942066", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "textanalysismodule.py" } ]
0
buut-vrij
[ { "content": "\"\"\"\nSupport for S0PCM pulse meter.\n\nFor more details about this platform, please refer to the documentation at\nhttps://home-assistant.io/components/sensor.s0pcm/\n\"\"\"\nfrom time import gmtime, time\n\nfrom serial import Serial\nimport voluptuous as vol\n\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers.entity import Entity\n\n__version__ = '1'\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Optional('device', default='/dev/ttyACM0'): cv.string,\n vol.Optional('channels', default=[0]):\n vol.All(cv.ensure_list, [0, 1, 2, 3, 4])\n})\n\n\ndef setup_platform(hass, config, add_devices, discovery_info=None):\n \"\"\"Set up the S0PCM platform.\"\"\"\n s0pcmReader = S0pcmSerialReader(config['device'], 9600)\n # Anyone a suggestion how to do this without reader being added as device?\n add_devices([s0pcmReader])\n # Create and add channel entities\n s0pcmChannels = [S0pcmChannel(channel, s0pcmReader)\n for channel in config['channels']]\n add_devices(s0pcmChannels)\n # Create and add channel (hourly count) entities\n add_devices([S0pcmChannelHourly(s0pcmChannel)\n for s0pcmChannel in s0pcmChannels])\n\n\nclass S0pcmSerialReader(Entity):\n \"\"\"Get data from s0pcm serial device.\"\"\"\n\n def __init__(self, device, baudrate):\n \"\"\"Initialize the data object.\"\"\"\n self._counts = {key: None for key in range(0, 5)}\n self._serial = Serial(device, baudrate)\n\n def __del__(self):\n \"\"\"Close serial connection.\"\"\"\n self._serial.close()\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return \"S0PCM serial reader\"\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return None\n\n def GetCount(self, channel):\n \"\"\"Return the counters per S0PCM channel.\"\"\"\n return self._counts[channel]\n\n def update(self):\n \"\"\"Update and parse buffered serial lines.\"\"\"\n while (self._serial.inWaiting() > 0):\n data = str(self._serial.readline()).split(':')\n if len(data) == 19:\n self._counts[0] = data[6]\n self._counts[1] = data[9]\n self._counts[2] = data[12]\n self._counts[3] = data[15]\n self._counts[4] = data[18].strip(\"\\\\r\\\\n'\")\n\n\nclass S0pcmChannel(Entity):\n \"\"\"Publish total count of s0pcm pulse meter channel.\"\"\"\n\n def __init__(self, channel, s0pcmReader):\n \"\"\"Initialize the data object.\"\"\"\n self._channel = channel\n self._s0pcmReader = s0pcmReader\n self.update()\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return \"S0PCM channel {}\".format(self._channel)\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._state\n\n def update(self):\n \"\"\"Update counter value.\"\"\"\n self._state = self._s0pcmReader.GetCount(self._channel)\n\n\nclass S0pcmChannelHourly(Entity):\n \"\"\"Publish hourly count increment of s0pcm pulse meter channel.\"\"\"\n\n def __init__(self, s0pcmChannel):\n \"\"\"Initialize the data object.\"\"\"\n self._s0pcmChannel = s0pcmChannel\n self._last_update = time()\n self._state = None\n self._previousState = None\n self.update()\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return \"{} (hourly)\".format(self._s0pcmChannel.name)\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._state\n\n def _getHour(self, timestamp):\n \"\"\"Return hour (24h) of timestamp.\"\"\"\n return gmtime(timestamp).tm_hour\n\n def 
update(self):\n \"\"\"Update counter value each hour.\"\"\"\n if (self._getHour(time()) != self._getHour(self._last_update)):\n if self._s0pcmChannel.state:\n currentState = int(self._s0pcmChannel.state)\n if self._previousState:\n self._state = currentState - self._previousState\n self._last_update = time()\n self._previousState = currentState\n", "id": "11399140", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "config/custom_components/sensor/s0pcm.py" } ]
0
kenfmcoin
[ { "content": "#!/usr/bin/env python3\n# Copyright (c) 2017 The KenFMcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test kenfmcoin-cli\"\"\"\nfrom test_framework.test_framework import KenFMcoinTestFramework\nfrom test_framework.util import assert_equal\n\nclass TestKenFMcoinCli(KenFMcoinTestFramework):\n\n def set_test_params(self):\n self.setup_clean_chain = True\n self.num_nodes = 1\n\n def run_test(self):\n \"\"\"Main test logic\"\"\"\n\n self.log.info(\"Compare responses from getinfo RPC and `kenfmcoin-cli getinfo`\")\n cli_get_info = self.nodes[0].cli.getinfo()\n rpc_get_info = self.nodes[0].getinfo()\n\n assert_equal(cli_get_info, rpc_get_info)\n\nif __name__ == '__main__':\n TestKenFMcoinCli().main()\n", "id": "9863484", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "test/functional/kenfmcoin_cli.py" } ]
0
vildasiurblyte
[ { "content": "quote_list = [ \r\n \"I don't know why you say goodbye\",\r\n \"I say hello\",\r\n \"It's a blackberry curve\",\r\n \"My son got a new smart phone...\",\r\n \"Do, not due.\",\r\n \"Life isn't about getting and having, it's about giving and being.\",\r\n \"Whatever the mind of man can conceive and believe, it can achieve.\",\r\n \"Strive not to be a success, but rather to be of value.\",\r\n \"Two roads diverged in a wood, and I—I took the one less traveled by, And that has made all the difference.\",\r\n \"I attribute my success to this: I never gave or took any excuse.\",\r\n \"You miss 100% of the shots you don't take.\",\r\n \"I've missed more than 9000 shots in my career. I've lost almost 300 games. 26 times I've been trusted to take the game winning shot and missed. I've failed over and over and over again in my life. And that is why I succeed.\",\r\n \"The most difficult thing is the decision to act, the rest is merely tenacity.\",\r\n \"Every strike brings me closer to the next home run.\",\r\n \"Definiteness of purpose is the starting point of all achievement.\",\r\n \"We must balance conspicuous consumption with conscious capitalism.\",\r\n \"Life is what happens to you while you're busy making other plans.\",\r\n \"We become what we think about.\",\r\n \"Twenty years from now you will be more disappointed by the things that you didn't do than by the ones you did do, so throw off the bowlines, sail away from safe harbor, catch the trade winds in your sails. Explore, Dream, Discover.\",\r\n \"Life is 10% what happens to me and 90% of how I react to it.\",\r\n \"The most common way people give up their power is by thinking they don't have any.\",\r\n \"The mind is everything. What you think you become.\",\r\n \"The best time to plant a tree was 20 years ago. The second best time is now.\",\r\n \"An unexamined life is not worth living.\",\r\n \"Take a break bud. Typing is tiring!\",\r\n \"Eighty percent of success is showing up.\",\r\n \"Thanks for helping out with hacktober!\",\r\n \"Your time is limited, so don't waste it living someone else's life.\",\r\n \"Winning isn't everything, but wanting to win is.\",\r\n \"I am not a product of my circumstances. I am a product of my decisions.\",\r\n \"Every child is an artist. The problem is how to remain an artist once he grows up.\",\r\n \"You can never cross the ocean until you have the courage to lose sight of the shore.\",\r\n \"I've learned that people will forget what you said, people will forget what you did, but people will never forget how you made them feel.\",\r\n \"Either you run the day, or the day runs you.\",\r\n \"Whether you think you can or you think you can't, you're right.\",\r\n \"The two most important days in your life are the day you are born and the day you find out why.\",\r\n \"Whatever you can do, or dream you can, begin it. Boldness has genius, power and magic in it.\",\r\n \"The best revenge is massive success.\",\r\n \"People often say that motivation doesn't last. Well, neither does bathing. 
That's why we recommend it daily.\",\r\n \"Life shrinks or expands in proportion to one's courage.\",\r\n \"If you hear a voice within you say “you cannot paint,” then by all means paint and that voice will be silenced.\",\r\n \"There is only one way to avoid criticism: do nothing, say nothing, and be nothing.\",\r\n \"Ask and it will be given to you; search, and you will find; knock and the door will be opened for you.\",\r\n \"The only person you are destined to become is the person you decide to be.\",\r\n \"Go confidently in the direction of your dreams. Live the life you have imagined.\",\r\n \"When I stand before God at the end of my life, I would hope that I would not have a single bit of talent left and could say, I used everything you gave me.\",\r\n \"Few things can help an individual more than to place responsibility on him, and to let him know that you trust him.\",\r\n \"Certain things catch your eye, but pursue only those that capture the heart.\",\r\n \"Believe you can and you're halfway there.\",\r\n \"Everything you've ever wanted is on the other side of fear.\",\r\n \"We can easily forgive a child who is afraid of the dark; the real tragedy of life is when men are afraid of the light.\",\r\n \"Teach thy tongue to say, “I do not know,” and thous shalt progress.\",\r\n \"Start where you are. Use what you have. Do what you can.\",\r\n \"When I was 5 years old, my mother always told me that happiness was the key to life. When I went to school, they asked me what I wanted to be when I grew up. I wrote down ‘happy'. They told me I didn't understand the assignment, and I told them they didn't understand life.\",\r\n \"Fall seven times and stand up eight.\",\r\n \"When one door of happiness closes, another opens, but often we look so long at the closed door that we do not see the one that has been opened for us.\",\r\n \"Everything has beauty, but not everyone can see.\",\r\n \"How wonderful it is that nobody need wait a single moment before starting to improve the world.\",\r\n \"When I let go of what I am, I become what I might be.\",\r\n \"Life is not measured by the number of breaths we take, but by the moments that take our breath away.\",\r\n \"Happiness is not something readymade. It comes from your own actions.\",\r\n \"If you're offered a seat on a rocket ship, don't ask what seat! Just get on.\",\r\n \"First, have a definite, clear practical ideal; a goal, an objective. Second, have the necessary means to achieve your ends; wisdom, money, materials, and methods. Third, adjust all your means to that end.\",\r\n \"If the wind will not serve, take to the oars.\",\r\n \"You can't fall if you don't climb. But there's no joy in living your whole life on the ground.\",\r\n \"We must believe that we are gifted for something, and that this thing, at whatever cost, must be attained.\",\r\n \"Too many of us are not living our dreams because we are living our fears.\",\r\n \"Challenges are what make life interesting and overcoming them is what makes life meaningful.\",\r\n \"If you want to lift yourself up, lift up someone else.\",\r\n \"I have been impressed with the urgency of doing. Knowing is not enough; we must apply. Being willing is not enough; we must do.\",\r\n \"Limitations live only in our minds. But if we use our imaginations, our possibilities become limitless.\",\r\n \"You take your life in your own hands, and what happens? A terrible thing, no one to blame.\",\r\n \"What's money? 
A man is a success if he gets up in the morning and goes to bed at night and in between does what he wants to do.\",\r\n \"I didn't fail the test. I just found 100 ways to do it wrong.\",\r\n \"In order to succeed, your desire for success should be greater than your fear of failure.\",\r\n \"A person who never made a mistake never tried anything new.\",\r\n \"The person who says it cannot be done should not interrupt the person who is doing it.\",\r\n \"There are no traffic jams along the extra mile.\",\r\n \"It is never too late to be what you might have been.\",\r\n \"You become what you believe.\",\r\n \"I would rather die of passion than of boredom.\",\r\n \"A truly rich man is one whose children run into his arms when his hands are empty.\",\r\n \"It is not what you do for your children, but what you have taught them to do for themselves, that will make them successful human beings.\",\r\n \"If you want your children to turn out well, spend twice as much time with them, and half as much money.\",\r\n \"Build your own dreams, or someone else will hire you to build theirs.\",\r\n \"The battles that count aren't the ones for gold medals. The struggles within yourself–the invisible battles inside all of us–that's where it's at.\",\r\n \"Education costs money. But then so does ignorance.\",\r\n \"I have learned over the years that when one's mind is made up, this diminishes fear.\",\r\n \"It does not matter how slowly you go as long as you do not stop.\",\r\n \"If you look at what you have in life, you'll always have more. If you look at what you don't have in life, you'll never have enough.\",\r\n \"Remember that not getting what you want is sometimes a wonderful stroke of luck.\",\r\n \"You can't use up creativity. The more you use, the more you have.\",\r\n \"Dream big and dare to fail.\",\r\n \"Our lives begin to end the day we become silent about things that matter.\",\r\n \"Do what you can, where you are, with what you have.\",\r\n \"If you do what you've always done, you'll get what you've always gotten.\",\r\n \"Dreaming, after all, is a form of planning.\",\r\n \"It's your place in the world; it's your life. Go on and do all you can with it, and make it the life you want to live.\",\r\n \"You may be disappointed if you fail, but you are doomed if you don't try.\",\r\n \"Remember no one can make you feel inferior without your consent.\",\r\n \"Life is what we make it, always has been, always will be.\",\r\n \"The question isn't who is going to let me; it's who is going to stop me.\",\r\n \"When everything seems to be going against you, remember that the airplane takes off against the wind, not with it.\",\r\n \"It's not the years in your life that count. 
It's the life in your years.\",\r\n \"Change your thoughts and you change your world.\",\r\n \"Either write something worth reading or do something worth writing.\",\r\n \"Nothing is impossible, the word itself says, “I'm possible!”\",\r\n \"The only way to do great work is to love what you do.\",\r\n \"If you can dream it, you can achieve it.\",\r\n \"History doesn't repeat itself but it often rhymes\",\r\n \"pleasure satisfaction is the most superficial satisfaction of life and therefore this is very easy to achieve and very easy to disappear.\",\r\n \"If we are not willing to fail we are also not willing to succeed\",\r\n \"Fear of failure most come from choosing bad values\",\r\n \"If someone is better than you about something, it looks like it's because he has experienced more failures than you.\",\r\n \"Some of the greatest moments of human life are unpleasant, unsuccessful, unrecognized, and not positive.\",\r\n \"A man can never be a big man without a great woman by his side who always gives support and hope in every step and decision taken.\",\r\n \"Without love intelligence is dangerous, and without love intelligence is not enough.\",\r\n \"Experience cannot be learned, but must be passed.\",\r\n \"Prioritize real work, not image.\",\r\n \"Learn to be grateful from the good things in your life and learn to be strong from the bad things in your life.\",\r\n \"Failure only happens when we give up.\",\r\n \"If you have decided to pursue a field, be a consistent person. That is the real key to success.\",\r\n \"Programs must be written for people to read, and only incidentally for machines to execute.\",\r\n \"Sine squared theta plus cosine squared theta equals one.\",\r\n \"Failure is a step towards success.\",\r\n]\r\n", "id": "2577472", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "quotes_list.py" } ]
0
qacwnfq
[ { "content": "import numpy as np\nimport astropy.io.ascii as ascii\nimport matplotlib.pyplot as plt\n\npyout = ascii.read('test.pyout')\nidlout = ascii.read('test.idlout')\n\nfig, axarr = plt.subplots(4, 2, figsize=(10, 10))\nfig.suptitle(\"python\")\naxarr[0,0].plot(pyout['alpha'])\naxarr[0,0].set_ylabel('alpha')\n\naxarr[0,1].plot(pyout['beta'])\naxarr[0,1].set_ylabel('beta')\n\naxarr[1,0].plot(pyout['sigsqr'])\naxarr[1,0].set_ylabel('sigsqr')\n\naxarr[1,1].plot(pyout['mu0'])\naxarr[1,1].set_ylabel('mu0')\n\naxarr[2,0].plot(pyout['usqr'])\naxarr[2,0].set_ylabel('usqr')\n\naxarr[2,1].plot(pyout['wsqr'])\naxarr[2,1].set_ylabel('wsqr')\n\naxarr[3,0].plot(pyout['ximean'])\naxarr[3,0].set_ylabel('ximean')\n\naxarr[3,1].plot(pyout['xisig'])\naxarr[3,1].set_ylabel('xisig')\n\n\nfig, axarr = plt.subplots(4, 2, figsize=(10, 10))\nfig.suptitle(\"IDL\")\naxarr[0,0].plot(idlout['alpha'])\naxarr[0,0].set_ylabel('alpha')\n\naxarr[0,1].plot(idlout['beta'])\naxarr[0,1].set_ylabel('beta')\n\naxarr[1,0].plot(idlout['sigsqr'])\naxarr[1,0].set_ylabel('sigsqr')\n\naxarr[1,1].plot(idlout['mu00'])\naxarr[1,1].set_ylabel('mu00')\n\naxarr[2,0].plot(idlout['usqr'])\naxarr[2,0].set_ylabel('usqr')\n\naxarr[2,1].plot(idlout['wsqr'])\naxarr[2,1].set_ylabel('wsqr')\n\naxarr[3,0].plot(idlout['ximean'])\naxarr[3,0].set_ylabel('ximean')\n\naxarr[3,1].plot(idlout['xisig'])\naxarr[3,1].set_ylabel('xisig')\n\nplt.show()\n", "id": "1790994", "language": "Python", "matching_score": 2.936736822128296, "max_stars_count": 46, "path": "tests/chain_plot.py" }, { "content": "import numpy as np\nimport corner\nimport astropy.io.ascii as ascii\nimport matplotlib.pyplot as plt\n\npyout = ascii.read('test.pyout')\nidlout = ascii.read('test.idlout')\n\nfig, axarr = plt.subplots(9, 9, figsize=(10, 10))\nfig.suptitle(\"Black = python, red = IDL\")\ncorner.corner(np.array([pyout['alpha'], pyout['beta'], pyout['sigsqr'],\n pyout['mu0'], pyout['usqr'], pyout['wsqr'],\n pyout['ximean'], pyout['xisig'], pyout['corr']]).T,\n labels=[r\"$\\alpha$\", r\"$\\beta$\", r\"$\\sigma^2$\",\n r\"$\\mu_0$\", r\"$u^2$\", r\"$w^2$\",\n r\"$\\bar{\\xi}$\", r\"$\\sigma_\\xi$\", r\"$\\rho_{\\xi\\eta}$\"],\n range=[0.99]*9, plot_datapoints=False,\n fig=fig)\ncorner.corner(np.array([idlout['alpha'], idlout['beta'], idlout['sigsqr'],\n idlout['mu00'], idlout['usqr'], idlout['wsqr'],\n idlout['ximean'], idlout['xisig'], idlout['corr']]).T,\n range=[0.99]*9, plot_datapoints=False,\n fig=fig, color='r')\nfig.subplots_adjust(bottom=0.065, left=0.07)\nplt.show()\n", "id": "7701766", "language": "Python", "matching_score": 1.4942975044250488, "max_stars_count": 46, "path": "tests/triangle_test.py" }, { "content": "import numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.io.ascii as ascii\n\n\ndata = ascii.read('test.dat')\npyout = ascii.read('test.pyout')\nidlout = ascii.read('test.idlout')\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.scatter(data['x'], data['y'], c='k')\nax.errorbar(data['x'], data['y'], xerr=data['xsig'], yerr=data['ysig'], ls=' ', c='k')\n\n# Fix the plot boundaries and then plot the true line\nxlim = ax.get_xlim()\nax.set_xlim(xlim)\nylim = ax.get_ylim()\nax.set_ylim(ylim)\ntruex = np.array(xlim)\ntruey = 4.0 + 3.0*truex\nax.plot(truex, truey, c='k', lw=2)\n\n# Plot samples of the regression line\nsamplex = truex\nfor i in range(0, len(pyout), len(pyout)//50):\n sampley = pyout[i]['alpha'] + pyout[i]['beta'] * samplex\n ax.plot(samplex, sampley, c='b', alpha=0.1)\n\nfor i in range(0, len(pyout), len(pyout)//50):\n sampley = 
idlout[i]['alpha'] + idlout[i]['beta'] * samplex\n ax.plot(samplex, sampley, c='r', alpha=0.1)\n\n\nplt.show()\n", "id": "6287590", "language": "Python", "matching_score": 1.4259120225906372, "max_stars_count": 46, "path": "tests/plottest.py" }, { "content": "from matplotlib import rc\nrc(\"font\", family=\"serif\", size=10)\nrc(\"text\", usetex=True)\n\nimport daft\n\nfigshape = (6.5, 4)\nfigorigin = (-1.5, -0.5)\npgm = daft.PGM(figshape, figorigin)\n\npgm.add_node(daft.Node(\"y\", r\"y\", 1, 0, observed=True))\npgm.add_node(daft.Node(\"x\", r\"x\", 2, 0, observed=True))\npgm.add_node(daft.Node(\"eta\", r\"$\\eta$\", 1, 1))\npgm.add_node(daft.Node(\"xi\", r\"$\\xi$\", 2, 1))\npgm.add_node(daft.Node(\"alpha\", r\"$\\alpha$\", 0, 0))\npgm.add_node(daft.Node(\"beta\", r\"$\\beta$\", 0, 1))\npgm.add_node(daft.Node(\"sigsqr\", r\"$\\sigma^2$\", 0, 2))\npgm.add_node(daft.Node(\"pi\", r\"$\\pi$\", 2, 2))\npgm.add_node(daft.Node(\"mu\", r\"$\\mu$\", 3, 1))\npgm.add_node(daft.Node(\"tausqr\", r\"$\\tau^2$\", 3, 2))\npgm.add_node(daft.Node(\"mu0\", r\"$\\mu_0$\", 3, 0))\npgm.add_node(daft.Node(\"usqr\", r\"$u^2$\", 4, 1))\npgm.add_node(daft.Node(\"wsqr\", r\"$w^2$\", 4, 2))\npgm.add_node(daft.Node(\"prior_alpha\", r\"U($-\\infty$, $\\infty$)\", -1, 0, fixed=True))\npgm.add_node(daft.Node(\"prior_beta\", r\"U($-\\infty$, $\\infty$)\", -1, 1, fixed=True))\npgm.add_node(daft.Node(\"prior_sigsqr\", r\"U(0, $\\infty$)\", -1, 2, fixed=True))\npgm.add_node(daft.Node(\"prior_mu0\", r\"U(min(x), max(x))\", 4, 0, fixed=True))\n# pgm.add_node(daft.Node(\"prior_mu0\", r\"U($-\\infty$, $\\infty$)\", 4, 0, fixed=True))\npgm.add_node(daft.Node(\"prior_wsqr\", r\"U(0, $\\infty$)\", 4, 3, fixed=True))\npgm.add_node(daft.Node(\"prior_pi\", r\"Dirichlet(1, ..., 1)\", 2, 3, fixed=True))\n\npgm.add_edge(\"xi\", \"x\")\npgm.add_edge(\"eta\", \"x\")\npgm.add_edge(\"xi\", \"eta\")\npgm.add_edge(\"eta\", \"y\")\npgm.add_edge(\"xi\", \"y\")\npgm.add_edge(\"alpha\", \"eta\")\npgm.add_edge(\"beta\", \"eta\")\npgm.add_edge(\"sigsqr\", \"eta\")\npgm.add_edge(\"pi\", \"xi\")\npgm.add_edge(\"mu\", \"xi\")\npgm.add_edge(\"tausqr\", \"xi\")\npgm.add_edge(\"mu0\", \"mu\")\npgm.add_edge(\"usqr\", \"mu\")\npgm.add_edge(\"wsqr\", \"usqr\")\npgm.add_edge(\"wsqr\", \"tausqr\")\npgm.add_edge(\"prior_alpha\", \"alpha\")\npgm.add_edge(\"prior_beta\", \"beta\")\npgm.add_edge(\"prior_sigsqr\", \"sigsqr\")\npgm.add_edge(\"prior_mu0\", \"mu0\")\npgm.add_edge(\"prior_wsqr\", \"wsqr\")\npgm.add_edge(\"prior_pi\", \"pi\")\n\npgm.render()\npgm.figure.savefig(\"pgm.png\", dpi=300)\n", "id": "6967203", "language": "Python", "matching_score": 0.1213511973619461, "max_stars_count": 46, "path": "docs/pgm/pgm.py" }, { "content": "import requests\nimport pandas as pd\nimport numpy as np\nimport arviz as az\n\nidx = pd.IndexSlice\n\n\ndef get_raw_covidtracking_data():\n \"\"\" Gets the current daily CSV from COVIDTracking \"\"\"\n url = \"https://covidtracking.com/api/v1/states/daily.csv\"\n data = pd.read_csv(url)\n return data\n\n\ndef process_covidtracking_data(data: pd.DataFrame, run_date: pd.Timestamp):\n \"\"\" Processes raw COVIDTracking data to be in a form for the GenerativeModel.\n In many cases, we need to correct data errors or obvious outliers.\"\"\"\n data = data.rename(columns={\"state\": \"region\"})\n data[\"date\"] = pd.to_datetime(data[\"date\"], format=\"%Y%m%d\")\n data = data.set_index([\"region\", \"date\"]).sort_index()\n data = data[[\"positive\", \"total\"]]\n\n # Too little data or unreliable reporting in the data source.\n data 
= data.drop([\"MP\", \"GU\", \"AS\", \"PR\", \"VI\"])\n\n # On Jun 5 Covidtracking started counting probable cases too\n # which increases the amount by 5014.\n # https://covidtracking.com/screenshots/MI/MI-20200605-184320.png\n data.loc[idx[\"MI\", pd.Timestamp(\"2020-06-05\") :], \"positive\"] -= 5014\n\n # From CT: On June 19th, LDH removed 1666 duplicate and non resident cases\n # after implementing a new de-duplicaton process.\n data.loc[idx[\"LA\", pd.Timestamp(\"2020-06-19\") :], :] += 1666\n\n # Now work with daily counts\n data = data.diff().dropna().clip(0, None).sort_index()\n\n # Michigan missed 6/18 totals and lumped them into 6/19 so we've\n # divided the totals in two and equally distributed to both days.\n data.loc[idx[\"MI\", pd.Timestamp(\"2020-06-18\")], \"total\"] = 14871\n data.loc[idx[\"MI\", pd.Timestamp(\"2020-06-19\")], \"total\"] = 14871\n\n # Note that when we set total to zero, the model ignores that date. See\n # the likelihood function in GenerativeModel.build\n\n # Huge outlier in NJ causing sampling issues.\n data.loc[idx[\"NJ\", pd.Timestamp(\"2020-05-11\")], :] = 0\n\n # Huge outlier in CA causing sampling issues.\n data.loc[idx[\"CA\", pd.Timestamp(\"2020-04-22\")], :] = 0\n\n # Huge outlier in CA causing sampling issues.\n # TODO: generally should handle when # tests == # positives and that\n # is not an indication of positive rate.\n data.loc[idx[\"SC\", pd.Timestamp(\"2020-06-26\")], :] = 0\n\n # Two days of no new data then lumped sum on third day with lack of new total tests\n data.loc[idx[\"OR\", pd.Timestamp(\"2020-06-26\") : pd.Timestamp(\"2020-06-28\")], 'positive'] = 174\n data.loc[idx[\"OR\", pd.Timestamp(\"2020-06-26\") : pd.Timestamp(\"2020-06-28\")], 'total'] = 3296\n\n\n #https://twitter.com/OHdeptofhealth/status/1278768987292209154\n data.loc[idx[\"OH\", pd.Timestamp(\"2020-07-01\")], :] = 0\n\n # Nevada didn't report total tests this day\n data.loc[idx[\"NV\", pd.Timestamp(\"2020-07-02\")], :] = 0\n\n # A bunch of incorrect values for WA data so nulling them out.\n data.loc[idx[\"WA\", pd.Timestamp(\"2020-06-05\") : pd.Timestamp(\"2020-06-07\")], :] = 0\n data.loc[idx[\"WA\", pd.Timestamp(\"2020-06-20\") : pd.Timestamp(\"2020-06-21\")], :] = 0\n\n # Outlier dates in PA\n data.loc[\n idx[\n \"PA\",\n [\n pd.Timestamp(\"2020-06-03\"),\n pd.Timestamp(\"2020-04-21\"),\n pd.Timestamp(\"2020-05-20\"),\n ],\n ],\n :,\n ] = 0\n\n # At the real time of `run_date`, the data for `run_date` is not yet available!\n # Cutting it away is important for backtesting!\n return data.loc[idx[:, :(run_date - pd.DateOffset(1))], [\"positive\", \"total\"]]\n\n\ndef get_and_process_covidtracking_data(run_date: pd.Timestamp):\n \"\"\" Helper function for getting and processing COVIDTracking data at once \"\"\"\n data = get_raw_covidtracking_data()\n data = process_covidtracking_data(data, run_date)\n return data\n\n\ndef summarize_inference_data(inference_data: az.InferenceData):\n \"\"\" Summarizes an inference_data object into the form that we publish on\n rt.live \"\"\"\n posterior = inference_data.posterior\n hdi_mass = 80\n hpdi = az.hdi(posterior.r_t, hdi_prob=hdi_mass / 100).r_t\n\n observed_positive = inference_data.constant_data.observed_positive.to_series()\n scale_to_positives = lambda data: observed_positive.mean() / np.mean(data) * data\n tests = inference_data.constant_data.tests.to_series()\n normalized_positive = observed_positive / tests.clip(0.1 * tests.max())\n\n summary = pd.DataFrame(\n data={\n \"mean\": posterior.r_t.mean([\"draw\", 
\"chain\"]),\n \"median\": posterior.r_t.median([\"chain\", \"draw\"]),\n f\"lower_{hdi_mass}\": hpdi[:, 0],\n f\"upper_{hdi_mass}\": hpdi[:, 1],\n \"infections\": scale_to_positives(\n posterior.infections.mean([\"draw\", \"chain\"])\n ),\n \"test_adjusted_positive\": scale_to_positives(\n posterior.test_adjusted_positive.mean([\"draw\", \"chain\"])\n ),\n \"test_adjusted_positive_raw\": scale_to_positives(normalized_positive),\n \"positive\": observed_positive,\n \"tests\": tests,\n },\n index=pd.Index(posterior.date.values, name=\"date\"),\n )\n return summary\n", "id": "6393704", "language": "Python", "matching_score": 1.8519083261489868, "max_stars_count": 0, "path": "covid/data.py" }, { "content": "import numpy as np\nimport linmix\nfrom astropy.table import Table\n\n\ndef generate_test_data():\n alpha = 4.0\n beta = 3.0\n sigsqr = 0.5\n\n # GMM with 3 components for xi\n xi = np.random.normal(loc=1.0, scale=1.0, size=9)\n xi = np.concatenate([xi, np.random.normal(loc=2.0, scale=1.5, size=20)])\n xi = np.concatenate([xi, np.random.normal(loc=3.0, scale=0.5, size=30)])\n eta = np.random.normal(loc=alpha+beta*xi, scale=np.sqrt(sigsqr))\n\n # Let's mix in some weird measurement uncertainties:\n xsig = 0.25 * np.sin(np.arange(len(xi))) + 0.5\n ysig = 0.25 * np.cos(np.arange(len(eta)))**2 + 0.5\n x = np.random.normal(loc=xi, scale=xsig)\n y = np.random.normal(loc=eta, scale=ysig)\n\n # And put in zero uncertainty in a few of these.\n wzx = np.random.choice(np.arange(len(xi)), size=5, replace=False)\n xsig[wzx] = 0.0\n wzy = np.random.choice(np.arange(len(eta)), size=5, replace=False)\n ysig[wzy] = 0.0\n\n # And censor all the ydata less than 10, unless the yerr is 0\n w10 = (y < 10) & (ysig != 0)\n y[w10] = 10\n delta = np.ones((len(x),), dtype=int) # should really be bool, but ints are easier\n delta[w10] = 0\n\n out = Table([x, y, xsig, ysig, delta], names=['x', 'y', 'xsig', 'ysig', 'delta'])\n import astropy.io.ascii as ascii\n ascii.write(out, 'test.dat')\n\n\ndef run():\n import astropy.io.ascii as ascii\n try:\n a = ascii.read('test.dat')\n except:\n generate_test_data()\n a = ascii.read('test.dat')\n\n lm = linmix.LinMix(a['x'], a['y'], a['xsig'], a['ysig'], delta=a['delta'])\n lm.run_mcmc()\n ascii.write(lm.chain[['alpha', 'beta', 'sigsqr',\n 'mu0', 'usqr', 'wsqr',\n 'ximean', 'xisig', 'corr']],\n 'test.pyout')\n\nif __name__ == '__main__':\n run()\n", "id": "309181", "language": "Python", "matching_score": 1.5193220376968384, "max_stars_count": 46, "path": "tests/test.py" }, { "content": "\"\"\" A hierarchical Bayesian approach to linear regression with error in both X and Y.\n\"\"\"\n\n__all__ = ['linmix']\n\nfrom .linmix import LinMix\n", "id": "4918685", "language": "Python", "matching_score": 0.41978511214256287, "max_stars_count": 46, "path": "linmix/__init__.py" } ]
1.494298
MI-K253
[ { "content": "# ======================================\r\nfrom tkinter import*\r\nfrom tkinter import ttk\r\nfrom pathlib import Path\r\n\r\n# main window\r\nwin = Tk()\r\nwin.title(\"Calculator\")\r\nwin.resizable(False,False)\r\nwin.geometry(\"700x750\") \r\nwin.config(bg = \"black\") \r\nc = Canvas(win,width = 350,height = 110,bg = \"pink\") # output canvas\r\nc.place(relx = 0.3,rely = 0.02) \r\npath = r\"numbers/\" \r\n\r\nlive_label_text = StringVar()\r\nlive_label = Label(win,textvariable = live_label_text,fg = \"black\",bg = \"pink\",font = (\"arial\",40,\"bold\") )\r\nresult_label_text = StringVar()\r\nresult_label = Label(win,textvariable = result_label_text , fg = \"black\" , bg =\"pink\",font = (\"arial\",40,\"bold\"))\r\np = [] \r\nstatus = []\r\nresult_status = []\r\n\r\n# =======================\r\ntry:\r\n def result():\r\n \r\n process = \"\"\r\n for a in p:\r\n process += a\r\n result = eval(process)\r\n result_label_text.set(\"= \" + str(result))\r\n live_label_text.set(\"\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_label.place(relx =0.328 ,rely = 0.053)\r\n status.append(\"True\")\r\n result_status.append(\"True\")\r\n\r\n\r\n def result_deleter():\r\n if \"True\" in status:\r\n result_label_text.set(\"\")\r\n for member in range(len(status)):\r\n status.remove(status[0])\r\n\r\n\r\n def list_reseter():\r\n if \"True\" in result_status:\r\n for x in range(len(p)):\r\n p.remove(p[0])\r\n\r\n for member in range(len(result_status)):\r\n result_status.remove(result_status[0])\r\n\r\nexcept:\r\n result_label_text.set(\"Error\")\r\n result_label.place(relx =0.328 ,rely = 0.053)\r\n \r\n\r\ndef one():\r\n list_reseter()\r\n p.append(\"1\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"1\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\ndef two():\r\n list_reseter()\r\n p.append(\"2\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"2\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\ndef three():\r\n list_reseter()\r\n p.append(\"3\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"3\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\ndef four():\r\n p.append(\"4\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"4\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\ndef five():\r\n list_reseter()\r\n p.append(\"5\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"5\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\ndef six():\r\n list_reseter()\r\n p.append(\"6\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"6\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\ndef seven():\r\n list_reseter()\r\n p.append(\"7\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"7\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\ndef eight():\r\n list_reseter()\r\n p.append(\"8\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"8\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\ndef nine():\r\n list_reseter()\r\n p.append(\"9\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"9\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\n\r\ndef plus():\r\n list_reseter()\r\n p.append(\"+\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"+\")\r\n 
live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\ndef minese():\r\n list_reseter()\r\n p.append(\"-\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"-\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\ndef devide():\r\n list_reseter()\r\n p.append(\"/\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"/\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n \r\ndef multiply():\r\n list_reseter()\r\n p.append(\"*\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"X\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n\r\ndef ob():\r\n list_reseter()\r\n p.append(\"(\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \"(\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n\r\ndef cb():\r\n list_reseter()\r\n p.append(\")\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \")\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n\r\ndef dot():\r\n list_reseter()\r\n p.append(\".\")\r\n a = live_label_text.get()\r\n live_label_text.set(str(a) + \".\")\r\n live_label.place(relx =0.328 ,rely = 0.053)\r\n result_deleter()\r\n\r\ndef reset():\r\n result_deleter()\r\n list_reseter()\r\n\r\n# =======================\r\n# buttons\r\n# 1\r\nb1 = Button(win,command = one,activebackground = \"purple\")\r\nb1_img = PhotoImage(file = path + r\"\\1.png\" )\r\nb1.config(image = b1_img)\r\nb1.place(relx = 0.3 ,rely =0.2)\r\n\r\n#2\r\nb2 = Button(win , command = two,activebackground = \"purple\")\r\nb2_img = PhotoImage(file = path + r\"\\2.png\")\r\nb2.config(image = b2_img)\r\nb2.place(relx = 0.5 ,rely =0.2)\r\n\r\n# 3\r\n\r\nb3 = Button(win , command = three,activebackground = \"purple\")\r\nb3_img = PhotoImage(file = path + r\"\\3.png\")\r\nb3.config(image = b3_img)\r\nb3.place(relx = 0.7 ,rely =0.2)\r\n\r\n# 4\r\nb4 = Button(win , command = four,activebackground= \"purple\")\r\nb4_img = PhotoImage(file = path + r\"\\4.png\")\r\nb4.config(image = b4_img)\r\nb4.place(relx = 0.3 ,rely =0.4)\r\n\r\n# 5\r\nb5 = Button(win , command = five,activebackground = \"purple\")\r\nb5_img = PhotoImage(file = path + r\"\\5.png\")\r\nb5.config(image = b5_img)\r\nb5.place(relx = 0.5 ,rely =0.4)\r\n\r\n# 6\r\nb6 = Button(win , command = six,activebackground = \"purple\")\r\nb6_img = PhotoImage(file = path + r\"\\6.png\")\r\nb6.config(image = b6_img)\r\nb6.place(relx = 0.7 ,rely =0.4)\r\n\r\n# 7\r\nb7 = Button(win , command = seven,activebackground = \"purple\")\r\nb7_img = PhotoImage(file = path + r\"\\7.png\")\r\nb7.config(image = b7_img)\r\nb7.place(relx = 0.3 ,rely =0.6)\r\n\r\n# 8\r\nb8 = Button(win , command = eight,activebackground = \"purple\")\r\nb8_img = PhotoImage(file = path + r\"\\8.png\")\r\nb8.config(image = b8_img)\r\nb8.place(relx = 0.5 ,rely =0.6)\r\n\r\n# 9\r\nb9 = Button(win , command = nine,activebackground = \"purple\")\r\nb9_img = PhotoImage(file = path + r\"\\9.png\")\r\nb9.config(image = b9_img)\r\nb9.place(relx = 0.7 ,rely =0.6)\r\n\r\n# devide\r\nb_devide = Button(win ,command = devide,activebackground = \"purple\")\r\nb_devide_img = PhotoImage(file = path + r\"\\devide.png\")\r\nb_devide.config(image = b_devide_img)\r\nb_devide.place(relx = 0.06 ,rely =0.6)\r\n\r\n# equal\r\nb_equal = Button(win ,command = result,activebackground = \"purple\")\r\nb_equal_img = PhotoImage(file = path + r\"\\equal.png\")\r\nb_equal.config(image = b_equal_img)\r\nb_equal.place(relx = 0.06 ,rely 
=0.8)\r\n\r\n# reset\r\nb_reset = Button(win ,command = reset,activebackground = \"purple\")\r\nb_reset_img = PhotoImage(file = path + r\"\\reset.png\")\r\nb_reset.config(image = b_reset_img)\r\nb_reset.place(relx = 0.7 ,rely =0.8)\r\n\r\n# close bracket\r\nb_cb = Button(win ,command = cb,activebackground= \"purple\")\r\nb_cb_img = PhotoImage(file = path + r\"\\close bracket.png\")\r\nb_cb.config(image = b_cb_img)\r\nb_cb.place(relx = 0.5 ,rely =0.8)\r\n\r\n# open bracket\r\n\r\nb_ob = Button(win ,command = ob,activebackground = \"purple\")\r\nb_ob_img = PhotoImage(file = path + r\"\\open bracket.png\")\r\nb_ob.config(image = b_ob_img)\r\nb_ob.place(relx = 0.3 ,rely =0.8)\r\n\r\n# plus\r\nb_plus = Button(win , command = plus,activebackground = \"purple\")\r\nb_plus_img = PhotoImage(file =path + r\"\\plus.png\")\r\nb_plus.config(image = b_plus_img)\r\nb_plus.place(relx = 0.06 ,rely =0.2)\r\n\r\n# minese\r\nb_minese = Button(win , command = minese,activebackground = \"purple\")\r\nb_minese_img = PhotoImage(file = path + r\"\\minese.png\")\r\nb_minese.config(image = b_minese_img)\r\nb_minese.place(relx = 0.06 ,rely =0.4)\r\n\r\n# multiply\r\nb_multiply = Button(win , command = multiply,activebackground = \"purple\")\r\nb_multiply_img = PhotoImage(file = path + r\"\\multiply.png\")\r\nb_multiply.config(image = b_multiply_img)\r\nb_multiply.place(relx = 0.06 ,rely =0.01)\r\n\r\n# dot\r\nb_d = Button(win , command = dot,activebackground = \"purple\",activeforeground = \"purple\")\r\nb_d_img = PhotoImage(file = path + r\"\\dot.png\")\r\nb_d.config(image = b_d_img)\r\nb_d.place(relx = 0.83 ,rely =0.02)\r\n# ==========================\r\nwin.mainloop()\r\n", "id": "3636085", "language": "Python", "matching_score": 0.8817867636680603, "max_stars_count": 1, "path": "calculator.py" }, { "content": "\"\"\"\r\n* 6/24/2020\r\n* https://stackoverflow.com/questions/40514508/opencv-detect-movement-in-python\r\n\"\"\"\r\n\r\nimport cv2\r\n\r\nclass MotionDetection(object):\r\n def __init__(self):\r\n self.t_minus = None\r\n self.t = None\r\n self.t_plus = None\r\n \r\n @staticmethod\r\n def get_difference(t0, t1, t2):\r\n d1 = cv2.absdiff(t2, t1)\r\n d2 = cv2.absdiff(t1, t0)\r\n return cv2.bitwise_and(d1, d2)\r\n\r\n def init(self, frame):\r\n self.t_minus = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n self.t = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n self.t_plus = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n\r\n def main(self, frame):\r\n difference = cv2.countNonZero(self.get_difference(self.t_minus, self.t, self.t_plus))\r\n\r\n self.t_minus = self.t\r\n self.t = self.t_plus\r\n self.t_plus = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n\r\n return difference\r\n", "id": "5087815", "language": "Python", "matching_score": 2.6879048347473145, "max_stars_count": 2, "path": "version2/Main/MotionDetecter.py" }, { "content": "\"\"\"\r\n* By M.K \r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport cv2\r\nimport KeyboardControl\r\nimport time\r\nfgbg = cv2.createBackgroundSubtractorMOG2()\r\nLastReverse = round(time.time())\r\n\r\n\r\ndef detect(frame, min_white):\r\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n frame = cv2.resize(frame, (600, 600))\r\n frame = cv2.GaussianBlur(frame, (5, 5), 10)\r\n fgmask = fgbg.apply(frame)\r\n white_count = cv2.countNonZero(fgmask)\r\n\r\n print(white_count)\r\n\r\n if white_count < min_white: \r\n return False # No motion detected\r\n\r\n else:\r\n return True # Motion 
Detected\r\n\r\n# END\r\n\r\n", "id": "10187586", "language": "Python", "matching_score": 1.7085061073303223, "max_stars_count": 2, "path": "Version1/MotionDetection.py" }, { "content": "import numpy as np\r\nfrom PIL import ImageGrab\r\nimport cv2\r\nimport time\r\n\r\n\r\nclass Processing:\r\n def __init__(self):\r\n self.Vertices = np.array([[10, 500], [10, 300], [300, 200], [500, 200], [800, 300], [800, 500]])\r\n\r\n def roi(self, image):\r\n mask = np.zeros_like(image)\r\n cv2.fillPoly(mask, [self.Vertices], 255)\r\n masked = cv2.bitwise_and(image, mask)\r\n return masked\r\n\r\n def detect_lines(self, image):\r\n try:\r\n lines = cv2.HoughLinesP(image, 1, np.pi/180, 180, np.array([]), 100, 5)\r\n for line in lines:\r\n coords = line[0]\r\n cv2.line(image, (coords[0], coords[1]), (coords[2], coords[3]), (255, 255, 255), 3)\r\n\r\n except:\r\n pass\r\n\r\n def main(self, image):\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n processed = cv2.Canny(image, 200, 300)\r\n processed = self.roi(processed)\r\n processed = cv2.GaussianBlur(processed, (5, 5), 0)\r\n self.detect_lines(processed)\r\n return processed\r\n\r\n\r\nprocessing = Processing()\r\n\r\n\r\nLastTime = time.time()\r\nwhile True:\r\n Image = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))\r\n cv2.imshow('main', processing.main(Image))\r\n\r\n print(\"Loop takes {} seconds\".format(time.time() - LastTime))\r\n LastTime = time.time()\r\n\r\n if cv2.waitKey(25) & 0xFF == ord('q'):\r\n cv2.destroyAllWindows()\r\n break\r\n", "id": "8978065", "language": "Python", "matching_score": 2.5704543590545654, "max_stars_count": 2, "path": "Line Detection/line detection.py" }, { "content": "\"\"\"\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nfrom PIL import ImageGrab\r\nfrom numpy import array\r\n\r\n\r\ndef get():\r\n image = array(ImageGrab.grab(bbox=(0, 50, 1280, 1024)))\r\n return image\r\n\r\n\r\n# END\r\n\r\n", "id": "9594112", "language": "Python", "matching_score": 0.21603061258792877, "max_stars_count": 2, "path": "version2/Main/GetFrame.py" }, { "content": "\"\"\"\r\n* By M.K \r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport KeyboardControl\r\nimport GetFrame\r\nimport time\r\nimport cv2\r\nfrom tensorflow import keras\r\nimport numpy as np\r\nimport MotionDetection\r\nimport threading\r\nimport keyboard\r\n\r\n\r\nImageWeight = 224\r\nImageHeight = 224\r\n\r\nClasses = {0: 'Forward', 1: 'Left', 2: 'Right'} # , 3: 'Reverse', 4: 'FL', 5: 'FR', 6: 'RL', 7: 'RF', 8: 'Stop'}\r\n\r\nKeyboardDelay = 0.1\r\nMaxSpeed = 40\r\nReverseDelay = 1 # For motion detection & high speed\r\nMotionThreshold = 1000\r\n\r\nCountdownNumber = 5\r\nCountdownDelay = 0.5\r\n\r\nModel = keras.models.load_model(r\"Trained-Models/Trained-Alexnet-64.h5\")\r\n\r\n\r\ndef keyboard_handler(result, delay):\r\n\r\n if result == 0:\r\n KeyboardControl.left(delay*1.2)\r\n\r\n elif result == 1:\r\n KeyboardControl.straight(delay)\r\n\r\n elif result == 2:\r\n KeyboardControl.right(delay*1.2)\r\n\r\n\r\ndef countdown(num, delay):\r\n for i in list(range(num))[::-1]:\r\n if i == 0:\r\n print(\"Start driving!\")\r\n continue\r\n print(i)\r\n time.sleep(delay)\r\n\r\n\r\ndef main():\r\n countdown(CountdownNumber, CountdownDelay)\r\n motion_detected = True\r\n\r\n while True:\r\n if not motion_detected:\r\n KeyboardControl.reverse(3)\r\n\r\n start_time = time.time()\r\n\r\n frame = GetFrame.get()\r\n\r\n motion_detected = True\r\n\r\n frame = 
cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n frame = cv2.resize(frame, (ImageWeight, ImageHeight))\r\n\r\n frame = np.array(frame)\r\n frame = frame.reshape(1, ImageHeight, ImageWeight, 1)\r\n frame = frame/255\r\n\r\n prediction = Model.predict(frame)\r\n prediction = np.argmax(prediction)\r\n keyboard_handler(prediction, KeyboardDelay)\r\n\r\n print(f\"Chice= {prediction}\\tProcess took {str(time.time() - start_time)[0: 4]} seconds.\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n# END", "id": "911464", "language": "Python", "matching_score": 4.757020473480225, "max_stars_count": 2, "path": "Version1/Main.py" }, { "content": "\"\"\"\r\n* By M.K 6/24/2020\r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport GetFrame\r\nimport numpy as np\r\nfrom tensorflow import keras\r\nimport time\r\nimport cv2\r\nimport keyboard\r\nfrom random import randint\r\nimport os\r\nimport threading\r\nimport MotionDetecter\r\nimport winsound\r\nfrom playsound import playsound\r\n\r\n\r\nclass Driver(object): # Main class\r\n def __init__(self):\r\n keras.backend.learning_phase_scope(0)\r\n self.Max_Speed = 45\r\n self.Keyboard_Delay = 0.1 # Delay between press and release\r\n self.Turn_Threshold = 2.23\r\n self.Motion_Threshold = 300_000\r\n # ======================\r\n self.Speed = None\r\n self.Paused = False \r\n self.Pause_Key = \"p\"\r\n self.Classes = {0: \"Left\\t\", 1: \"Straight\", 2: \"Right \"} # labels\r\n self.Speed_Control_Model = keras.models.load_model(os.path.join(\"Models\", \"Speed_Controller_V2.h5\"))\r\n self.Main_Model = keras.models.load_model(os.path.join(\"Models\", \"mobilenet_v2\", \"log_ver 0.4-mobilenet_v2-Python-Plays-NFS.h5\"))\r\n\r\n self.Thread_2 = threading.Thread(target=self.keyboard_control, args=([], None))\r\n self.Thread_2.name = \"Keyboard_Control_Thread\"\r\n self.Thread_2.start()\r\n\r\n self.Delta_Value = None # For Motion detection\r\n self.Motion_Detection = MotionDetecter.MotionDetection()\r\n self.Motion_Detection.init(GetFrame.get())\r\n\r\n def motion_detection(self, frame, threshold, speed):\r\n self.Delta_Value = self.Motion_Detection.main(frame) \r\n\r\n if speed != 0 and self.Delta_Value > threshold: # if car isn't stuck \r\n return True\r\n\r\n self.keyboard_control([\"w\"], 0.3) # else we'll try to go forward for 0.3 seconds, if the speed still the same the car is stuck \r\n frame = GetFrame.get() \r\n self.Speed = self.get_speed(frame)\r\n if self.Speed > 8:\r\n return True\r\n\r\n return False \r\n\r\n def get_speed(self, image, model=None):\r\n if model is None:\r\n model = self.Speed_Control_Model\r\n\r\n image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\r\n\r\n middle_digit = image[894: 939, 1124: 1166] # croping image\r\n middle_digit = cv2.resize(middle_digit, (45, 40))\r\n\r\n right_digit = image[895: 939, 1125 + 41: 1165 + 41]\r\n right_digit = cv2.resize(right_digit, (45, 40))\r\n\r\n middle_digit, right_digit = np.array(middle_digit, dtype=\"float32\") / 255, np.array(right_digit, dtype=\"float32\") / 255 # Data Normalization\r\n middle_digit = np.reshape(middle_digit, (1, middle_digit.shape[0], middle_digit.shape[1], 1)), # (1, width, height, channels)\r\n right_digit = np.reshape(right_digit, (1, right_digit.shape[0], right_digit.shape[1], 1))\r\n\r\n predictions = (model.predict(middle_digit), model.predict(right_digit))\r\n speed = np.argmax(predictions[0]) * 10 + np.argmax(predictions[1]) \r\n return speed\r\n\r\n\r\n @staticmethod\r\n def 
make_prediction(frame, model=None): # Driver Model\r\n if model is None:\r\n model = self.Main_Model\r\n\r\n frame = cv2.resize(frame, (299, 299)) # preprocessing\r\n frame = frame[115:, :]\r\n frame = np.array(frame)\r\n frame = keras.applications.mobilenet_v2.preprocess_input(frame)\r\n frame = frame.reshape(1, 184, 299, 3)\r\n\r\n predictions = np.argmax(model.predict(frame))\r\n return predictions\r\n\r\n @staticmethod\r\n def keyboard_control(keys, delay):\r\n for key in keys:\r\n keyboard.press(key)\r\n time.sleep(delay)\r\n keyboard.release(keys)\r\n\r\n def main(self):\r\n if keyboard.is_pressed(\"p\"):\r\n if self.Paused:\r\n self.Paused = False # start driving\r\n playsound(r\"voices/start_driving.mp3\")\r\n\r\n else:\r\n self.Paused = True # stop driving\r\n playsound(r\"voices/stop_driving.mp3\")\r\n\r\n for key in [\"a\", \"w\", \"d\", \"s\"]:\r\n keyboard.release(key)\r\n\r\n time.sleep(1)\r\n\r\n if self.Paused:\r\n return\r\n # ====================\r\n\r\n start_time = time.time()\r\n frame = GetFrame.get()\r\n self.Speed = self.get_speed(frame)\r\n\r\n prediction = self.make_prediction(frame, self.Main_Model) # classes --> left , right , straight\r\n self.Thread_2.join() # Wait until, all keys are released\r\n\r\n if self.motion_detection(frame, self.Motion_Threshold, self.Speed):\r\n if prediction == 0:\r\n if self.Speed >= self.Max_Speed: # pressing \"A\"\r\n self.Thread_2 = threading.Thread(target=self.keyboard_control, args=([\"a\"], self.Keyboard_Delay * self.Turn_Threshold))\r\n self.Thread_2.start()\r\n\r\n else: # pressing \"A\" & \"W\" \r\n self.Thread_2 = threading.Thread(target=self.keyboard_control, args=([\"a\", \"w\"], self.Keyboard_Delay * self.Turn_Threshold))\r\n self.Thread_2.start()\r\n\r\n elif prediction == 1: \r\n if self.Speed >= self.Max_Speed: \r\n self.Thread_2 = threading.Thread(target=self.keyboard_control, args=([], self.Keyboard_Delay))\r\n self.Thread_2.start()\r\n\r\n else: # pressing \"W\"\r\n self.Thread_2 = threading.Thread(target=self.keyboard_control, args=([\"w\"], self.Keyboard_Delay))\r\n self.Thread_2.start()\r\n\r\n elif prediction == 2: # pressing \"D\"\r\n if self.Speed >= self.Max_Speed:\r\n self.Thread_2 = threading.Thread(target=self.keyboard_control, args=([\"d\"], self.Keyboard_Delay * self.Turn_Threshold))\r\n self.Thread_2.start()\r\n\r\n else: # pressing \"D\" & \"W\"\r\n self.Thread_2 = threading.Thread(target=self.keyboard_control, args=([\"d\", \"w\"], self.Keyboard_Delay * self.Turn_Threshold))\r\n self.Thread_2.start()\r\n\r\n else:\r\n print(\"No Motion Detected.\")\r\n playsound(r\"voices/car_is_stuck.mp3\")\r\n\r\n for key in [\"a\", \"w\", \"s\", \"s\"]: # releasing all keys\r\n keyboard.release(key)\r\n\r\n choices = {0: [\"s\", \"a\"], 1: [\"S\", \"d\"]}\r\n self.keyboard_control(choices[randint(0, 1)], 0.8) # random choice\r\n time.sleep(2)\r\n self.keyboard_control([\"w\"], 0.5) # going straight for 0.5 seconds\r\n\r\n # =================== \r\n frame_rate = 1 / (time.time() - start_time) \r\n print(f\"Frame rate= {str(frame_rate)[: 5]} choice= {self.Classes[prediction]} Speed= {self.Speed} Delta= {self.Delta_Value}\") # printing logs\r\n print(f\"Active-Thrads= {threading.active_count()} \\t Process Took {str(time.time() - start_time)[: 3]} seconds.\")\r\n\r\n# =============================================\r\ndriver = Driver()\r\n\r\nif __name__ == '__main__':\r\n \r\n for i in list(range(1, 10))[::-1]:\r\n if i > 3: # Beeping (before driving)\r\n print(i)\r\n winsound.Beep(2800, 250)\r\n time.sleep(0.5)\r\n 
continue\r\n\r\n winsound.Beep(2800, 1000)\r\n time.sleep(0.1) \r\n\r\n if i == 1:\r\n print(\"Start Driving...\")\r\n playsound(r\"voices/start_driving.mp3\")\r\n driver.keyboard_control([\"w\"], 2.5)\r\n \r\n while True: # Main loop\r\n try:\r\n driver.main()\r\n\r\n except Exception as e:\r\n\r\n for key in [\"w\", \"a\", \"s\", \"d\"]:\r\n keyboard.release(key)\r\n\r\n raise \r\n# END", "id": "7869724", "language": "Python", "matching_score": 4.714197635650635, "max_stars_count": 2, "path": "version2/Main/Main.py" }, { "content": "\"\"\"\r\n* By M.K \r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nfrom tensorflow import keras\r\nimport KeyboardControl\r\nimport cv2\r\nimport GetFrame\r\nfrom time import sleep\r\nimport numpy as np\r\n\r\nfor i in list(range(0, 3))[::-1]: # count down\r\n print(i)\r\n sleep(0.6)\r\n\r\nModelName = r\"Speed-model.h5\"\r\nMaxSpeed = 40 / 10\r\ndelay = 1 # for breaking\r\n\r\nModel = keras.models.load_model(ModelName)\r\n\r\n\r\ndef main():\r\n while True:\r\n\r\n image = GetFrame.get()\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n image = cv2.resize(image, (224, 224))\r\n image = image/255\r\n\r\n image = image[204:218, 196:204]\r\n image = np.array(image)\r\n image = image.reshape(1, image.shape[0], image.shape[1])\r\n\r\n prediction = Model.predict(image)\r\n\r\n if int(np.argmax(prediction)) > MaxSpeed or int(np.argmax(prediction)) == 0:\r\n print(\"High Speed Detected! OR U are stuck!\")\r\n KeyboardControl.reverse(delay)\r\n\r\n# END", "id": "449691", "language": "Python", "matching_score": 1.096584677696228, "max_stars_count": 2, "path": "Version1/Speed Controller Version 1/Main-speed-control.py" }, { "content": "\"\"\"\r\n* By M.K \r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nfrom time import sleep\r\nimport keyboard\r\n\r\n\r\ndef straight(delay):\r\n keyboard.press('w')\r\n sleep(delay)\r\n keyboard.release('w')\r\n\r\n\r\ndef reverse(delay):\r\n keyboard.press('s')\r\n sleep(delay)\r\n keyboard.release('s')\r\n\r\n\r\ndef right(delay):\r\n keyboard.press('w')\r\n keyboard.press('d')\r\n sleep(delay)\r\n keyboard.release('w')\r\n sleep(delay)\r\n keyboard.release('d')\r\n\r\n\r\ndef left(delay):\r\n keyboard.press('w')\r\n keyboard.press('a')\r\n sleep(delay)\r\n keyboard.release('w')\r\n sleep(delay)\r\n keyboard.release('a')\r\n\r\n\r\ndef straight_left(delay):\r\n keyboard.press('w')\r\n keyboard.press('a')\r\n sleep(delay)\r\n keyboard.release('w')\r\n keyboard.release('a')\r\n\r\n\r\ndef straight_right(delay):\r\n keyboard.press('w')\r\n keyboard.press('d')\r\n sleep(delay)\r\n keyboard.release('w')\r\n keyboard.release('d')\r\n\r\n\r\ndef reverse_left(delay):\r\n keyboard.press('s')\r\n keyboard.press('a')\r\n sleep(delay)\r\n keyboard.release('s')\r\n keyboard.release('a')\r\n\r\n\r\ndef reverse_right(delay):\r\n keyboard.press('s')\r\n keyboard.press('d')\r\n sleep(delay)\r\n keyboard.release('s')\r\n keyboard.release('d')\r\n\r\ndef key_check():\r\n key_list = []\r\n if keyboard.is_pressed('w'):\r\n key_list.append('W')\r\n\r\n if keyboard.is_pressed('d'):\r\n key_list.append('D')\r\n\r\n if keyboard.is_pressed('a'):\r\n key_list.append('A')\r\n\r\n if keyboard.is_pressed('s'):\r\n key_list.append('S')\r\n\r\n if keyboard.is_pressed('e'):\r\n key_list.append('E')\r\n\r\n if keyboard.is_pressed('r'):\r\n key_list.append('R')\r\n\r\n return 
key_list\r\n\r\n# END", "id": "5098122", "language": "Python", "matching_score": 2.059162139892578, "max_stars_count": 2, "path": "Version1/KeyboardControl.py" }, { "content": "\"\"\"\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport keyboard\r\n\r\n\r\ndef get():\r\n key_list = []\r\n if keyboard.is_pressed('a'):\r\n key_list.append('a')\r\n\r\n if keyboard.is_pressed('w'):\r\n key_list.append('w')\r\n\r\n if keyboard.is_pressed('d'):\r\n key_list.append('d')\r\n\r\n if keyboard.is_pressed('s'):\r\n key_list.append('s')\r\n\r\n if keyboard.is_pressed('t'):\r\n key_list.append('t')\r\n\r\n if keyboard.is_pressed('y'):\r\n key_list.append('y')\r\n\r\n return key_list\r\n\r\n# END\r\n", "id": "12546617", "language": "Python", "matching_score": 1.4863637685775757, "max_stars_count": 2, "path": "version2/Speed-Controller-version 2/GrabKeys.py" }, { "content": "\"\"\"\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport keyboard\r\n\r\ndef get():\r\n pressed_keys = []\r\n if keyboard.is_pressed('a'):\r\n pressed_keys.append('a')\r\n\r\n if keyboard.is_pressed('w'):\r\n pressed_keys.append('w')\r\n\r\n if keyboard.is_pressed('d'):\r\n pressed_keys.append('d')\r\n\r\n if keyboard.is_pressed('s'):\r\n pressed_keys.append('s')\r\n\r\n if keyboard.is_pressed('t'):\r\n pressed_keys.append('t')\r\n\r\n if keyboard.is_pressed('y'):\r\n pressed_keys.append('y')\r\n\r\n return pressed_keys\r\n\r\n# END\r\n", "id": "9471072", "language": "Python", "matching_score": 0.03216874599456787, "max_stars_count": 2, "path": "version2/Collect Data/GrabKeys.py" }, { "content": "\"\"\"\r\n* By M.K\r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.layers import Dense, Conv2D, Dropout, MaxPooling2D, Input, Flatten\r\nfrom tensorflow.keras.callbacks import TensorBoard\r\nfrom datetime import datetime\r\nimport os\r\n\r\nImage_Height, Image_Width = 40, 45\r\nNum_Classes = 10\r\nEpochs, Batch_Size = 15, 32\r\n# TB_LogDir = f\"logs/Speed-Controller-{Epochs}epochs-{datetime.now()}\"\r\n# TB_CallBack = TensorBoard(log_dir=TB_LogDir, histogram_freq=1)\r\n\r\nX = np.array(np.load(r\"Data/X.npy\", allow_pickle=True))\r\nY = np.array(np.load(r\"Data/Y.npy\", allow_pickle=True))\r\nX = np.reshape(X, (X.shape[0], Image_Height, Image_Width, 1)) # Reshaping data --> (nmber, height, width, channels)\r\nprint(f\"{X}\\n\\n{Y}\\n\\nLengths:\\tX: {len(X)} Y:{len(Y)}\\nShapes:\\tX: {np.shape(X)} Y: {np.shape(Y)}\\n\")\r\n\r\ndef main():\r\n\tmodel = keras.Sequential()\r\n\tmodel.add(Input(shape=(Image_Height, Image_Width, 1)))\r\n\r\n\tmodel.add(Conv2D(32, (3, 3), strides=(1, 1), padding=\"same\", activation=\"relu\"))\r\n\tmodel.add(MaxPooling2D())\r\n\r\n\tmodel.add(Flatten())\r\n\r\n\tmodel.add(Dense(1024, activation=\"relu\"))\r\n\tmodel.add(Dense(512, activation=\"relu\"))\r\n\tmodel.add(Dropout(0.3))\r\n\tmodel.add(Dense(128, activation=\"relu\"))\r\n\tmodel.add(Dense(Num_Classes, activation=\"softmax\"))\r\n\r\n\tmodel.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\r\n\tmodel.fit(X, Y, epochs=Epochs, batch_size=Batch_Size, validation_split=0.1, verbose=1)\r\n\tmodel.save(r\"Speed_Controller.h5\")\r\n\r\n\treturn\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n\r\n# END\r\n", "id": "10546049", "language": "Python", "matching_score": 2.2686753273010254, 
"max_stars_count": 2, "path": "version2/Speed-Controller-version 2/Model.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Fashion MNIST Simple Autoencoder.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1c4IKyAdLkkWnMcOTmgDDj6TF6ulmMox6\n\"\"\"\n\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.layers import Input, Flatten, Dense, Reshape\r\nfrom keras.optimizers import Adam\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport random\n\n(xTrain, yTrain), (xTest, yTest) = tf.keras.datasets.fashion_mnist.load_data()\r\nprint(\"Train data shape:\\t\", xTrain.shape, \"\\nTest data shape:\\t\", xTest.shape)\r\nplt.imshow(xTrain[random.randint(0, xTrain.shape[0])], cmap='gray')\n\n\"\"\"# Parameters\"\"\"\n\nepochs = 16\r\nbatchSize = 32\r\nlearningRate = 0.001\r\ndecay = 1e-6\n\n\"\"\"# Preprocessing \r\ndividing all images by 255\n\"\"\"\n\nxTrain = xTrain / 255.0\r\nxTest = xTest / 255.0\n\n\"\"\"# Building model\"\"\"\n\n# Encoder\r\nencoderInput = Input(shape=(28, 28, 1)) # 784 features\r\nx = Flatten()(encoderInput)\r\nx = Dense(128, activation='relu')(x) # 128 features\r\nencoderOutput = Dense(64, activation='relu')(x) # 64 features\r\nencoder = keras.Model(encoderInput, encoderOutput, name=\"encoder\") # encoder model\r\n\r\n# Decoder\r\ndecoderInput = Dense(128, activation='relu')(encoderOutput)\r\nx = Dense(28*28*1, activation='relu')(decoderInput)\r\ndecoderOutput = Reshape((28, 28, 1))(x)\r\n\r\n# Autoencoder\r\nautoencoder = keras.Model(encoderInput, decoderOutput, name=\"autoencoder\")\r\nautoencoder.summary()\n\n\"\"\"# Training model\"\"\"\n\noptimizer = Adam(learningRate, decay)\r\nautoencoder.compile(optimizer, loss=\"mse\")\r\nautoencoder.fit(xTrain, xTrain, epochs=epochs, batch_size=batchSize, validation_split=0.1)\n\n\"\"\"# Testing model\"\"\"\n\ndef testModel(image):\r\n figure = plt.figure(figsize=(28, 28))\r\n rows, columns = 1, 2\r\n\r\n figure.add_subplot(rows, columns, 1)\r\n plt.imshow(image, cmap=\"gray\") # showing original image\r\n plt.axis('off') \r\n plt.title(\"Original image\", fontsize=40) \r\n\r\n # Autoencoder output image\r\n image = image.reshape(-1, 28, 28, 1)\r\n predection = autoencoder.predict([image])\r\n #predection = predection * 255\r\n\r\n # Showing output image\r\n figure.add_subplot(rows, columns, 2)\r\n plt.imshow(predection[0].reshape(28, 28), cmap=\"gray\")\r\n plt.axis('off') \r\n plt.title(\"Generated image\", fontsize=40)\n\nfor i in range(50):\r\n testModel(xTest[random.randint(0, xTest.shape[0])])", "id": "7641410", "language": "Python", "matching_score": 2.651556968688965, "max_stars_count": 0, "path": "fashion_mnist_simple_autoencoder.py" }, { "content": "\"\"\"\r\n* By M.K \r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow import keras # Tensorflow API (make things easier:))\r\nimport numpy as np\r\n\r\nFileName = r\"Speed-Data-Normilized.npy\" # Loading train data\r\n\r\nData = np.load(FileName, allow_pickle=True)\r\nx_train = []\r\ny_train = []\r\n\r\nfor i in Data:\r\n x_train.append(i[0])\r\n y_train.append(i[1])\r\n\r\nx_train = np.array(x_train)\r\ny_train = np.array(y_train)\r\n\r\nprint(x_train)\r\nprint(y_train)\r\n\r\nmodel = keras.Sequential([\r\n keras.layers.Flatten(input_shape=(14, 8)),\r\n keras.layers.Dense(4096, activation=\"relu\"),\r\n 
keras.layers.Dense(2048, activation=\"relu\"),\r\n keras.layers.Dense(1024, activation=\"relu\"),\r\n keras.layers.Dense(512, activation=\"relu\"),\r\n keras.layers.Dense(256, activation=\"relu\"),\r\n keras.layers.Dense(128, activation=\"relu\"),\r\n keras.layers.Dense(10, activation=\"softmax\")\r\n ])\r\n\r\nmodel.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\r\n\r\n\r\nmodel.fit(x_train, y_train, epochs=50)\r\n\r\nmodel.save(\"speed-model.h5\")\r\n\r\n# END", "id": "1042998", "language": "Python", "matching_score": 0.5106388926506042, "max_stars_count": 2, "path": "Version1/Speed Controller Version 1/Speed-Model-CNN.py" }, { "content": "\"\"\"\r\n* By M.K\r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport cv2\r\nfrom tqdm import tqdm\r\nfrom random import shuffle\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass Converter(object):\r\n def __init__(self):\r\n self.Part_Number = int(input(\"Enter Part number >>> \")) # geting data part number\r\n self.Input_Address = r\"D:/ML/NFS Train files/Version 3/Normalized Parts/Not Compressed/{}-Train-Normalized- Part {}.npy\"\r\n self.Output_Address = r\"D:/ML/NFS Train files/Version 3/\"\r\n self.Images = []\r\n self.Labels = []\r\n self.Ids = []\r\n self.Final_Data = None\r\n\r\n @staticmethod\r\n def visualize_data(data, title, bins): \r\n\r\n hist1_y, _, _ = plt.hist(data, bins=bins) # Data histogram\r\n plt.title(title)\r\n plt.yticks(np.arange(0, hist1_y.max(), 3000))\r\n\r\n plt.tight_layout()\r\n plt.savefig(\"Python-Plays-NFS-Data-Histogram-HQ.jpg\", dpi=1600, quality=100, optimize=True) # saving figure\r\n\r\n def main(self):\r\n x = np.load(self.Input_Address.format('X', self.Part_Number), allow_pickle=True) # loading each data part\r\n y = np.load(self.Input_Address.format('Y', self.Part_Number), allow_pickle=True)\r\n\r\n for i in tqdm(range(0, len(x))):\r\n image = x[i] * 255\r\n image = cv2.cvtColor(np.float32(image), cv2.COLOR_BGR2RGB)\r\n cv2.imwrite(self.Output_Address + \"images/\" + str(self.Part_Number) + \"/\" + str(i) + \".jpg\", image, [cv2.IMWRITE_JPEG_QUALITY, 100]) # Save each image\r\n\r\n self.Labels.append(y[i])\r\n self.Ids.append(i)\r\n\r\n self.visualize_data(self.Labels, \"Labels\", 3)\r\n self.Final_Data = {\"ID\": self.Ids, \"Label\": self.Labels}\r\n self.Final_Data = pd.DataFrame(data=self.Final_Data) # Converting data to pandas Data Frame \r\n\r\n def save_data(self):\r\n print(\"Saving...\")\r\n self.Final_Data.to_pickle(self.Output_Address + \"Labels-part{}.pkl\".format(self.Part_Number))\r\n self.Final_Data.to_csv(self.Output_Address + \"Labels-part{}.csv\".format(self.Part_Number))\r\n print(\"Done Saving!\")\r\n\r\n\r\nconverter = Converter()\r\n\r\nif __name__ == '__main__':\r\n converter.main()\r\n converter.save_data()\r\n\r\n# END", "id": "3950103", "language": "Python", "matching_score": 3.8929195404052734, "max_stars_count": 2, "path": "version2/Preprocess Data/conver to jpg.py" }, { "content": "\"\"\"\r\n* By M.K\r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nAddress = r\"E:/NFS Train Files/Data-Part{}.npy\"\r\nCount = int(input(\"Enter number of parts >>>(5) \")) # parts\r\nName = str(input(\"Enter output name>>> \"))\r\nData = pd.DataFrame()\r\n\r\nfor i in range(1, 
Count+1):\r\n print(\"Reading File number {}\".format(i))\r\n file = np.load(Address.format(i), allow_pickle=True)\r\n file = pd.DataFrame(file)\r\n print(file.head())\r\n Data = Data.append(file)\r\n print(\"Done Reading File Number{}\".format(i))\r\n\r\nData.to_csv(Name)\r\nprint(\"Done!\\nSaved CSV File\")\r\n", "id": "7620854", "language": "Python", "matching_score": 1.7099320888519287, "max_stars_count": 2, "path": "Version1/Preprocessing/Merge-Data.py" }, { "content": "\"\"\"\r\n* By M.K\r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport time\r\nfrom random import shuffle\r\n\r\nInputAddress = r\"D:\\ML\\NFS Train files\\Version 3\\parts-not normalized\\TrainFiles-part {}.npy\"\r\nOutputAddress = r\"D:\\ML\\NFS Train files\\Version 3\\Normalized Parts\\{}-Normalized- Part {}.npy\"\r\nFileName = r\"Normalized-Data\" # For saving\r\nTestSplitSize = 0.06\r\n\r\n\r\nclass Preprocessing(object):\r\n def __init__(self, test_split, data_dir, crop_height):\r\n self.Data = []\r\n self.TestSplit = test_split # TestData size\r\n self.Height = crop_height # For cropping image\r\n self.Address = data_dir # For loading data\r\n\r\n self.forwards = []\r\n self.lefts = []\r\n self.rights = []\r\n\r\n self.FinalData = []\r\n self.x_train = []\r\n self.y_train = []\r\n self.x_test = []\r\n self.y_test = []\r\n\r\n def load_data(self, part_numbers): # loading all data parts\r\n for i in range(1, part_numbers + 1):\r\n print(\"Loading part {}\".format(i))\r\n file = np.load(self.Address.format(i), allow_pickle=True)\r\n\r\n for row in file:\r\n self.Data.append(row)\r\n\r\n shuffle(self.Data)\r\n print(\"Input data length = {}\".format(len(self.Data)))\r\n\r\n def norm_targets(self): # Normalizing labels\r\n counter = 0\r\n \r\n for row in self.Data:\r\n counter += 1\r\n if counter % 100 == 0 :\r\n print(counter)\r\n\r\n # ================\r\n image = row[0]\r\n image = self.norm_image(image)\r\n choice = row[1]\r\n\r\n if choice[0] == 1:\r\n self.lefts.append([image, [1, 0, 0]])\r\n\r\n elif choice[2] == 1:\r\n self.rights.append([image, [0, 0, 1]])\r\n\r\n elif choice[1] == 1:\r\n self.forwards.append([image, [0, 1, 0]])\r\n \r\n del self.Data # free uping ram\r\n\r\n def norm_image(self, image):\r\n image = image[self.Height:, :]\r\n\r\n r, g, b = cv2.split(image)\r\n r, g, b = r/255, g/255, b/255\r\n\r\n image = cv2.merge((r, g, b))\r\n return image\r\n\r\n def norm_length(self): # All classes shouls have same length\r\n self.forwards = self.forwards[: len(self.lefts)][: len(self.rights)]\r\n self.lefts = self.lefts[: len(self.forwards)]\r\n self.rights = self.rights[: len(self.lefts)]\r\n\r\n self.FinalData = self.rights + self.lefts + self.forwards\r\n shuffle(self.FinalData) # shuffling final data\r\n \r\n def split_data(self): \r\n test_data_length = len(self.FinalData) * self.TestSplit\r\n\r\n images = []\r\n targets = []\r\n\r\n for row in self.FinalData:\r\n images.append(row[0])\r\n targets.append(row[1])\r\n\r\n print(targets)\r\n\r\n self.x_train = images[: int(len(self.FinalData) - test_data_length)]\r\n self.y_train = targets[: int(len(self.FinalData) - test_data_length)]\r\n\r\n self.x_test = images[int(len(self.FinalData) - test_data_length):]\r\n self.y_test = targets[int(len(self.FinalData) - test_data_length):]\r\n\r\n print(\"Lengths >>> NormalizedData = {}\".format(len(self.FinalData)))\r\n print(\"Lengths >>> x_train = {}\\ty_train = 
{}\".format(len(self.x_train), len(self.y_train)))\r\n print(\"Lengths >>> x_test = {}\\ty_test={}\".format(len(self.x_test), len(self.y_test)))\r\n\r\n self.x_train, self.y_train = np.array(self.x_train), np.array(self.y_train) # Converting to np array\r\n self.x_test, self.y_test = np.array(self.x_test), np.array(self.y_test)\r\n\r\n print(\"Shapes >>> x_train = {}, y_train = {}\".format(self.x_train.shape, self.y_train.shape))\r\n print(\"Shapes >>> x_test = {}, y_test = {}\".format(self.x_test.shape, self.y_test.shape))\r\n\r\n\r\n def save_data(self, address, split_part_number=6): # saving data in multiple parts\r\n print(\"Saving...\")\r\n train_data_length = int(len(self.y_train))\r\n\r\n train_split_file_length = int(train_data_length / split_part_number) \r\n\r\n counter = 0\r\n for i in range(1, split_part_number + 1):\r\n print(\"Working on part {}.\".format(i))\r\n\r\n if i == split_part_number: # If processing last part\r\n x_train = self.x_train[train_split_file_length * counter:]\r\n y_train = self.y_train[train_split_file_length * counter:]\r\n\r\n else:\r\n x_train = self.x_train[train_split_file_length * counter: train_split_file_length * (counter + 1)]\r\n y_train = self.y_train[train_split_file_length * counter: train_split_file_length * (counter + 1)]\r\n\r\n print(\"File number {} length =\\t{}\\n\".format(i, len(y_train)))\r\n np.save(address.format(\"X-Train\", i), x_train)\r\n np.save(address.format(\"Y-Train\", i), y_train)\r\n\r\n counter += 1\r\n\r\n np.save(address.format(\"X-Test\", 0), self.x_test)\r\n np.save(address.format(\"Y-Test\", 0), self.y_test)\r\n print(type(self.y_test), self.y_test)\r\n print(\"Done Saving!\")\r\n\r\n\r\npreprocessing = Preprocessing(TestSplitSize, InputAddress, 115)\r\n\r\n\r\ndef main():\r\n input_part_numbers = int(input(\"How Many Parts To Load >>> \"))\r\n output_part_numbers = int(input(\"Split file to how many parts >>> \"))\r\n\r\n preprocessing.load_data(input_part_numbers)\r\n print(\"Normalizing...\")\r\n preprocessing.norm_targets()\r\n print(\"Length Normalizing\")\r\n preprocessing.norm_length()\r\n print(\"Split Data\")\r\n preprocessing.split_data()\r\n print(\"Saving Data\")\r\n preprocessing.save_data(OutputAddress, output_part_numbers)\r\n\r\n print(\"Done!\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n# END", "id": "749772", "language": "Python", "matching_score": 3.0601983070373535, "max_stars_count": 2, "path": "version2/Preprocess Data/NormilizeData.py" }, { "content": "\"\"\"\r\n* By M.K\r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom collections import Counter\r\nfrom random import shuffle\r\nimport cv2\r\n\r\nFileCount = int(input('Count>>> '))\r\nName = r\"E:\\NFS Train Files\\Data-Part{}.npy\"\r\nNoMatchCount = 0\r\n\r\nTrainData = None\r\n\r\nLefts = []\r\nRights = []\r\nForwards = []\r\n\"\"\"\r\nReverses = []\r\nFL = [] # Forward Left\r\nFR = [] # Forward Right\r\nRL = [] # Reverse Left\r\nRR = [] # Reverse Right\r\nStops = [] # No key is pressed\r\n\"\"\"\r\n# ====================================================\r\nfor i in range(1, FileCount+1):\r\n print(\"Working on file number{}\".format(i))\r\n TrainData = np.load(Name.format(i), allow_pickle=True)\r\n shuffle(TrainData)\r\n\r\n for data in TrainData:\r\n Image = cv2.cvtColor(data[0], cv2.COLOR_BGR2GRAY)\r\n Image = Image/255\r\n Choice = data[1]\r\n\r\n if Choice == [0, 1, 0]:\r\n 
Forwards.append([Image, 1])\r\n\r\n elif Choice == [1, 0, 0]:\r\n Lefts.append([Image, 2])\r\n\r\n elif Choice == [0, 0, 1]:\r\n Rights.append([Image, 3])\r\n\r\n else:\r\n NoMatchCount += 1\r\n \"\"\"\r\n elif Choice == [0, 0, 0, 1]:\r\n Reverses.append([Image, 4])\r\n \r\n elif Choice == [1, 1, 0, 0]:\r\n FL.append([Image, 5])\r\n \r\n elif Choice == [0, 1, 1, 0]:\r\n FR.append([Image, 6])\r\n \r\n elif Choice == [1, 0, 0, 1]:\r\n RL.append([Image, 7])\r\n \r\n elif Choice == [0, 0, 1, 1]:\r\n RR.append([Image, 8])\r\n \r\n elif Choice == [0, 0, 0, 0]:\r\n Stops.append([Image, 9])\r\n \"\"\"\r\n print(\"File number {} closed.\".format(i))\r\n\r\n\r\nForwards = Forwards[:len(Lefts)][:len(Rights)]\r\nLefts = Lefts[:len(Forwards)]\r\nRights = Rights[:len(Lefts)]\r\n\"\"\"\r\nReverses = Reverses[:len(Rights)]\r\nFL = Reverses[:len(Reverses)]\r\nFR = Reverses[:len(FL)]\r\nRL = Reverses[:len(FR)]\r\nRR = Reverses[:len(RL)]\r\nStops = Reverses[:len(RR)]\r\n\"\"\"\r\nFinalData = Forwards + Lefts + Rights\r\nshuffle(FinalData)\r\n\r\n# =================================================\r\nnp.save('Final-Data-Merged.npy', FinalData)\r\n\r\nprint('Process completed! \\n Find no Matches for {} images.\\n Fianl Data Len = {}'.format(NoMatchCount, len(FinalData)))\r\n\r\n# END\r\n", "id": "9725026", "language": "Python", "matching_score": 1.3680113554000854, "max_stars_count": 2, "path": "Version1/Preprocessing/Normilize Data.py" }, { "content": "\"\"\"\r\n* By M.K\r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nimport cv2\r\nfrom tqdm import tqdm\r\nfrom keras.utils import to_categorical\r\nnp.set_printoptions(threshold=sys.maxsize)\r\n\r\nImages_Dir = r\"Data/{}.jpg\"\r\nImage_Count = 11\r\n\r\ndef main():\r\n\timages = []\r\n\tlabels = []\r\n\r\n\tfor i in tqdm(range(0, Image_Count)):\r\n\t\timage = cv2.imread(Images_Dir.format(i), 0)\r\n\r\n\t\tif i == 0:\r\n\t\t\timage = cv2.resize(image, (45, 40))\r\n\t\t\tprint(np.shape(image))\r\n\t\t\tcontinue\r\n\r\n\t\timage = image[895: 940, 1125: 1165] \r\n\t\timage = cv2.resize(image, (45, 40))\r\n\t\tprint(np.shape(image))\r\n\r\n\t\tcv2.imshow(f\"image{i}.jpg\", image)\r\n\t\tcv2.waitKey()\r\n\r\n\r\n\t\timage = np.array(image) / 255\r\n\t\timages.append(image)\r\n\t\tlabel = int(input(f\"Enter image {i} label >>> \")) # Manually labeling data\r\n\t\tlabels.append(label)\r\n\r\n\timages, labels = images * 100, labels * 100 # Duplicating images\r\n\timages = np.array(images)\r\n\tlabels = to_categorical(np.array(labels))\r\n\tprint(f\"Shape:\\tX: {images.shape} Y: {labels.shape}\\n\")\r\n\r\n\tprint(\"Saving..\")\r\n\tnp.save(\"Data/X.npy\", images) # Save data\r\n\tnp.save(\"Data/Y.npy\", labels)\r\n\tprint(\"Done\")\r\n\r\n\treturn 0\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()\r\n\t_ = input(\"Press any key to exit >>> \")\r\n\r\n# END", "id": "6799384", "language": "Python", "matching_score": 1.2030982971191406, "max_stars_count": 2, "path": "version2/Speed-Controller-version 2/PreprocessData.py" }, { "content": "\"\"\"\r\n* By M.K\r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport time\r\nimport GrabScreen\r\nimport GrabKeys\r\nimport cv2\r\n\r\nSaveAddress = r\"D:/ML/NFS Train files/Version 3/\"\r\nFileName = \"TrainFiles-part {}.npy\"\r\n\r\nImageHeight = 1041\r\nImageWeight = 
1279\r\n\r\nCuntDownDelay = 0.5\r\n\r\nData = []\r\n\r\nPartNumber = int(input(\"Enter part number>>> \"))\r\n\r\n\r\ndef keys_to_output(keys): \r\n output = [0, 0, 0, 0]\r\n\r\n if 'a' in keys:\r\n output[0] = 1\r\n\r\n elif 'd' in keys:\r\n output[2] = 1\r\n\r\n else:\r\n output[1] = 1\r\n\r\n if 's' in keys:\r\n output[3] = 1\r\n\r\n return output\r\n\r\n\r\ndef count_down(delay):\r\n for a in list(range(1, 11))[:: -1]:\r\n print(a)\r\n time.sleep(delay)\r\n\r\n if a == 1:\r\n print(\"Starting...\")\r\n\r\n\r\ncount_down(CuntDownDelay)\r\n\r\nwhile True:\r\n StartTime = time.time()\r\n\r\n frame = GrabScreen.get()\r\n frame = frame[30:ImageHeight, 1:ImageWeight]\r\n frame = cv2.resize(frame, (299, 299))\r\n \r\n PressedKeys = GrabKeys.get() # cropping the edges\r\n EncodedKeys = keys_to_output(PressedKeys)\r\n\r\n Data.append([frame, EncodedKeys])\r\n\r\n if 'y' in PressedKeys: # stopping for a few seconds\r\n for i in list(range(1, 9))[::-1]:\r\n print(i)\r\n time.sleep(1)\r\n\r\n if 't' in PressedKeys: # Save & exit\r\n print(\"Len = {}\\t saving...\".format(len(Data)))\r\n np.save(SaveAddress + FileName.format(PartNumber), Data)\r\n break\r\n\r\n StopTime = time.time()\r\n print(\"Process took {} seconds.\".format(StopTime - StartTime)) \r\n\r\n# END\r\n", "id": "9417611", "language": "Python", "matching_score": 3.0754811763763428, "max_stars_count": 2, "path": "version2/Collect Data/CollectData.py" }, { "content": "\"\"\"\r\n* By M.K \r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport KeyboardControl\r\nimport GetFrame\r\nimport keyboard\r\nimport numpy as np\r\nimport os\r\nimport time\r\nimport cv2\r\n\r\nFileName = r\"Speed-Data.npy\"\r\nTrainingData = None\r\n\r\n\r\ndef keys_to_output(pressed_keys):\r\n output = [0, 0, 0] # A, W, D,\r\n\r\n if 'A' in pressed_keys:\r\n output[0] = 1\r\n\r\n elif 'D' in pressed_keys:\r\n output[2] = 1\r\n\r\n else:\r\n output[1] = 1\r\n\r\n return output\r\n\r\n\r\ndef main():\r\n for i in list(range(10))[::-1]:\r\n print(i)\r\n time.sleep(1)\r\n\r\n if os.path.isfile(FileName):\r\n print(\"File Exist, loading data\")\r\n TrainingData = list(np.load(FileName, allow_pickle=True))\r\n\r\n else:\r\n print(\"File doesn't exist, making file...\")\r\n TrainingData = []\r\n\r\n last_time = time.time()\r\n\r\n while True:\r\n if keyboard.is_pressed('t'):\r\n screen = GetFrame.get()\r\n screen = cv2.resize(screen, (224, 224))\r\n keys = keys_to_output(KeyboardControl.key_check())\r\n TrainingData.append([screen])\r\n\r\n print(\"Getting data took {} seconds.\".format(time.time() - last_time))\r\n last_time = time.time()\r\n\r\n if keyboard.is_pressed('r'):\r\n print(\"Saving!\")\r\n print(len(TrainingData))\r\n np.save(FileName, TrainingData)\r\n break\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n main()\r\n\r\n except :\r\n print(\"Saving!\")\r\n print(len(TrainingData))\r\n np.save(FileName, TrainingData)\r\n\r\n# END", "id": "4062236", "language": "Python", "matching_score": 2.501918315887451, "max_stars_count": 2, "path": "Version1/Speed Controller Version 1/GetTrainData.py" }, { "content": "\"\"\"\r\n* By M.K\r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport GetFrame\r\nimport time\r\nimport keyboard\r\nimport os\r\n\r\nCounter = 0\r\n\r\ndef main():\r\n global Counter\r\n if not keyboard.is_pressed(\"t\"):\r\n return\r\n\r\n 
start_time = time.time()\r\n frame = GetFrame.get()\r\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\r\n cv2.imwrite(f\"{os.getcwd()}/Data/{Counter}.jpg\", frame) # saving frame \r\n Counter += 1\r\n print(f\"Process took {str(time.time() - start_time)[: 4]} seconds!\")\r\n time.sleep(1)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n for i in list(range(0, 10))[: : -1]:\r\n print(i)\r\n time.sleep(0.5)\r\n # ==========\r\n\r\n print(\"Press 't' to take screenshot.\")\r\n\r\n while True:\r\n main()\r\n\r\n if keyboard.is_pressed(\"e\"):\r\n _ = input(\"Press any key to exit >>> \")\r\n break\r\n# END", "id": "7405835", "language": "Python", "matching_score": 1.294660210609436, "max_stars_count": 2, "path": "version2/Speed-Controller-version 2/CollectData.py" }, { "content": "\"\"\"\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\nfrom time import sleep\r\n\r\nData = np.load(\"Speed-Data.npy\", allow_pickle=True)\r\nImages = []\r\n\r\n\r\ncounter = 0\r\nFrameNumber = 0\r\nfor i in range(0, 100):\r\n for i in Data:\r\n if not(counter == 3 or counter == 7):\r\n Classes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n Classes[FrameNumber] = 1\r\n\r\n frame = cv2.cvtColor(i[0], cv2.COLOR_BGR2GRAY)\r\n frame = frame[204:218, 196:204] # Cropping image\r\n frame = frame / 255\r\n\r\n Images.append([frame, FrameNumber])\r\n FrameNumber += 1\r\n\r\n counter += 1\r\n counter = 0\r\n FrameNumber = 0\r\n\r\nImages = np.array(Images)\r\nprint(Images)\r\nnp.save(\"Speed-Data-Normilized.npy\", Images)\r\n\r\n# END", "id": "3161066", "language": "Python", "matching_score": 1.6613894701004028, "max_stars_count": 2, "path": "Version1/Speed Controller Version 1/Normailize Data.py" }, { "content": "\"\"\"\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom time import sleep\r\n\r\nprint(\"Loading Data...\")\r\nDatas = np.load(r\"D:\\ML\\NFS Train files\\Final-Data-Merged-Encoded.npy\", allow_pickle=True)\r\n\r\nfor i in list(range(5))[::-1]:\r\n print(i)\r\n sleep(0.8)\r\n\r\n\r\nCounter = 0\r\n#print(Datas[\"images\"][0])\r\nwhile Counter <= len(Datas):\r\n Counter += 1\r\n cv2.imshow(\"DataSet\", Datas[Counter][0])\r\n print(Datas[Counter][1])\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n\r\n", "id": "9886288", "language": "Python", "matching_score": 2.5083889961242676, "max_stars_count": 2, "path": "Version1/Preprocessing/TestData.py" }, { "content": "\"\"\"\r\n* By M.K \r\n* Licensed under the MIT License.\r\n* https://github.com/MI-K253\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nDir = r\"Y-Test- Normalized-Data.npy\" # Data directory\r\nLoopMode = int(input(\"Loop Mode? 
1/0 >>>\\t\")) \r\n\r\nprint(\"Loading Data\")\r\nData = np.load(Dir, allow_pickle=True) # Loading Data\r\nprint(Data)\r\n\r\nfor row in Data:\r\n print(row[1])\r\n image = cv2.cvtColor(row, cv2.COLOR_RGB2BGR)\r\n cv2.imshow('FinalData', image)\r\n\r\n if LoopMode:\r\n if cv2.waitKey(26) & 0xFF == ord('q'):\r\n break\r\n\r\n else:\r\n cv2.waitKey() # waiting until user press a key\r\n", "id": "10128102", "language": "Python", "matching_score": 2.0785319805145264, "max_stars_count": 2, "path": "version2/Preprocess Data/CheckData.py" }, { "content": "\"\"\"\r\n* https://github.com/MI-K253/Python-Plays-Game\r\n\"\"\"\r\nimport zstandard\r\nimport pathlib\r\nimport shutil\r\nimport numpy as np\r\n\r\npath = r\"C:\\Users\\Miaad\\PycharmProjects\\Python plays nfs\\Version2\\Test data hot encoded.npy.zst\"\r\nout = r\"C:\\Users\\Miaad\\PycharmProjects\\Python plays nfs\\Version2\\a.npy\"\r\n\r\n\r\ndef decompress_zstandard_to_folder():\r\n with open(path, 'rb') as compressed:\r\n decomp = zstandard.ZstdDecompressor()\r\n\r\n with open(out, 'wb') as destination:\r\n decomp.copy_stream(compressed, destination)\r\n\r\ndecompress_zstandard_to_folder()\r\n\r\nfile = np.load(out, allow_pickle=True)\r\nprint(file)\r\n\r\n# END", "id": "2299576", "language": "Python", "matching_score": 0.8236499428749084, "max_stars_count": 2, "path": "Version1/Preprocessing/Zst files decompressor.py" } ]
1.884547
parkseobin
[ { "content": "import os\nimport tensorflow as tf\nimport numpy as np\nfrom PIL import Image\nimport time\nimport threading\n\n\n\nclass Dataset(object):\n def __init__(self, args):\n self.train_directory = args.train_directory\n self.validation_directory = args.validation_directory\n self.batch_size = args.batch_size\n self.train_list = os.listdir(self.train_directory)\n self.test_list = os.listdir(self.validation_directory)\n assert len(self.train_list) > 0 and len(self.test_list) > 0, 'Empty dataset'\n self.test_i = 0\n self.patch_size = args.patch_size # HR patch size\n self.queue_size = 256\n self.make_queue()\n\n\n def make_queue(self):\n image_shape_hr = (self.patch_size, self.patch_size, 3)\n image_shape_lr = (self.patch_size//2, self.patch_size//2, 3)\n image_shape_lr_ = (self.patch_size//4, self.patch_size//4, 3)\n\n self.maml_img_lr = tf.placeholder(tf.float32, (None,) + image_shape_lr)\n self.maml_img_bicubic = tf.placeholder(tf.float32, (None,) + image_shape_hr)\n self.maml_img_hr = tf.placeholder(tf.float32, (None,) + image_shape_hr)\n self.img_lr = tf.placeholder(tf.float32, (None,) + image_shape_lr_)\n self.img_bicubic = tf.placeholder(tf.float32, (None,) + image_shape_lr)\n self.img_hr = tf.placeholder(tf.float32, (None,) + image_shape_lr)\n # Dequeues element in random order\n queue = tf.RandomShuffleQueue(self.queue_size, self.batch_size, \n dtypes=(tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32), \n shapes=(image_shape_lr_, image_shape_lr, image_shape_lr,\n image_shape_lr, image_shape_hr, image_shape_hr))\n\n self.enqueue_many = queue.enqueue_many([self.img_lr, self.img_bicubic, self.img_hr,\n self.maml_img_lr, self.maml_img_bicubic, self.maml_img_hr])\n self.dequeue_many = queue.dequeue_many(self.batch_size)\n\n\n def start_enqueue_daemon(self, sess):\n def enqueue_thread(sess):\n while(True):\n img_lr, img_bicubic, img_hr, maml_img_lr, maml_img_bicubic, maml_img_hr \\\n = self.next(test=False)\n sess.run([self.enqueue_many], feed_dict={\n self.img_lr: img_lr, \n self.img_bicubic: img_bicubic,\n self.img_hr: img_hr,\n self.maml_img_lr: maml_img_lr, \n self.maml_img_bicubic: maml_img_bicubic,\n self.maml_img_hr: maml_img_hr\n })\n time.sleep(0.02)\n\n thread_number = 1\n threads = []\n for i in range(thread_number):\n t = threading.Thread(target=enqueue_thread, args=(sess,), daemon=True)\n t.start()\n threads.append(t)\n\n return threads\n\n\n def augmentation(self, input_img):\n '''\n input_img: Pillow Image object\n returns: Pillow Image object\n '''\n aug_methods = [\n Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,\n Image.ROTATE_90, Image.ROTATE_180, \n Image.ROTATE_270, Image.TRANSPOSE,\n Image.TRANSVERSE\n ]\n\n if(np.random.randint(len(aug_methods) + 1) == 0):\n return input_img\n else:\n return input_img.transpose(np.random.choice(aug_methods))\n\n\n def choose_random_image(self, test):\n if(test):\n random_img = os.path.join(self.validation_directory, self.test_list[self.test_i])\n self.test_i += 1\n if(self.test_i >= len(self.test_list)):\n self.test_i = 0\n else:\n random_img = os.path.join(self.train_directory, np.random.choice(self.train_list))\n \n random_img = Image.open(random_img).convert('RGB')\n random_img = random_img.crop((0, 0, \n random_img.size[0] - random_img.size[0]%8, random_img.size[1] - random_img.size[1]%8\n ))\n\n return random_img\n\n\n def next(self, test):\n if(test):\n maml_hr_img = self.choose_random_image(test)\n else:\n #maml_hr_img = self.global_img \n maml_hr_img = self.choose_random_image(test)\n\n # patch 
size on HR image\n if(maml_hr_img.size[1] <= self.patch_size or maml_hr_img.size[0] <= self.patch_size):\n return self.next(test)\n\n left = np.random.randint(maml_hr_img.size[0] - self.patch_size)\n upper = np.random.randint(maml_hr_img.size[1] - self.patch_size)\n maml_hr_img = maml_hr_img.crop((left, upper, left + self.patch_size, upper + self.patch_size))\n\n lev2 = maml_hr_img.size\n lev1 = (lev2[0]//2, lev2[1]//2)\n lev0 = (lev1[0]//2, lev1[1]//2)\n\n maml_lr_img = maml_hr_img.resize(lev1, resample=Image.BICUBIC)\n maml_bicubic_img = maml_lr_img.resize(lev2, resample=Image.BICUBIC)\n\n hr_img = maml_lr_img.copy()\n lr_img = hr_img.resize(lev0, resample=Image.BICUBIC)\n bicubic_img = lr_img.resize(lev1, resample=Image.BICUBIC)\n\n maml_hr_img = np.array(maml_hr_img, dtype=np.float32, ndmin=4)\n maml_lr_img = np.array(maml_lr_img, dtype=np.float32, ndmin=4)\n maml_bicubic_img = np.array(maml_bicubic_img, dtype=np.float32, ndmin=4)\n hr_img = np.array(hr_img, dtype=np.float32, ndmin=4)\n lr_img = np.array(lr_img, dtype=np.float32, ndmin=4)\n bicubic_img = np.array(bicubic_img, dtype=np.float32, ndmin=4)\n\n return lr_img, bicubic_img, hr_img, maml_lr_img, maml_bicubic_img, maml_hr_img", "id": "6673220", "language": "Python", "matching_score": 2.024412155151367, "max_stars_count": 52, "path": "dataset.py" }, { "content": "import pickle\nimport os\nimport numpy as np\nimport imageio\ntry:\n from sklearn.cross_validation import train_test_split\nexcept ModuleNotFoundError:\n from sklearn.model_selection import train_test_split\nfrom mpi_utils import mpi_size, mpi_rank\nfrom janky_stuff import JankySubsampler\n\n\nmpisize = mpi_size()\nmpirank = mpi_rank()\n\n\ndef get_dataset(name):\n return {\n 'cifar10': Cifar10,\n 'imagenet64': Imagenet64,\n 'imagenet32': Imagenet32,\n }[name]\n\n\ndef tile_images(images, d1=4, d2=4, border=1):\n id1, id2, c = images[0].shape\n out = np.ones([d1 * id1 + border * (d1 + 1),\n d2 * id2 + border * (d2 + 1),\n c], dtype=np.uint8)\n out *= 255\n if len(images) != d1 * d2:\n raise ValueError('Wrong num of images')\n for imgnum, im in enumerate(images):\n num_d1 = imgnum // d2\n num_d2 = imgnum % d2\n start_d1 = num_d1 * id1 + border * (num_d1 + 1)\n start_d2 = num_d2 * id2 + border * (num_d2 + 1)\n out[start_d1:start_d1 + id1, start_d2:start_d2 + id2, :] = im\n return out\n\n\ndef iter_data_mpi(*args, n_batch, log, shuffle=False, iters=None, seed=None, split_by_rank=True):\n 'Take the tensors in *args and iterate through them across mpi ranks if split_by_rank, otherwise iter normally'\n if not args:\n raise ValueError\n size = args[0].shape[0]\n for idx in range(1, len(args)):\n if args[idx].shape[0] != size:\n raise ValueError(f'mismatch in arg {idx}, shape {args[idx].shape[0]} vs {size}')\n\n if seed:\n np.random.seed(seed)\n\n if shuffle:\n idxs = np.random.permutation(np.arange(size))\n else:\n idxs = np.arange(size)\n\n ms = mpisize\n mr = mpirank\n if not split_by_rank:\n ms = 1\n mr = 0\n\n # Truncate the data if it does not divide evenly\n sequences_per_batch = ms * n_batch\n length = (idxs.size // sequences_per_batch) * sequences_per_batch\n if length != idxs.size:\n log('Truncating {}/{} sequences'.format(idxs.size - length, idxs.size))\n idxs = idxs[:length]\n # Reshape starting indices to K*mpi_size*n_batch\n idxs = idxs.reshape([-1, ms, n_batch])\n log(f'Number of minibatches in this dataset: {len(idxs)}')\n for mb_idx in range(len(idxs)):\n indices = idxs[mb_idx, mr]\n vals = [t[indices] for t in args]\n yield vals\n if iters and mb_idx > 
iters:\n break\n\n\nclass ImageDataset(object):\n 'Non-jpeg images'\n\n def decode(self, samples, logname):\n H = self.H\n out_samples = self.samples_to_image(samples)\n n_examples = out_samples.shape[0]\n d2 = H.sample_grid_dim\n if d2 > n_examples:\n d2 = n_examples\n d1 = n_examples // d2\n tiled_image = tile_images(out_samples, d1=d1, d2=d2)\n imname = f'{H.desc}-samples-{logname}.png'\n out_path = os.path.join(H.model_dir, imname)\n imageio.imwrite(out_path, tiled_image)\n self.logprint(f'Saved samples in file {out_path}')\n\n def initialize_image_embedding(self):\n w, h, c = self.embedding_sizes\n embedding = []\n for i in range(w):\n for j in range(h):\n for k in range(c):\n embedding.append([i, j, k])\n self.x_emb = np.array(embedding).T.reshape([1, 3, self.ctx])\n\n def samples_to_image(self, samples):\n return samples.reshape(self.orig_shape)\n\n\nclass JankySubsampledDataset(ImageDataset):\n def __init__(self, datasets, pmf, seed=42):\n assert len(pmf) == len(datasets)\n if seed is None:\n raise ValueError(\"seed can't be None\")\n self.datasets = datasets\n self.pmf = pmf\n # Some basic sanity-checks.\n attrs = (\n \"orig_shape\",\n \"shape\",\n \"ctx\",\n \"num_embeddings\",\n \"embedding_sizes\",\n \"n_vocab\",\n \"x_emb\",\n )\n for attr in attrs:\n assert hasattr(self.ref, attr), f\"{attr} is missing in the main dataset.\"\n ref_attr = getattr(self.ref, attr)\n setattr(self, attr, ref_attr)\n for oth in self.oth:\n assert hasattr(oth, attr), f\"{attr} is missing in the auxiliary dataset\"\n oth_attr = getattr(oth, attr)\n assert type(ref_attr) == type(oth_attr)\n if isinstance(ref_attr, np.ndarray):\n assert (ref_attr == oth_attr).all(), f\"expected {attr} to be the same.\"\n else:\n assert ref_attr == oth_attr, f\"expected {attr} to be the same.\"\n # Perform model selection and evaluation using the main dataset.\n attrs = (\n \"H\",\n \"logprint\",\n \"vaX\",\n \"vaY\",\n \"teX\",\n \"teY\",\n \"n_classes\",\n \"full_dataset_valid\",\n \"full_dataset_train\",\n \"iters_per_epoch\",\n )\n for attr in attrs:\n setattr(self, attr, getattr(self.ref, attr, None))\n trX = [ds.trX for ds in datasets]\n auxX = [np.zeros_like(tr[:, 0:1]) + idx for idx, tr in enumerate(trX)]\n self.trX = JankySubsampler(trX, pmf, seed=seed)\n self.auxX = JankySubsampler(auxX, pmf, seed=seed)\n\n @property\n def ref(self):\n return self.datasets[0]\n\n @property\n def oth(self):\n return self.datasets[1:]\n\n\nclass Imagenet64(ImageDataset):\n '''To download, if your data dir is /root/data:\n\n mkdir -p /root/data\n cd /root/data\n wget https://openaipublic.blob.core.windows.net/distribution-augmentation-assets/imagenet64-train.npy\n wget https://openaipublic.blob.core.windows.net/distribution-augmentation-assets/imagenet64-valid.npy\n '''\n\n def __init__(self, H, logprint):\n self.logprint = logprint\n self.H = H\n # Whether the full dataset is loaded on each rank, or just its own partition\n self.full_dataset_train = True\n self.full_dataset_valid = True\n n_train = 1231149\n self.n_batch = H.n_batch\n self.orig_shape = [-1, 64, 64, 3]\n self.orig_pixels = 64 * 64 * 3\n self.num_embeddings = 3\n self.n_vocab = 256\n self.embedding_sizes = [64, 64, 3]\n self.iters_per_epoch = n_train // (mpisize * self.n_batch)\n tr = np.load('/root/data/imagenet64-train.npy', mmap_mode='r').reshape([-1, 12288])\n self.trX = tr[:n_train]\n self.trY = None\n self.vaY = None\n self.teY = None\n self.vaX = tr[n_train:]\n self.n_classes = None\n self.teX = np.load('/root/data/imagenet64-valid.npy', 
mmap_mode='r').reshape([-1, 12288])\n self.n_vocab = 256\n self.ctx = 12288\n self.shape = [-1, self.ctx]\n assert self.ctx == H.n_ctx, f'n_ctx should be {self.ctx}'\n self.initialize_image_embedding()\n\n\nclass Imagenet32(Imagenet64):\n '''To download, if your data dir is /root/data:\n mkdir -p /root/data\n cd /root/data\n wget https://openaipublic.blob.core.windows.net/distribution-augmentation-assets/imagenet32-train.npy\n wget https://openaipublic.blob.core.windows.net/distribution-augmentation-assets/imagenet32-valid.npy\n '''\n\n def __init__(self, H, logprint):\n self.logprint = logprint\n self.H = H\n # 1281167 << dataset has this many examples\n # We will use 10k examples for dev\n n_train = 1281167 - 10000\n self.full_dataset_train = True\n self.full_dataset_valid = True\n self.n_batch = H.n_batch\n self.orig_shape = [-1, 32, 32, 3]\n self.trY = None\n self.vaY = None\n self.teY = None\n self.n_classes = None\n self.orig_pixels = 32 * 32 * 3\n self.num_embeddings = 3\n self.n_vocab = 256\n self.embedding_sizes = [32, 32, 3]\n self.iters_per_epoch = n_train // (mpisize * self.n_batch)\n # we are dumb and saved imagenet32 in 3x32x32, unlike ImageNet64, which we saved in transposed format, sorry about the inconsistency\n tr = np.load('/root/data/imagenet32-train.npy').reshape([-1, 3, 32, 32]).transpose(\n [0, 2, 3, 1]).reshape([-1, 3072])\n self.trX = tr[:n_train]\n self.vaX = tr[n_train:]\n self.teX = np.load('/root/data/imagenet32-valid.npy').reshape([-1, 3, 32, 32]).transpose(\n [0, 2, 3, 1]).reshape([-1, 3072])\n self.n_vocab = 256\n self.ctx = 3072\n self.shape = [-1, self.ctx]\n assert self.ctx == H.n_ctx, f'n_ctx should be {self.ctx}'\n self.initialize_image_embedding()\n\n\ndef flatten(outer):\n return [el for inner in outer for el in inner]\n\n\ndef unpickle_cifar10(file):\n fo = open(file, 'rb')\n data = pickle.load(fo, encoding='bytes')\n fo.close()\n data = dict(zip([k.decode() for k in data.keys()], data.values()))\n return data\n\n\ndef cifar10(data_dir, one_hot=True, test_size=None):\n test_size = test_size or 5000\n tr_data = [unpickle_cifar10(os.path.join(data_dir, 'data_batch_%d' % i)) for i in range(1, 6)]\n trX = np.vstack(data['data'] for data in tr_data)\n trY = np.asarray(flatten([data['labels'] for data in tr_data]))\n te_data = unpickle_cifar10(os.path.join(data_dir, 'test_batch'))\n teX = np.asarray(te_data['data'])\n teY = np.asarray(te_data['labels'])\n trX = trX.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).reshape([-1, 3072])\n teX = teX.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1).reshape([-1, 3072])\n trX, vaX, trY, vaY = train_test_split(trX, trY, test_size=test_size, random_state=11172018)\n if one_hot:\n trY = np.eye(10, dtype=np.float32)[trY]\n vaY = np.eye(10, dtype=np.float32)[vaY]\n teY = np.eye(10, dtype=np.float32)[teY]\n else:\n trY = np.reshape(trY, [-1, 1])\n vaY = np.reshape(vaY, [-1, 1])\n teY = np.reshape(teY, [-1, 1])\n return (trX, trY), (vaX, vaY), (teX, teY)\n\n\nclass Cifar10(ImageDataset):\n def __init__(self, H, logprint):\n self.logprint = logprint\n self.H = H\n self.full_dataset_train = True\n self.full_dataset_valid = True\n # 5k examples for valid\n n_train = 45000\n if H.datapoints:\n n_train = H.datapoints\n self.n_batch = H.n_batch\n self.iters_per_epoch = n_train // (mpisize * self.n_batch)\n self.orig_shape = [-1, 32, 32, 3]\n self.n_classes = 10\n self.orig_pixels = 32 * 32 * 3\n self.num_embeddings = 3\n self.n_vocab = 256\n self.embedding_sizes = [32, 32, 3]\n self.n_batch = H.n_batch\n self.iters_per_epoch = n_train 
// (mpisize * self.n_batch)\n (self.trX, self.trY), (self.vaX, self.vaY), (self.teX, self.teY) = cifar10('/root/data/cifar10/', one_hot=False, test_size=H.test_size)\n if H.datapoints:\n logprint(f'Only using {H.datapoints} examples')\n self.trX = self.trX[:n_train]\n self.trY = self.trY[:n_train]\n self.shape = [-1, 3072]\n self.ctx = 32 * 32 * 3\n assert self.ctx == H.n_ctx, f'n_ctx should be {self.ctx}'\n self.initialize_image_embedding()\n\n def preprocess(self, arr):\n arr = arr.reshape([-1, 3, 32, 32])\n arr = arr.transpose([0, 2, 3, 1])\n return arr.reshape([-1, 3072])\n", "id": "3871799", "language": "Python", "matching_score": 0.44943732023239136, "max_stars_count": 90, "path": "datasets.py" }, { "content": "# Code from https://github.com/Zheng222/IDN-tensorflow/blob/master/model.py\nimport tensorflow as tf\n\ndef IDN(t_image, t_image_bicubic, scale, reuse=False):\n t_image_bicubic = tf.identity(t_image_bicubic)\n with tf.variable_scope(\"IDN\", reuse=reuse):\n conv1 = tf.layers.conv2d(t_image, 64, (3, 3), (1, 1), padding='same', activation=lrelu, name='conv1')\n conv2 = tf.layers.conv2d(conv1, 64, (3, 3), (1, 1), padding='same', activation=lrelu, name='conv2')\n n = conv2\n for i in range(4):\n n = distillation(n, name='distill/%i' % i)\n output = upsample(n, scale=scale,features=64, name=str(scale)) + t_image_bicubic\n return output\n\ndef distillation(x, name=''):\n tmp = tf.layers.conv2d(x, 48, (3, 3), (1, 1), padding='same', activation=lrelu, name=name+'/conv1')\n tmp = GroupConv2d(tmp, act=lrelu, name=name+'/conv2')\n tmp = tf.layers.conv2d(tmp, 64, (3, 3), (1, 1), padding='same', activation=lrelu, name=name+'/conv3')\n tmp1, tmp2 = tf.split(axis=3, num_or_size_splits=[16, 48], value=tmp)\n tmp2 = tf.layers.conv2d(tmp2, 64, (3, 3), (1, 1), padding='same', activation=lrelu, name=name+'/conv4')\n tmp2 = GroupConv2d(tmp2, n_filter=48, act=lrelu, name=name+'/conv5')\n tmp2 = tf.layers.conv2d(tmp2, 80, (3, 3), (1, 1), padding='same', activation=lrelu, name=name+'/conv6')\n output = tf.concat(axis=3, values=[x, tmp1]) + tmp2\n output = tf.layers.conv2d(output, 64, (1, 1), (1, 1), padding='same', activation=lrelu, name=name+'/conv7')\n return output\n\n\ndef lrelu(x, alpha=0.05):\n return tf.maximum(alpha * x, x)\n\n\ndef _phase_shift(I, r):\n return tf.depth_to_space(I, r)\n\n\ndef PS(X, r, color=False):\n if color:\n Xc = tf.split(X, 3, 3) # tf.split(value, num_or_size_splits, axis=0)\n X = tf.concat([_phase_shift(x, r) for x in Xc], 3)\n else:\n X = _phase_shift(X, r)\n return X\n\ndef upsample(x, scale=4, features=32, name=None):\n with tf.variable_scope(name):\n x = tf.layers.conv2d(x, features, 3, padding='same')\n ps_features = 3 * (scale ** 2)\n x = tf.layers.conv2d(x, ps_features, 3, padding='same')\n x = PS(x, scale, color=True)\n return x\n\ndef GroupConv2d(x, n_filter=32, filter_size=(3, 3), strides=(1, 1), n_group=4, act=None, padding='SAME', name=None):\n groupConv = lambda i, k: tf.nn.conv2d(i, k, strides=[1, strides[0], strides[1], 1], padding=padding)\n channels = int(x.get_shape()[-1])\n with tf.variable_scope(name):\n We = tf.get_variable(\n name='W', shape=[filter_size[0], filter_size[1], channels / n_group, n_filter], trainable=True\n )\n\n if n_group == 1:\n outputs = groupConv(x, We)\n else:\n inputGroups = tf.split(axis=3, num_or_size_splits=n_group, value=x)\n weightsGroups = tf.split(axis=3, num_or_size_splits=n_group, value=We)\n convGroups = [groupConv(i, k) for i, k in zip(inputGroups, weightsGroups)]\n\n outputs = tf.concat(axis=3, 
values=convGroups)\n\n b = tf.get_variable(\n name='b', shape=n_filter, trainable=True\n )\n\n outputs = tf.nn.bias_add(outputs, b, name='bias_add')\n\n if act:\n outputs = lrelu(outputs)\n return outputs", "id": "9221140", "language": "Python", "matching_score": 1.959007740020752, "max_stars_count": 52, "path": "IDN_definition.py" }, { "content": "import tensorflow as tf\nfrom IDN_definition import IDN\n\n\n\nclass IDNModel(object):\n def __init__(self, tensor_input, gt_output, scope_name):\n self.tensor_input = tensor_input\n self.scope_name = scope_name\n self.gt_output = gt_output\n self.model_name = 'IDN'\n\n\n def build_model(self):\n img_lr, img_bicubic = self.tensor_input\n\n tf.get_variable_scope().reuse_variables()\n if(self.scope_name == self.model_name):\n output = IDN(img_lr, img_bicubic, 2)\n else:\n with tf.variable_scope(self.scope_name, reuse=tf.AUTO_REUSE):\n output = IDN(img_lr, img_bicubic, 2)\n\n self.output = output\n self.loss = tf.reduce_mean((output - self.gt_output)**2)\n\n y_vector = [0.25678824, 0.50412941, 0.09790588]\n output_y = output[:, :, :, 0:1] * y_vector[0] + output[:, :, :, 1:2]*y_vector[1] + output[:, :, :, 2:3]*y_vector[2]\n gt_output_y = self.gt_output[:, :, :, 0:1] * y_vector[0] + self.gt_output[:, :, :, 1:2]*y_vector[1] + self.gt_output[:, :, :, 2:3]*y_vector[2]\n self.output = tf.clip_by_value(output, 0, 255)\n output = self.output\n self.psnr = tf.image.psnr(output, self.gt_output, max_val=255)\n self.psnr_y = tf.image.psnr(output_y[:, 2:-2, 2:-2, :], gt_output_y[:, 2:-2, 2:-2, :], max_val=255)\n #self.psnr_y = tf.image.psnr(output_y, gt_output_y, max_val=255)\n self.ssim = tf.image.ssim_multiscale(output, self.gt_output, max_val=255)\n self.ssim_y = tf.image.ssim(output_y, gt_output_y, max_val=255)\n img_bicubic_y = img_bicubic[:, :, :, 0:1] * y_vector[0] + img_bicubic[:, :, :, 1:2]*y_vector[1] + img_bicubic[:, :, :, 2:3]*y_vector[2]\n self.bicubic_psnr = tf.image.psnr(img_bicubic_y, gt_output_y, max_val=255)", "id": "1438629", "language": "Python", "matching_score": 1.9519567489624023, "max_stars_count": 52, "path": "models.py" }, { "content": "import tensorflow as tf\nimport numpy as np\nfrom PIL import Image\nimport os\nimport datetime\n\n\n\nclass SRTrainer(object):\n def __init__(self, dataset, network_model_class, args):\n self.batch_size = args.batch_size\n self.log_step = args.log_step\n self.validation_step = args.validation_step\n self.train_iteration = args.train_iteration\n self.param_restore_path = args.param_restore_path\n self.param_save_path = args.param_save_path\n self.lr_beta = args.lr_beta # beta\n self.lr_alpha = args.lr_alpha #alpha\n self.gradient_number = args.gradient_number\n self.dataset = dataset\n self.network_model_class = network_model_class\n self.build_success = False\n \n\n def set_optimizer(self):\n with tf.variable_scope('optimizer', reuse=tf.AUTO_REUSE):\n # Extract variables\n final_vars = [v for v in tf.global_variables() if v.name.startswith(self.final_network.model_name)]\n update_vars = [v for v in tf.global_variables() if v.name.startswith('update')]\n update_gradients = tf.gradients(self.update_network.loss, update_vars)\n accum_vars = [tf.Variable(tf.zeros_like(value), trainable=False) for value in update_gradients]\n\n # Inner udpate\n self.update_opt = tf.train.GradientDescentOptimizer(self.lr_alpha, name='update_opt')\n #self.update_opt = tf.train.AdamOptimizer(self.lr_alpha, name='update_opt')\n self.init_update_opt = tf.variables_initializer(self.update_opt.variables())\n self.update_opt = 
self.update_opt.minimize(self.update_network.loss, var_list=update_vars)\n\n # Outer udpate\n self.init_accumulator = [accum_var.assign(tf.zeros_like(accum_var)) for accum_var in accum_vars]\n self.accumulate_grads = [accum_vars[i].assign_add(update_gradients[i] / self.batch_size) for i in range(len(update_gradients))]\n self.fomaml_opt = tf.train.AdamOptimizer(self.lr_beta, name='fomaml_opt').apply_gradients(zip(accum_vars, final_vars))\n\n self.copy_opt = tf.train.GradientDescentOptimizer(self.lr_alpha, name='copy_opt')\n self.init_copy_opt = tf.variables_initializer(self.copy_opt.variables())\n self.copy_opt = self.copy_opt.minimize(self.copied_network.loss)\n \n\n def build(self):\n self.img_lr = tf.placeholder(tf.float32, (None, None, None, 3))\n self.img_bicubic = tf.placeholder(tf.float32, (None, None, None, 3))\n self.img_hr = tf.placeholder(tf.float32, (None, None, None, 3))\n\n self.final_network = self.network_model_class((self.img_lr, self.img_bicubic), self.img_hr, '')\n self.final_network.build_model()\n\n # Make model for update\n self.update_network = self.network_model_class((self.img_lr, self.img_bicubic), self.img_hr, 'update')\n self.update_network.build_model()\n\n # Make model for test\n self.copied_network = self.network_model_class((self.img_lr, self.img_bicubic), self.img_hr, 'copy')\n self.copied_network.build_model()\n\n # Making sync operation\n copy_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'copy')\n target_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.final_network.model_name)\n update_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'update')\n copy_ops = []\n update_ops = []\n for i in range(len(copy_vars)):\n copy_ops.append(copy_vars[i].assign(target_vars[i]))\n update_ops.append(update_vars[i].assign(target_vars[i]))\n self.copy_sync_op = tf.group(*copy_ops)\n self.update_sync_op = tf.group(*update_ops)\n\n self.set_optimizer()\n self.build_success = True\n print('>> build complete!')\n\n\n def train_one_step(self, sess, epoch):\n train_img_lr, train_img_bicubic, train_img_hr, eval_img_lr, eval_img_bicubic, eval_img_hr \\\n = sess.run(self.dataset.dequeue_many)\n loss = np.zeros((self.batch_size,), dtype=np.float32)\n psnr = np.zeros((self.batch_size,), dtype=np.float32)\n for i in range(self.batch_size):\n #1 copy latest parameter\n sess.run([self.update_sync_op, self.init_update_opt])\n\n #2 update parameter for given iteration number with train data\n update_iteration = self.gradient_number\n for j in range(update_iteration):\n sess.run(self.update_opt, feed_dict={\n self.img_lr: [train_img_lr[i]],\n self.img_bicubic: [train_img_bicubic[i]], \n self.img_hr: [train_img_hr[i]]\n })\n\n _, loss[i], psnr[i] = sess.run([\n self.accumulate_grads, self.update_network.loss, self.update_network.psnr_y\n ], feed_dict={\n self.img_lr: [eval_img_lr[i]],\n self.img_bicubic: [eval_img_bicubic[i]],\n self.img_hr: [eval_img_hr[i]]\n })\n\n #3 update the parameter with accumulated gradients\n sess.run(self.fomaml_opt)\n sess.run(self.init_accumulator)\n\n return loss.mean(), psnr.mean()\n\n\n def validation(self, sess):\n test_size = len(self.dataset.test_list)\n updated_psnr = np.zeros((test_size,))\n base_psnr = np.zeros((test_size,))\n updated_ssim = np.zeros((test_size,))\n base_ssim = np.zeros((test_size,))\n bicubic_psnr = np.zeros((test_size,))\n for i in range(test_size):\n img_lr, img_bicubic, img_hr, maml_img_lr, maml_img_bicubic, maml_img_hr \\\n = self.dataset.next(test=True)\n sess.run([self.copy_sync_op, 
self.init_copy_opt])\n\n base_psnr[i], bicubic_psnr[i], base_ssim[i] = sess.run([\n self.copied_network.psnr_y, self.copied_network.bicubic_psnr,\n self.copied_network.ssim_y\n ], feed_dict={\n self.img_lr: maml_img_lr, \n self.img_bicubic: maml_img_bicubic, \n self.img_hr: maml_img_hr\n })\n\n for _ in range(self.gradient_number):\n sess.run([self.copy_opt], feed_dict={\n self.img_lr: img_lr, \n self.img_bicubic: img_bicubic,\n self.img_hr: img_hr,\n })\n\n updated_psnr[i], updated_ssim[i] = sess.run([\n self.copied_network.psnr_y, self.copied_network.ssim_y\n ], feed_dict={\n self.img_lr: maml_img_lr, \n self.img_bicubic: maml_img_bicubic, \n self.img_hr: maml_img_hr\n })\n\n return updated_psnr.mean(), base_psnr.mean(), bicubic_psnr.mean(), updated_ssim.mean(), base_ssim.mean()\n \n\n def train(self):\n assert self.param_save_path is not None, 'param_save_path is None'\n if(not os.path.exists(self.param_save_path)):\n os.makedirs(self.param_save_path)\n self.build()\n detector_saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.final_network.model_name))\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n self.dataset.start_enqueue_daemon(sess)\n if(self.param_restore_path != None):\n restore_path = os.path.join(self.param_restore_path, 'model.ckpt')\n detector_saver.restore(sess, restore_path)\n print('>> restored parameter from {}'.format(restore_path), flush=True)\n print('\\n[*] Start training MLSR\\n\\n')\n\n loss_log, psnr_log, best_psnr_test = 0, 0, 0\n for i in range(1, self.train_iteration+1):\n train_loss, train_psnr = self.train_one_step(sess, i)\n loss_log += train_loss\n psnr_log += train_psnr\n if(i % self.log_step == 0):\n loss_log /= self.log_step\n psnr_log /= self.log_step\n now = datetime.datetime.now()\n print(\"[{}]\".format(now.strftime('%Y-%m-%d %H:%M:%S')), flush=True)\n print(\"Step: [{}/{}]\\t Loss: {:.6f}\\tPSNR: {:.6f}\\n\".format(i, self.train_iteration, loss_log, train_psnr), flush=True)\n loss_log, psnr_log = 0, 0\n\n updated_psnr = None\n base_psnr = None\n if(i % self.validation_step == 0):\n updated_psnr, base_psnr, bicubic_psnr, updated_ssim, base_ssim = self.validation(sess)\n print(\">> Test PSNR: (base: {}), (bicubic: {}), (updated: {})\\n\".format(\n base_psnr, bicubic_psnr, updated_psnr\n ), flush=True)\n print(\">> Test SSIM: (base: {}), (updated: {})\\n\\n\".format(\n base_ssim, updated_ssim\n ), flush=True)\n\n if(updated_psnr > best_psnr_test):\n best_psnr_test = updated_psnr\n detector_saver.save(sess, os.path.join(self.param_save_path, 'model.ckpt'))\n detector_saver.save(sess, os.path.join(self.param_save_path, 'last.ckpt'))", "id": "11061555", "language": "Python", "matching_score": 3.007500648498535, "max_stars_count": 52, "path": "train.py" }, { "content": "import os\nimport tensorflow as tf\nimport argparse\nfrom train import SRTrainer\nfrom models import IDNModel\nfrom dataset import Dataset\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Training MLSR')\n parser.add_argument('--lr-beta', type=float, default=1e-6)\n parser.add_argument('--lr-alpha', type=float, default=1e-5)\n parser.add_argument('--batch-size', type=int, default=16)\n parser.add_argument('--patch-size', type=int, default=512)\n parser.add_argument('--gradient-number', type=int, default=5)\n parser.add_argument('--log-step', type=int, default=50)\n parser.add_argument('--train-iteration', type=int, default=10000)\n parser.add_argument('--validation-step', type=int, default=500)\n 
parser.add_argument('--gpu', type=str, default='0')\n parser.add_argument('--validation-directory', type=str, default='Urban100/train')\n parser.add_argument('--train-directory', type=str, default='Urban100/validation')\n parser.add_argument('--param-restore-path', type=str, default='checkpoint_x2')\n parser.add_argument('--param-save-path', type=str, default=None)\n args = parser.parse_args()\n os.environ['CUDA_VISIBLE_DEVICES']= args.gpu\n print('>> using gpu: {}\\n\\n'.format(args.gpu))\n\n dataset = Dataset(args)\n trainer = SRTrainer(dataset, IDNModel, args)\n trainer.train()", "id": "2718429", "language": "Python", "matching_score": 0.8561933040618896, "max_stars_count": 52, "path": "main.py" } ]
1.955482
eWarehousing-Solutions
[ { "content": "class ListResourceMixin:\n def list(self, *args, **kwargs):\n return self._api.filter(self.resource, *args, **kwargs)\n\n\nclass DetailResourceMixin:\n def get(self, *args, **kwargs):\n return self._api.get(self.resource, *args, **kwargs)\n\n\nclass CreateResourceMixin:\n def create(self, data, *args, **kwargs):\n return self._api.create(self.resource, data, *args, **kwargs)\n\n\nclass UpdateResourceMixin:\n def update(self, *args, **kwargs):\n return self._api.update(self.resource, *args, **kwargs)\n\n\nclass DeleteResourceMixin:\n def delete(self, *args, **kwargs):\n return self._api.delete(self.resource, *args, **kwargs)\n", "id": "10751973", "language": "Python", "matching_score": 3.3716511726379395, "max_stars_count": 1, "path": "ewhs/mixins.py" }, { "content": "from .mixins import ListResourceMixin, DeleteResourceMixin, DetailResourceMixin, \\\n UpdateResourceMixin, CreateResourceMixin\n\n\nclass Resource(object):\n resource = None\n\n def __init__(self, api):\n self._api = api\n\n\nclass CRUDResource(\n ListResourceMixin,\n DeleteResourceMixin,\n DetailResourceMixin,\n UpdateResourceMixin,\n CreateResourceMixin,\n Resource\n):\n pass\n\n\nclass Shipment(ListResourceMixin, DetailResourceMixin, Resource):\n resource = 'shipments'\n\n\nclass Order(ListResourceMixin, DetailResourceMixin, CreateResourceMixin, UpdateResourceMixin, Resource):\n resource = 'orders'\n\n\nclass Stock(ListResourceMixin, Resource):\n resource = 'stock'\n", "id": "4537335", "language": "Python", "matching_score": 1.1401216983795166, "max_stars_count": 1, "path": "ewhs/resources.py" }, { "content": "def test_list_shipments(authenticated_client, response):\n \"\"\"Retrieve a list of shipments\"\"\"\n response.get(f\"https://api.ewarehousing.com/wms/shipments/\", \"shipment_list\")\n\n shipments = authenticated_client.shipment.list()\n assert isinstance(shipments, list)\n\n assert len(shipments) == 1\n\n assert shipments[0]['id'] == '1701bf71-0b9a-4984-bee5-c9e83b7d2c1d'\n assert shipments[0]['order_reference'] == 'MW_ORD_001'\n assert shipments[0]['labels'][0]['tracking_code'] == 'ewh-pick-up'\n assert shipments[0]['labels'][0]['tracking_url'] == '#'\n assert shipments[0]['labels'][0]['shipping_method']['id'] == '7e808ac8-4167-11e9-92f0-0242ac140006'\n assert shipments[0]['labels'][0]['shipping_method']['name'] == 'eWarehousing afhaal order'\n\n\ndef test_filter_shipments(authenticated_client, response):\n \"\"\"Retrieve a list of shipments\"\"\"\n response.get(\"https://api.ewarehousing.com/wms/shipments/?order_reference=VB_ORDER_001\", \"shipment_list\")\n\n shipments = authenticated_client.shipment.list(params={\n 'order_reference': 'VB_ORDER_001',\n })\n\n assert isinstance(shipments, list)\n\n assert len(shipments) == 1\n\n assert shipments[0]['id'] == '1701bf71-0b9a-4984-bee5-c9e83b7d2c1d'\n assert shipments[0]['order_reference'] == 'MW_ORD_001'\n assert shipments[0]['labels'][0]['tracking_code'] == 'ewh-pick-up'\n assert shipments[0]['labels'][0]['tracking_url'] == '#'\n assert shipments[0]['labels'][0]['shipping_method']['id'] == '7e808ac8-4167-11e9-92f0-0242ac140006'\n assert shipments[0]['labels'][0]['shipping_method']['name'] == 'eWarehousing afhaal order'\n\n\ndef test_get_shipment(authenticated_client, response):\n \"\"\"Retrieve a single shipment by shipment ID.\"\"\"\n response.get(\"https://api.ewarehousing.com/wms/orders/1701bf71-0b9a-4984-bee5-c9e83b7d2c1d/\", \"shipment_single\")\n\n shipment = authenticated_client.order.get('1701bf71-0b9a-4984-bee5-c9e83b7d2c1d')\n assert 
isinstance(shipment, dict)\n\n assert shipment['id'] == '1701bf71-0b9a-4984-bee5-c9e83b7d2c1d'\n assert shipment['order_reference'] == 'MW_ORD_001'\n assert shipment['labels'][0]['tracking_code'] == 'ewh-pick-up'\n assert shipment['labels'][0]['tracking_url'] == '#'\n assert shipment['labels'][0]['shipping_method']['id'] == '7e808ac8-4167-11e9-92f0-0242ac140006'\n assert shipment['labels'][0]['shipping_method']['name'] == 'eWarehousing afhaal order'\n", "id": "11970656", "language": "Python", "matching_score": 2.9538044929504395, "max_stars_count": 1, "path": "tests/test_shipments.py" }, { "content": "ORDER_ID = \"94dbdb91-87ac-4634-b77d-e126a6206b15\"\n\n\ndef test_get_order(authenticated_client, response):\n \"\"\"Retrieve a single order by order ID.\"\"\"\n response.get(f\"https://api.ewarehousing.com/wms/orders/{ORDER_ID}/\", \"order_single\")\n\n order = authenticated_client.order.get(ORDER_ID)\n assert isinstance(order, dict)\n\n assert order['id'] == ORDER_ID\n assert order['external_reference'] == 'ORD001'\n assert order['address'] == {\n 'street': 'Nijverheidsweg'\n }\n\n\ndef test_list_orders(authenticated_client, response):\n \"\"\"Retrieve a list of orders\"\"\"\n response.get(f\"https://api.ewarehousing.com/wms/orders/\", \"order_list\")\n\n orders = authenticated_client.order.list()\n assert isinstance(orders, list)\n\n assert len(orders) == 2\n\n assert orders[0]['id'] == ORDER_ID\n assert orders[0]['external_reference'] == 'ORD001'\n assert orders[0]['address'] == {\n 'street': 'Nijverheidsweg'\n }\n\n assert orders[1]['external_reference'] == 'ORD002'\n assert orders[1]['address'] == {\n 'street': 'Nijverheidsweg'\n }\n\n\ndef test_filer_orders(authenticated_client, response):\n \"\"\"Retrieve a list of orders\"\"\"\n response.get(f\"https://api.ewarehousing.com/wms/orders/?status=created\", \"order_list\")\n\n orders = authenticated_client.order.list(\n params={'status': 'created'}\n )\n\n assert isinstance(orders, list)\n\n\ndef test_create_order(authenticated_client, response):\n \"\"\"Create an order.\"\"\"\n response.post(f\"https://api.ewarehousing.com/wms/orders/\", \"order_single\")\n\n data = {\n \"note\": \"Testorder\",\n \"customer\": \"53b5a543-129a-403c-9a6e-3d9c525ffa5b\",\n \"order_lines\": [\n {\n \"quantity\": 7,\n \"description\": \"Voorbeeldproduct-B\",\n \"variant\": \"default_variant_b_id\"\n },\n {\n \"quantity\": 15,\n \"description\": \"Voorbeeldproduct-A\",\n \"variant\": \"default_variant_a_id\"\n }\n ],\n \"shipping_email\": \"<EMAIL>\",\n \"shipping_method\": None,\n \"shipping_address\": {\n \"city\": \"Heinenoord\",\n \"state\": \"ZH\",\n \"street\": \"Nijverheidsweg\",\n \"country\": \"NL\",\n \"street2\": None,\n \"zipcode\": \"3274 KJ\",\n \"fax_number\": None,\n \"addressed_to\": None,\n \"phone_number\": \"0186 612 267\",\n \"mobile_number\": \"\",\n \"street_number\": 27,\n \"street_number_addition\": \"\"\n },\n \"external_reference\": \"ORD001\",\n \"requested_delivery_date\": \"2018-11-19\"\n }\n order = authenticated_client.order.create(data)\n assert isinstance(order, dict)\n assert order['id'] == ORDER_ID\n\n\ndef test_update_order(authenticated_client, response):\n \"\"\"Update an order.\"\"\"\n response.patch(\"https://api.ewarehousing.com/wms/orders/94dbdb91-87ac-4634-b77d-e126a6206b15/\", \"order_single\")\n\n data = {\n \"note\": \"Testorder\",\n \"customer\": \"53b5a543-129a-403c-9a6e-3d9c525ffa5b\",\n \"order_lines\": [\n {\n \"quantity\": 7,\n \"description\": \"Voorbeeldproduct-B\",\n \"variant\": 
\"default_variant_b_id\"\n },\n {\n \"quantity\": 15,\n \"description\": \"Voorbeeldproduct-A\",\n \"variant\": \"default_variant_a_id\"\n }\n ],\n \"shipping_email\": \"<EMAIL>\",\n \"shipping_method\": None,\n \"shipping_address\": {\n \"city\": \"Heinenoord\",\n \"state\": \"ZH\",\n \"street\": \"Nijverheidsweg\",\n \"country\": \"NL\",\n \"street2\": None,\n \"zipcode\": \"3274 KJ\",\n \"fax_number\": None,\n \"addressed_to\": None,\n \"phone_number\": \"0186 612 267\",\n \"mobile_number\": \"\",\n \"street_number\": 27,\n \"street_number_addition\": \"\"\n },\n \"external_reference\": \"ORD001\",\n \"requested_delivery_date\": \"2018-11-19\"\n }\n order = authenticated_client.order.update(ORDER_ID, data)\n assert isinstance(order, dict)\n assert order['id'] == ORDER_ID\n", "id": "6539987", "language": "Python", "matching_score": 0.4602188169956207, "max_stars_count": 1, "path": "tests/test_orders.py" }, { "content": "def test_list_stock_levels(authenticated_client, response):\n \"\"\"Retrieve a list of stock levels\"\"\"\n response.get(f\"https://api.ewarehousing.com/wms/stock/\", \"stock_list\")\n\n stock = authenticated_client.stock.list()\n assert isinstance(stock, list)\n\n assert len(stock) == 4\n\n assert stock[0]['id'] == 'f2894c16-399e-4ca1-9151-489775a6519c'\n assert stock[0]['article_code'] == 'SHRT-R'\n assert stock[0]['ean'] == '8785073983111'\n assert stock[0]['stock_physical'] == 1\n assert stock[0]['stock_salable'] == 1\n assert stock[0]['stock_available'] == 0\n assert stock[0]['stock_quarantaine'] == 0\n assert stock[0]['stock_plannable'] == 0\n\n assert stock[1]['id'] == 'f2894c16-399e-4ca1-9151-489775a6519d'\n assert stock[1]['article_code'] == 'SNPBCK'\n assert stock[1]['stock_physical'] == 0\n", "id": "8025689", "language": "Python", "matching_score": 0.7896081805229187, "max_stars_count": 1, "path": "tests/test_stock.py" }, { "content": "import platform\nimport re\nimport time\nfrom collections import OrderedDict\n\nfrom requests import Request, Session\nfrom .resources import Order, Shipment, Stock\nfrom .exceptions import ServerError, BadRequest, AuthenticationError\n\n\nclass EwhsClient:\n UNAME = \" \".join(platform.uname())\n CLIENT_VERSION = \"0.1.0\"\n\n API_URL = \"https://api.ewarehousing.com\"\n\n def __init__(self, username, password, customer_code=None, wms_code=None, api_url=None):\n self.session = Session()\n\n self.username = username\n self.password = password\n self.customer_code = customer_code\n self.wms_code = wms_code\n self.access_token = None\n self.refresh_token = None\n self.expires_at = 0\n\n self._url = api_url if api_url else self.API_URL\n\n self.user_agent_components = OrderedDict()\n self.set_user_agent_component(\"Ewarehousing\", self.CLIENT_VERSION)\n self.set_user_agent_component(\"Python\", platform.python_version())\n\n # initialize resources\n self.shipment = Shipment(self)\n self.order = Order(self)\n self.stock = Stock(self)\n\n def set_user_agent_component(self, key, value, sanitize=True):\n \"\"\"Add or replace new user-agent component strings.\n\n Given strings are formatted along the format agreed upon by eWarehousing and implementers:\n - key and values are separated by a forward slash (\"/\").\n - multiple key/values are separated by a space.\n - keys are camel-cased, and cannot contain spaces.\n - values cannot contain spaces.\n\n Note: When you set sanitize=false you need to make sure the formatting is correct yourself.\n \"\"\"\n if sanitize:\n key = \"\".join(_x.capitalize() for _x in re.findall(r\"\\S+\", key))\n 
if re.search(r\"\\s+\", value):\n value = \"_\".join(re.findall(r\"\\S+\", value))\n self.user_agent_components[key] = value\n\n @property\n def user_agent(self):\n \"\"\"Return the formatted user agent string.\"\"\"\n components = [\"/\".join(x) for x in self.user_agent_components.items()]\n return \" \".join(components)\n\n def _send(self, method, resource, resource_id=None, data=None, params=None, expand=None, **kwargs):\n url = 'wms/{}'.format(resource)\n\n if resource_id is not None:\n url = '{}/{}'.format(url, resource_id)\n\n self._authenticate()\n\n headers = dict(self._get_headers(), **{\"Authorization\": \"Bearer {}\".format(self.access_token)})\n\n if expand and len(expand):\n headers['Expand'] = ','.join(expand)\n\n request = Request(\n method=method,\n url=self._format_url(url),\n json=data,\n params=params,\n headers=headers,\n )\n\n prepped = self.session.prepare_request(request=request)\n response = self.session.send(prepped)\n\n if response.status_code == 401:\n raise AuthenticationError(response.json())\n\n if response.status_code == 400:\n raise BadRequest()\n\n if response.status_code == 500:\n raise ServerError()\n\n if response.status_code == 204:\n return None\n\n return response.json()\n\n def _authenticate(self):\n if not self.access_token or int(time.time()) > self.expires_at:\n self.request_access_token()\n\n def request_access_token(self):\n if not self.refresh_token:\n return self.request_refresh_token()\n\n auth_url = \"wms/auth/refresh/\"\n\n self._send_auth(\n auth_url,\n {\"refresh_token\": self.refresh_token},\n )\n\n def request_refresh_token(self):\n auth_url = \"wms/auth/login/\"\n\n self._send_auth(\n auth_url,\n {\n \"username\": self.username,\n \"password\": <PASSWORD>\n },\n )\n\n def _send_auth(self, url, post_data):\n request = Request(\n method=\"POST\",\n url=self._format_url(url),\n json=post_data,\n headers=self._get_headers(),\n )\n\n prepped = self.session.prepare_request(request=request)\n response = self.session.send(prepped)\n data = response.json()\n\n if response.status_code != 200:\n raise AuthenticationError(data[\"message\"])\n\n self.refresh_token = data[\"refresh_token\"]\n self.access_token = data[\"token\"]\n self.expires_at = data[\"exp\"]\n\n def _get_default_headers(self):\n return {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"User-Agent\": self.user_agent,\n \"X-Ewhs-Client-Info\": self.UNAME,\n }\n\n def _get_headers(self):\n headers = self._get_default_headers()\n\n if self.customer_code:\n headers[\"X-Customer-Code\"] = self.customer_code\n\n if self.wms_code:\n headers[\"X-Wms-Code\"] = self.wms_code\n\n return headers\n\n def _format_url(self, url):\n url = \"{}/{}\".format(self._url, url)\n\n if not url.endswith(\"/\"):\n return url + \"/\"\n\n return url\n\n def filter(self, resource, params=None, expand=None, **kwargs):\n return self._send('GET', resource, params=params, expand=expand, **kwargs)\n\n def create(self, resource, data, expand=None, **kwargs):\n return self._send('POST', resource, data=data, expand=expand, **kwargs)\n\n def update(self, resource, resource_id, data, expand=None, **kwargs):\n return self._send('PATCH', resource, resource_id, data=data, expand=expand, **kwargs)\n\n def delete(self, resource, resource_id, expand=None, **kwargs):\n return self._send('DELETE', resource, resource_id, expand=expand, **kwargs)\n\n def get(self, resource, resource_id, expand=None, **kwargs):\n return self._send('GET', resource, resource_id, expand=expand, **kwargs)\n", "id": 
"5507066", "language": "Python", "matching_score": 2.776400327682495, "max_stars_count": 1, "path": "ewhs/client.py" }, { "content": "class EwhsError(Exception):\n pass\n\n\nclass BadRequest(EwhsError):\n pass\n\n\nclass AuthenticationError(EwhsError):\n pass\n\n\nclass ServerError(EwhsError):\n pass\n\n\nclass ApiLimitReached(EwhsError):\n pass\n\n\nclass DoesNotExist(EwhsError):\n pass\n", "id": "11574056", "language": "Python", "matching_score": 0.8769241571426392, "max_stars_count": 1, "path": "ewhs/exceptions.py" } ]
1.140122
shivamsupr
[ { "content": "# Snippets from Actual Settings.py\n\nTEMPLATES = [\n {\n 'BACKEND': 'django_jinja.backend.Jinja2',\n \"DIRS\": [\"PROJECT_ROOT_DIRECTORY\", \"...\"],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'match_extension': '.html',\n 'context_processors': [\n 'django.template.context_processors.request',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz'\n ],\n 'globals': {\n },\n 'extensions': DEFAULT_EXTENSIONS + [\n 'pipeline.templatetags.ext.PipelineExtension',\n ],\n },\n },\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True\n },\n]\n\n# Auto Register Template Globals\n_template_globals = {}\nfor object_name in dir(app_template_globals):\n _obj = getattr(app_template_globals, object_name)\n if callable(_obj) and not object_name.startswith('__'):\n _template_globals[object_name] = _obj.__module__ + '.' + _obj.__qualname__\nTEMPLATES[0]['OPTIONS']['globals'].update(_template_globals)\n", "id": "12506050", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "project-name/my_app/settings.py" }, { "content": "from datetime import datetime\nimport time\nimport pytz\n\ndef now():\n return datetime.utcnow().replace(tzinfo=pytz.utc)", "id": "4680188", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "project-name/src/apps/main/app_template_globals.py" } ]
0
Gabrielchapo
[ { "content": "import numpy as np\nimport json\n\nclass GetMnistData:\n \n def __init__(self, train_path, test_path):\n\n ## Load and prepare data from mnist_handwritten_train.json\n try:\n with open(train_path, 'r') as file:\n content = file.read()\n file.close()\n except:\n exit(\"Error: mnist_handwritten_train.json not found\")\n content = json.loads(content)\n try:\n X = []\n Y = []\n for x in content:\n X.append(x[\"image\"][:])\n Y.append(x[\"label\"])\n except:\n exit(\"Error: incorrect JSON format\")\n self.X_train = np.array(X, float)\n self.Y_train = np.zeros((self.X_train.shape[0], 10), float)\n # preparing one-hot label Y train\n i = 0\n for y in Y:\n self.Y_train[i][y] = 1\n i += 1\n\n ## Load and prepare data from mnist_handwritten_test.json\n try:\n with open(test_path, 'r') as file:\n content = file.read()\n file.close()\n except:\n exit(\"Error: mnist_handwritten_test.json not found\")\n content = json.loads(content)\n try:\n X = []\n Y = []\n for x in content:\n X.append(x[\"image\"][:])\n Y.append(x[\"label\"])\n except:\n exit(\"Error: incorrect JSON format\")\n self.X_test = np.array(X, float)\n self.Y_test = np.zeros((self.X_test.shape[0], 10), float)\n\n # preparing one-hot label Y test\n i = 0\n for y in Y:\n self.Y_test[i][y] = 1\n i += 1\n\n def get_X_train(self):\n print(\"X_train shape:\", self.X_train.shape)\n return self.X_train\n def get_Y_train(self):\n print(\"Y_train shape:\", self.Y_train.shape)\n return self.Y_train\n def get_X_test(self):\n print(\"X_test shape:\", self.X_test.shape)\n return self.X_test\n def get_Y_test(self):\n print(\"Y_test shape:\", self.Y_test.shape)\n return self.Y_test", "id": "304799", "language": "Python", "matching_score": 2.691316604614258, "max_stars_count": 6, "path": "Model/GetMnistData.py" }, { "content": "from Model.NeuralNetwork import NeuralNetwork\nfrom Model.GetMnistData import GetMnistData\nimport numpy as np\n\n\ndata = GetMnistData('data/mnist_handwritten_train.json', 'data/mnist_handwritten_test.json')\n\nX = data.get_X_train()\nY = data.get_Y_train()\n\nX_test = data.get_X_test()\nY_test = data.get_Y_test()\n\nmodel = NeuralNetwork()\n\n\nmodel.add_layer(30, input_dim=X.shape[1], activation='sigmoid')\nmodel.add_layer(30, activation='sigmoid')\nmodel.add_layer(30, activation='sigmoid')\nmodel.add_layer(10, activation='softmax')\n\n#model.load(\"weights.json\")\n\nmodel.summary()\n\nmodel.compile(0.3, \"cross_entropy\")\n\nmodel.fit(X, Y, epoch=3000, normalize=True)\n\nprediction = model.predict(X_test)\nprediction = np.argmax(prediction, axis=1)\nreal = np.argmax(Y_test, axis=1)\ncount = 0\n\nfor i in range(len(prediction)):\n #print(\"predicted:\",prediction[i], ', real:', real[i])\n if prediction[i] == real[i]:\n count += 1\n\nprint(\"Accuracy: \", count / len(prediction))\n\n\n#model.save(\"weights.json\")\n", "id": "6796033", "language": "Python", "matching_score": 1.890584945678711, "max_stars_count": 6, "path": "test_NeuralNetwork.py" }, { "content": "import numpy as np\n\nclass CrossValidation:\n\n def __init__(self, model, X, Y, nb_folds=5):\n folds_size = len(X) // nb_folds\n self.all_accuracies = []\n for index in range(nb_folds):\n X_train = [x for i,x in enumerate(X) if i <= index * folds_size or i > (index+1) * folds_size]\n Y_train = [y for i,y in enumerate(Y) if i <= index * folds_size or i > (index+1) * folds_size]\n X_test = [x for i,x in enumerate(X) if i > index * folds_size and i <= (index+1) * folds_size]\n Y_test = [y for i,y in enumerate(Y) if i > index * folds_size and i <= (index+1) * 
folds_size]\n model.fit(X_train,Y_train)\n self.all_accuracies.append(model.evaluate(X_test, Y_test))\n\n def accuracy(self):\n return sum(self.all_accuracies) / len(self.all_accuracies) ", "id": "544976", "language": "Python", "matching_score": 1.6212363243103027, "max_stars_count": 6, "path": "Model/CrossValidation.py" }, { "content": "from Model.LinearRegression import LinearRegression\nfrom Model.CrossValidation import CrossValidation\nimport numpy as np\n\n# EXTRACT DATA FROM FILE\n\"\"\"\nmatrix = np.loadtxt(\"text\", delimiter=',', skiprows=1).T\nX, Y = matrix[0], matrix[1]\n\"\"\"\nmatrix = np.loadtxt(\"data/Car.csv\", delimiter=';', skiprows=1)\nY = [x[-1] for x in matrix]\nX = [x[0:-1] for x in matrix]\n\n\nmodel = LinearRegression(normalize=True)\n\n\"\"\"model.fit(X,Y)\n\n\nprint(model.predict([[8.860e+01, 1.688e+02, 6.410e+01, 4.880e+01, 2.548e+03, 1.300e+02,\n 3.470e+00, 2.680e+00, 9.000e+00, 1.110e+02, 5.000e+03, 2.100e+01,\n 2.700e+01]]))\"\"\"\n\ntmp = CrossValidation(model, X, Y, 10)\nprint(tmp.accuracy())", "id": "7059423", "language": "Python", "matching_score": 2.7480506896972656, "max_stars_count": 6, "path": "test_LinearRegression.py" }, { "content": "from Model.LogisticRegression import LogisticRegression\nfrom Model.CrossValidation import CrossValidation\nimport pandas as pd\n\ndf = pd.read_csv(\"data/titanic.csv\")\nY = df.Survived.tolist()\nY = [[1,0] if x == 1 else [0,1] for x in Y]\n\ndf = df[[\"Pclass\", \"SibSp\", \"Parch\", \"Fare\", \"Sex\"]]\ndf = pd.get_dummies(df)\nX = df.values.tolist()\n\nmodel = LogisticRegression(normalize=True)\n\"\"\"\nX = [[1,1,1,0,0,0],\n [1,0,1,0,0,0],\n [1,1,1,0,0,0],\n [0,0,1,1,1,0],\n [0,0,1,1,0,0],\n [0,0,1,1,1,0]]\nY = [[1, 0],\n [1, 0],\n [1, 0],\n [0, 1],\n [0, 1],\n [0, 1]]\"\"\"\n\"\"\"\nmodel.fit(X,Y)\nprint(model.get_weights())\nprint(model.predict([3.0, 1.0, 0.0, 7.25]))\nprint(Y[0])\"\"\"\n\ntmp = CrossValidation(model, X, Y, 5)\n\nprint(tmp.accuracy())", "id": "3243418", "language": "Python", "matching_score": 0.5800917148590088, "max_stars_count": 6, "path": "test_LogisticRegression.py" }, { "content": "import C_module\nimport numpy as np\n\nclass LogisticRegression:\n\n def __init__(self, normalize=False):\n self.mean = None\n self.sigma = None\n if normalize == False:\n self.normalize = False\n else:\n self.normalize = True\n\n def fit(self, X, Y):\n \n if self.normalize == True:\n X = np.array([np.array(x) for x in X])\n self.sigma = [np.amax(x) - np.amin(x) for x in zip(*X)]\n self.mean = [sum(x) / len(X) for x in zip(*X)]\n X = (X - self.mean) / self.sigma\n X = [list(x) for x in X]\n\n self.weights = C_module.regression_fit(X, Y, 1)\n\n def predict(self, X):\n\n # X isn't an unique value\n if type(X) is list or isinstance(X,np.ndarray):\n\n # X has multiple parameters\n if len(self.weights) > 2:\n if type(X[0]) is list or isinstance(X[0],np.ndarray):\n X = np.array([np.array(x) for x in X])\n if self.normalize == True:\n X = (X - self.mean) / self.sigma\n X = [list(x) for x in X]\n else:\n X = np.array([np.array(x) for x in X])\n if self.normalize == True:\n X = (X - self.mean) / self.sigma\n X = [list(X)]\n\n # X has one parameter\n else:\n X = [[x] for x in X]\n if self.normalize == True:\n X = (X - self.mean) / self.sigma\n X = list(X)\n \n # X is an unique value\n else:\n if self.normalize == True:\n X = [[(X - self.mean) / self.sigma]]\n\n return C_module.regression_predict(X, self.weights, 1)\n \n def evaluate(self, X_test, Y_test):\n predictions = self.predict(X_test)\n predictions = [np.argmax(x) for 
x in predictions]\n Y_test = [np.argmax(x) for x in Y_test]\n count = 0\n for i in range(len(Y_test)):\n if Y_test[i] == predictions[i]:\n count += 1\n return count / len(Y_test)\n ", "id": "6208264", "language": "Python", "matching_score": 3.652667999267578, "max_stars_count": 6, "path": "Model/LogisticRegression.py" }, { "content": "import C_module\nimport numpy as np\n\nclass LinearRegression:\n\n def __init__(self, normalize=False):\n self.mean = None\n self.sigma = None\n if normalize == False:\n self.normalize = False\n else:\n self.normalize = True\n\n def fit(self, X, Y):\n\n # X isn't an unique value\n if type(X) is list or isinstance(X,np.ndarray):\n\n # X has multiple parameters\n if type(X[0]) is list or isinstance(X[0],np.ndarray):\n if self.normalize == True:\n X = np.array([np.array(x) for x in X])\n self.sigma = [np.amax(x) - np.amin(x) for x in zip(*X)]\n self.mean = [sum(x) / len(X) for x in zip(*X)]\n X = (X - self.mean) / self.sigma\n X = [list(x) for x in X]\n\n # X has one parameter\n else:\n if self.normalize == True:\n self.mean = sum(X) / len(X)\n self.sigma = np.amax(X) - np.amin(X)\n X = [[x] for x in X]\n X = (X - self.mean) / self.sigma\n X = X.tolist()\n \n\n # X is an unique value\n else:\n X = [X]\n Y = [[y] for y in Y]\n Y = list(Y)\n self.weights = C_module.regression_fit(X, Y, 0)\n \n def predict(self, X):\n\n # X isn't an unique value\n if type(X) is list or isinstance(X,np.ndarray):\n\n # X has multiple parameters\n if len(self.weights) > 2:\n if type(X[0]) is list or isinstance(X[0],np.ndarray):\n X = np.array([np.array(x) for x in X])\n if self.normalize == True:\n X = (X - self.mean) / self.sigma\n X = [list(x) for x in X]\n else:\n X = np.array([np.array(x) for x in X])\n if self.normalize == True:\n X = (X - self.mean) / self.sigma\n X = [list(X)]\n\n # X has one parameter\n else:\n X = [[x] for x in X]\n if self.normalize == True:\n X = (X - self.mean) / self.sigma\n X = list(X)\n \n # X is an unique value\n else:\n if self.normalize == True:\n X = [[(X - self.mean) / self.sigma]]\n\n self.weights = [[x] for x in self.weights]\n return C_module.regression_predict(X, self.weights, 0)\n \n def evaluate(self, X_test, Y_test):\n predictions = self.predict(X_test)\n predictions = [abs((a - b) / b) for a, b in zip(predictions, Y_test)]\n return sum(predictions) / len(predictions)", "id": "1033996", "language": "Python", "matching_score": 0.882337212562561, "max_stars_count": 6, "path": "Model/LinearRegression.py" }, { "content": "import numpy as np\nimport json\nimport C_module\n\nclass NeuralNetwork:\n\n def __init__(self):\n self.layers = []\n self.nb_layers = 0\n\n def add_layer(self, size, activation=None, input_dim=None):\n\n layer = {}\n np.random.seed(42)\n if self.nb_layers == 0:\n if input_dim == None:\n exit(\"Error: first layer need an input_dim.\")\n else:\n input_dim = self.layers[-1][\"weights\"].shape[1]\n\n layer[\"name\"] = \"layer_\"+str(self.nb_layers)\n layer['activation'] = activation\n layer['weights'] = np.random.randn(input_dim, size)\n layer['bias'] = np.zeros((1, size))\n self.layers.append(layer)\n self.nb_layers += 1\n\n def summary(self):\n for attribut in self.layers:\n print(\"Layer:\", attribut[\"name\"], \"| Dimensions:\", attribut[\"weights\"].shape, \"| Activation:\", attribut[\"activation\"])\n\n def compile(self, lr, loss):\n self.lr = lr\n self.loss = loss\n\n def fit(self, X, Y, epoch, normalize=False):\n self.sigma = [np.amax(x) - np.amin(x) if np.amax(x) - np.amin(x) != 0 else 1 for x in zip(*X)]\n self.mean = 
[sum(x) / len(X) for x in zip(*X)]\n X = (X - self.mean) / self.sigma\n X = [list(x) for x in X]\n Y = [list(y) for y in Y]\n b = []\n W = []\n for layer in self.layers:\n W.append(layer[\"weights\"].tolist())\n b.append(layer['bias'].tolist())\n tmp = C_module.neural_network_fit(X,Y,W,b,epoch)\n W = tmp[:self.nb_layers]\n b = tmp[self.nb_layers:]\n for i,x in enumerate(W):\n self.layers[i][\"weights\"] = x\n for i,x in enumerate(b):\n self.layers[i][\"bias\"] = x\n \n \n def predict(self, X):\n self.sigma = [np.amax(x) - np.amin(x) if np.amax(x) - np.amin(x) != 0 else 1 for x in zip(*X)]\n self.mean = [sum(x) / len(X) for x in zip(*X)]\n X = (X - self.mean) / self.sigma\n X = [list(x) for x in X]\n b = []\n W = []\n for layer in self.layers:\n W.append(layer[\"weights\"])\n b.append(layer['bias'])\n return C_module.neural_network_predict(X,W,b)\n\n\n def load(self, path):\n try:\n with open(path, 'r') as file:\n content = file.read()\n file.close()\n except:\n print(\"Error: json file not found\")\n content = json.loads(content)\n for attribut in content:\n layer = {}\n layer[\"name\"] = attribut[\"name\"]\n layer[\"activation\"] = attribut[\"activation\"]\n layer[\"weights\"] = np.array(attribut[\"weights\"])\n self.layers.append(layer)\n self.nb_layers += 1\n\n def save(self, path):\n for index,layer in enumerate(self.layers):\n self.layers[index][\"weights\"] = layer[\"weights\"].tolist()\n content = json.dumps(self.layers)\n with open(path, \"w\") as file:\n file.write(content)\n file.close()\n", "id": "12178691", "language": "Python", "matching_score": 2.037299394607544, "max_stars_count": 6, "path": "Model/NeuralNetwork.py" }, { "content": "from distutils.core import setup, Extension\n\ndef main():\n setup(name=\"C_module\",\n description=\"Python interface for the C library function\",\n author=\"GabrielChapo\",\n ext_modules=[Extension(\"C_module\", [\n \"C_module/sources/binding.c\",\n \"C_module/sources/python_utils.c\",\n \"C_module/sources/error_functions.c\",\n \"C_module/sources/activation_functions.c\",\n \"C_module/sources/2D_matrix.c\",\n \"C_module/sources/neural_network.c\",\n \"C_module/sources/regression.c\"\n ])])\n\nif __name__ == \"__main__\":\n main()", "id": "10311059", "language": "Python", "matching_score": 0.5725857615470886, "max_stars_count": 6, "path": "setup.py" } ]
1.890585
Chaoses-Ib
[ { "content": "from io import StringIO\nfrom pathlib import Path\n\nfrom quom import Quom\n\nFILE_MAIN_HPP = \"\"\"\\\n#pragma once\n\n#ifndef FOOBAR_HPP\n#define FOOBAR_HPP\n\n#include \"foo.hpp\"\n\n#endif // FOOBAR_HPP\n\"\"\"\n\nFILE_FOO_HPP = \"\"\"\\\n#pragma once\n\n# /* */ ifndef /*123*/ FOOBAR_FOO_HPP\n#define FOOBAR_FOO_HPP // abc\n\n#include <iostream>\n\nextern int foo;\n\n#endif // FOOBAR_FOO_HPP\n\"\"\"\n\nFILE_FOO_CPP = \"\"\"\\\n#include \"foo.hpp\"\n\n#include <algorithm>\n\nint foo = 42;\"\"\"\n\nRESULT_NORMAL = \"\"\"\\\n#pragma once\n\n#ifndef FOOBAR_HPP\n#define FOOBAR_HPP\n\n# /* */ ifndef /*123*/ FOOBAR_FOO_HPP\n#define FOOBAR_FOO_HPP // abc\n\n#include <iostream>\n\nextern int foo;\n\n#endif // FOOBAR_FOO_HPP\n\n#endif // FOOBAR_HPP\n\n#include <algorithm>\n\nint foo = 42;\"\"\"\n\nRESULT_NORMAL_WITHOUT_TRIM = \"\"\"\\\n#pragma once\n\n#ifndef FOOBAR_HPP\n#define FOOBAR_HPP\n\n\n\n# /* */ ifndef /*123*/ FOOBAR_FOO_HPP\n#define FOOBAR_FOO_HPP // abc\n\n#include <iostream>\n\nextern int foo;\n\n#endif // FOOBAR_FOO_HPP\n\n\n#endif // FOOBAR_HPP\n\n\n#include <algorithm>\n\nint foo = 42;\"\"\"\n\n\ndef init():\n with open('main.hpp', 'w+') as file:\n file.write(FILE_MAIN_HPP)\n\n with open('foo.hpp', 'w+') as file:\n file.write(FILE_FOO_HPP)\n\n with open('foo.cpp', 'w+') as file:\n file.write(FILE_FOO_CPP)\n\n\ndef test_normal(fs):\n init()\n\n dst = StringIO()\n Quom(Path('main.hpp'), dst)\n\n assert dst.getvalue() == RESULT_NORMAL\n\n\ndef test_normal_without_trim(fs):\n init()\n\n dst = StringIO()\n Quom(Path('main.hpp'), dst, trim=False)\n\n assert dst.getvalue() == RESULT_NORMAL_WITHOUT_TRIM\n\n\ndef test_without_newline_at_end(fs):\n with open('main.hpp', 'w+') as file:\n file.write('int a;')\n\n dst = StringIO()\n Quom(Path('main.hpp'), dst)\n\n assert dst.getvalue() == 'int a;'\n", "id": "11155519", "language": "Python", "matching_score": 3.3631064891815186, "max_stars_count": 1, "path": "tests/test_quom/test_last_source_file.py" }, { "content": "import os\nfrom io import StringIO\nfrom pathlib import Path\n\nfrom quom import Quom\nfrom quom.__main__ import main\n\nFILE_MAIN_HPP = \"\"\"\nint foo = 3;\n\nint foo();\n\"\"\"\n\nFILE_MAIN_CPP = \"\"\"\nint foo() { return 42; }\n\"\"\"\n\nRESULT = \"\"\"\nint foo = 3;\n\nint foo();\n\nint foo() { return 42; }\n\"\"\"\n\n\ndef test_source_directory(fs):\n os.makedirs('project/')\n os.chdir('project/')\n os.makedirs('include/')\n os.makedirs('src/')\n\n with open('include/main.hpp', 'w+') as file:\n file.write(FILE_MAIN_HPP)\n\n with open('src/main.cpp', 'w+') as file:\n file.write(FILE_MAIN_CPP)\n\n dst = StringIO()\n Quom(Path('include/main.hpp'), dst)\n assert dst.getvalue() != RESULT\n\n dst = StringIO()\n Quom(Path('include/main.hpp'), dst, relative_source_directories=[Path('../src')])\n assert dst.getvalue() == RESULT\n\n dst = StringIO()\n Quom(Path('include/main.hpp'), dst, source_directories=[Path('src').resolve()])\n assert dst.getvalue() == RESULT\n\n dst = StringIO()\n Quom(Path('include/main.hpp'), dst, source_directories=[Path('/project/src')])\n assert dst.getvalue() == RESULT\n\n main(['include/main.hpp', 'result.hpp', '-S', './../src'])\n assert Path('result.hpp').read_text() == RESULT\n\n main(['include/main.hpp', 'result.hpp', '-S', 'src'])\n assert Path('result.hpp').read_text() == RESULT\n\n main(['include/main.hpp', 'result.hpp', '-S', '/project/src'])\n assert Path('result.hpp').read_text() == RESULT\n", "id": "3422685", "language": "Python", "matching_score": 2.3107869625091553, 
"max_stars_count": 1, "path": "tests/test_quom/test_source_directory.py" }, { "content": "import re\nfrom pathlib import Path\nfrom queue import Queue\nfrom typing import TextIO, Union, List\n\nfrom .quom_error import QuomError\nfrom .tokenizer import tokenize, Token, CommentToken, PreprocessorToken, PreprocessorIfNotDefinedToken, \\\n PreprocessorDefineToken, PreprocessorEndIfToken, PreprocessorIncludeToken, PreprocessorPragmaOnceToken, \\\n RemainingToken, LinebreakWhitespaceToken, EmptyToken, StartToken, EndToken, WhitespaceToken\n\nCONTINUOUS_LINE_BREAK_START = 0\nCONTINUOUS_BREAK_REACHED = 3\n\n\ndef find_token(tokens: List[Token], token_type: any):\n for i, token in enumerate(tokens):\n if isinstance(token, token_type):\n return i, token\n return None, None\n\n\ndef contains_only_whitespace_and_comment_tokens(tokens: List[Token]):\n for token in tokens:\n if not isinstance(token, (WhitespaceToken, CommentToken, EndToken)):\n return False\n return True\n\n\nclass Quom:\n def __init__(self, src_file_path: Union[Path, str], dst: TextIO, stitch_format: str = None,\n include_guard_format: str = None, trim: bool = True,\n include_directories: List[Union[Path, str]] = None,\n relative_source_directories: List[Union[Path]] = None,\n source_directories: List[Union[Path]] = None):\n self.__dst = dst\n self.__stitch_format = stitch_format\n self.__include_guard_format = re.compile('^{}$'.format(include_guard_format)) if include_guard_format else None\n self.__trim = trim\n self.__include_directories = [Path(x) for x in include_directories] if include_directories else []\n self.__relative_source_directories = relative_source_directories if relative_source_directories else [] \\\n if source_directories else [Path('.')]\n self.__source_directories = source_directories if source_directories else [Path('.')]\n\n self.__processed_files = set()\n self.__source_files = Queue()\n self.__cont_lb = CONTINUOUS_LINE_BREAK_START\n self.__prev_token = EmptyToken()\n\n self.__process_file(Path(), src_file_path, False, True)\n\n if not self.__source_files.empty():\n if stitch_format is not None:\n raise QuomError('Couldn\\'t stitch source files. 
The stitch location \"{}\" was not found.'\n .format(stitch_format))\n while not self.__source_files.empty():\n self.__process_file(Path(), self.__source_files.get(), True)\n # Write last token.\n self.__write_token(self.__prev_token, True)\n elif self.__cont_lb == CONTINUOUS_LINE_BREAK_START or not isinstance(self.__prev_token,\n LinebreakWhitespaceToken):\n # Write last token, if not a continuous line break.\n self.__write_token(self.__prev_token, True)\n\n def __process_file(self, relative_path: Path, include_path: Path, is_source_file: bool,\n is_main_header=False):\n # First check if file exists relative.\n file_path = relative_path / include_path\n if file_path.exists():\n with file_path.open() as file:\n tokens = tokenize(file.read())\n else:\n # Otherwise search in include directories.\n for include_directory in self.__include_directories:\n file_path = include_directory / include_path\n if file_path.exists():\n with file_path.open() as file:\n tokens = tokenize(file.read())\n break\n else:\n raise QuomError('Include not found: \"{}\"'.format(include_path))\n\n # Skip already processed files.\n if file_path in self.__processed_files:\n return\n self.__processed_files.add(file_path)\n\n for token in tokens:\n # Find local includes.\n token = self.__scan_for_include(file_path, token, is_source_file)\n if not token or self.__scan_for_source_files_stitch(token):\n continue\n\n self.__write_token(token, is_main_header)\n\n file_path = self.__find_possible_source_file(file_path)\n if file_path:\n self.__source_files.put(file_path)\n\n def __write_token(self, token: Token, is_main_header: bool):\n if isinstance(token, StartToken) or isinstance(token, EndToken):\n return\n\n if (not is_main_header and self.__is_pragma_once(token)) or self.__is_include_guard(token):\n token = token.preprocessor_tokens[-2]\n if not isinstance(token, LinebreakWhitespaceToken):\n return\n\n if self.__is_cont_line_break(token):\n return\n\n # Write previous token, store current.\n if self.__prev_token:\n self.__dst.write(str(self.__prev_token.raw))\n self.__prev_token = token\n\n @staticmethod\n def __is_pragma_once(token: Token):\n if isinstance(token, PreprocessorPragmaOnceToken):\n return True\n return False\n\n def __is_include_guard(self, token: Token):\n if self.__include_guard_format is None:\n return False\n\n if isinstance(token, (PreprocessorIfNotDefinedToken, PreprocessorDefineToken)):\n # Find first remaining token matching the include guard format.\n i, remaining_token = find_token(token.preprocessor_arguments, RemainingToken)\n if remaining_token and self.__include_guard_format.match(str(remaining_token).strip()) and \\\n contains_only_whitespace_and_comment_tokens(token.preprocessor_arguments[i + 1:]):\n return True\n elif isinstance(token, PreprocessorEndIfToken):\n # Find first comment token matching the include guard format.\n i, comment_token = find_token(token.preprocessor_arguments, CommentToken)\n if comment_token and self.__include_guard_format.match(str(comment_token.content).strip()) and \\\n contains_only_whitespace_and_comment_tokens(token.preprocessor_arguments[i + 1:]):\n return True\n\n def __find_possible_source_file(self, header_file_path: Path) -> Union[Path, None]:\n if header_file_path.suffix in ['.c', '.cpp', '.cxx', '.cc', '.c++', '.cp', '.C']:\n return\n\n # Checks if a equivalent compilation unit exits.\n for extension in ['.c', '.cpp', '.cxx', '.cc', '.c++', '.cp', '.C']:\n for src_dir in self.__relative_source_directories:\n file_path = (header_file_path.parent / src_dir 
/ header_file_path.name).with_suffix(extension)\n if file_path.exists():\n return file_path\n for src_dir in self.__source_directories:\n file_path = (src_dir / header_file_path.name).with_suffix(extension).resolve()\n if file_path.exists():\n return file_path\n return None\n\n def __scan_for_include(self, file_path: Path, token: Token, is_source_file: bool) -> Union[Token, None]:\n if not isinstance(token, PreprocessorIncludeToken) or not token.is_local_include:\n return token\n\n self.__process_file(file_path.parent, Path(str(token.path)), is_source_file)\n # Take include tokens line break token if any.\n token = token.preprocessor_tokens[-2]\n if isinstance(token, LinebreakWhitespaceToken):\n return token\n\n return None\n\n def __scan_for_source_files_stitch(self, token: Token) -> bool:\n if not isinstance(token, CommentToken) or str(token.content).strip() != self.__stitch_format:\n return False\n\n while not self.__source_files.empty():\n self.__process_file(Path(), self.__source_files.get(), True)\n\n return True\n\n def __is_cont_line_break(self, token: Token) -> bool:\n if not self.__trim:\n return False\n\n if isinstance(token, LinebreakWhitespaceToken):\n self.__cont_lb += 1\n elif isinstance(token, PreprocessorToken) and isinstance(token.preprocessor_tokens[-2],\n LinebreakWhitespaceToken):\n self.__cont_lb = CONTINUOUS_LINE_BREAK_START + 1\n else:\n self.__cont_lb = CONTINUOUS_LINE_BREAK_START\n\n return self.__cont_lb >= CONTINUOUS_BREAK_REACHED\n", "id": "7786992", "language": "Python", "matching_score": 3.873286485671997, "max_stars_count": 1, "path": "src/quom/quom.py" }, { "content": "import argparse\nimport sys\nfrom pathlib import Path\nfrom typing import List\n\nfrom quom import Quom\n\ntry:\n from quom import __version__\nexcept ImportError:\n __version__ = 'unknown'\n\n\ndef main(args: List[str]):\n parser = argparse.ArgumentParser(prog='quom', description='Single header generator for C/C++ libraries.')\n parser.add_argument('--version', action='version', version='quom {ver}'.format(ver=__version__))\n parser.add_argument('input_path', metavar='input', type=Path, help='Input file path of the main file.')\n parser.add_argument('output_path', metavar='output', type=Path,\n help='Output file path of the generated single header file.')\n parser.add_argument('--stitch', '-s', metavar='format', type=str, default=None,\n help='Format of the comment where the source files should be placed (e.g. // ~> stitch <~). \\\n Default: %(default)s (at the end of the main file)')\n parser.add_argument('--include_guard', '-g', metavar='format', type=str, default=None,\n help='Regex format of the include guard. Default: %(default)s')\n parser.add_argument('--trim', '-t', action='store_true', default=True,\n help='Reduce continuous line breaks to one. Default: %(default)s')\n parser.add_argument('--include_directory', '-I', type=Path, action='append', default=[],\n help='Add include directories for header files.')\n parser.add_argument('--source_directory', '-S', type=str, action='append', default=['.'],\n help='Set the source directories for source files. '\n 'Use ./ or .\\\\ in front of a path to mark as relative to the header file.')\n\n args = parser.parse_args(args)\n\n # Transform source directories to distingue between:\n # - relative from header file (starting with dot)\n # - relative from workdir\n # - absolute path\n relative_source_directories = []\n source_directories = []\n for src in args.source_directory:\n path = Path(src)\n if src == '.' 
or src.startswith('./') or src.startswith('.\\\\'):\n relative_source_directories.append(path)\n else:\n source_directories.append(path.resolve())\n\n with args.output_path.open('w+') as file:\n Quom(args.input_path, file, args.stitch, args.include_guard, args.trim, args.include_directory,\n relative_source_directories, source_directories)\n\n\ndef run():\n main(sys.argv[1:])\n\n\nif __name__ == '__main__':\n run()\n", "id": "10340329", "language": "Python", "matching_score": 0.11180336773395538, "max_stars_count": 1, "path": "src/quom/__main__.py" }, { "content": "import re\nimport string\n\ndef generate(charset):\n dic = { letter: [] for letter in string.ascii_lowercase } # { 首字母: [汉字] }\n\n with open('pinyin.txt', encoding='utf8') as f:\n ranges = { letter: [0, 0] for letter in string.ascii_lowercase } # 连续汉字合并成范围\n total_ranges = { letter: [0, 0] for letter in string.ascii_lowercase }\n\n def append_range(letter, rng):\n length = rng[1] - rng[0]\n\n # 加入 dic\n lst = dic[letter]\n lst.append(chr(rng[0]))\n if length == 0: # a\n pass\n elif length == 1: # ab\n lst.append(chr(rng[1]))\n elif length == 2: # abc,应该略快于 a-c\n lst.append(chr(rng[0] + 1))\n lst.append(chr(rng[1]))\n else: # a-d\n lst.append('-')\n lst.append(chr(rng[1]))\n \n # 更新 total_ranges\n total_ranges[letter][1] = rng[1]\n\n\n for line in f.readlines()[2:]:\n try:\n hanzi = line[-2]\n\n # 只保留指定字符集汉字\n hanzi.encode(charset)\n\n # 获取拼音\n begin = line.find(': ') + 2\n pinyin_seq = line[begin:-6]\n # (?:: |,)[^a-zāáǎàēéěèê̄ếê̌ềōóǒòḿńňǹ]\n pinyin_seq = re.sub('[āáǎà]', 'a', pinyin_seq)\n pinyin_seq = re.sub('[ēéěèê̄ếê̌ề]', 'e', pinyin_seq)\n pinyin_seq = re.sub('[ōóǒò]', 'o', pinyin_seq)\n pinyin_seq = re.sub('[ḿ]', 'm', pinyin_seq)\n pinyin_seq = re.sub('[ńňǹ]', 'n', pinyin_seq)\n pinyins = pinyin_seq.split(',')\n\n # 处理拼音\n for pinyin in pinyins:\n letter = pinyin[0]\n rng = ranges[letter]\n if rng[1] == 0: # 初始化\n rng[:] = [ord(hanzi), ord(hanzi)]\n total_ranges[letter][0] = ord(hanzi)\n elif ord(hanzi) == rng[1]: # 忽略同一汉字\n pass\n elif ord(hanzi) == rng[1] + 1: # 并入范围\n rng[1] += 1\n else: # 开始新 range\n append_range(letter, rng)\n rng[:] = [ord(hanzi), ord(hanzi)]\n \n except UnicodeEncodeError:\n pass\n \n # 处理剩余 range\n for letter, rng in ranges.items():\n append_range(letter, rng)\n\n with open(f'output_pinyin_initial_regex_{charset}.txt', 'w', encoding='utf8') as f:\n for key, value in sorted(dic.items()):\n if len(value) == 1: # i u v\n f.write(f'{key}\\n')\n else:\n # f.write('({0}|(?=[{1}-{2}])[{3}])\\n'.format(key, chr(total_ranges[key][0]), chr(total_ranges[key][1]), ''.join(value)))\n f.write(f'[{key}{ \"\".join(value) }]\\n')\n\ngenerate('gb2312')\ngenerate('gbk')\ngenerate('utf8')", "id": "4600111", "language": "Python", "matching_score": 2.481654167175293, "max_stars_count": 0, "path": "generate/pinyin_initial_regex.py" }, { "content": "import re\n\ntables = {\n range(0x3400, 0x9FED+1): [], # .{1017}\\0\n range(0x20000, 0x2D016+1): [],\n range(0x3007, 0x3007+1): [],\n range(0xE815, 0xE864+1): [], # .{18472}\\0\n range(0xFA18, 0xFA18+1): [], # .{4532}\\0\n range(0x2F835, 0x2F835+1): [], # .{10271}\\0\n range(0x30EDD, 0x30EDE+1): [] # .{5800}\\0\n}\nfor rng, lst in tables.items():\n lst[:] = [0] * (rng.stop - rng.start)\n\nwith open('pinyin.txt', encoding='utf8') as f:\n for line in f.readlines()[2:]:\n # 获取拼音\n begin = line.find(': ') + 2\n pinyin_seq = line[begin:-6]\n # (?:: |,)[^a-zāáǎàēéěèê̄ếê̌ềōóǒòḿńňǹ]\n pinyin_seq = re.sub('[āáǎà]', 'a', pinyin_seq)\n pinyin_seq = re.sub('[ēéěèê̄ếê̌ề]', 'e', 
pinyin_seq)\n pinyin_seq = re.sub('[ōóǒò]', 'o', pinyin_seq)\n pinyin_seq = re.sub('[ḿ]', 'm', pinyin_seq)\n pinyin_seq = re.sub('[ńňǹ]', 'n', pinyin_seq)\n pinyins = pinyin_seq.split(',')\n\n # 转换成 flags\n pinyin_flags = 0\n for pinyin in pinyins:\n pinyin_flags |= 2 ** (ord(pinyin[0]) - ord('a'))\n\n # 保存到 tables\n hanzi = ord(line[-2])\n for rng, lst in tables.items():\n if hanzi in rng:\n lst[hanzi - rng.start] = pinyin_flags\n break\n else:\n raise\n\nwith open(f'output_pinyin_initial_table.txt', 'w', encoding='utf8') as f:\n for rng, lst in tables.items():\n f.write('uint32_t table_{:X}_{:X}[]{{ {} }};\\n'.format(\n rng.start,\n rng.stop - 1,\n ','.join(str(flags) for flags in lst) # 16^n ≥ 10^(n+2) -> n ≥ 10,十位以下用 0x十六进制 的编码效率低于十进制\n ))\n \n f.write(f'''\nPinyinRange pinyin_ranges[{ len(tables) }]{{\n''')\n\n for rng, lst in tables.items():\n f.write('{{ 0x{0:X}, 0x{1:X}, table_{0:X}_{1:X} }},\\n'.format(\n rng.start,\n rng.stop - 1\n ))\n \n f.write('};')", "id": "12033757", "language": "Python", "matching_score": 1.3930506706237793, "max_stars_count": 0, "path": "generate/pinyin_initial_table.py" }, { "content": "# -*- coding: utf-8 -*-\nimport collections\nimport re\n\ndef code_to_hanzi(code):\n hanzi = chr(int(code.replace('U+', '0x'), 16))\n return hanzi\n\n\ndef sort_pinyin_dict(pinyin_dict):\n dic = collections.OrderedDict(\n sorted(pinyin_dict.items(),\n key=lambda item: int(item[0].replace('U+', '0x'), 16))\n )\n for item in dic.items(): # pinyin_combinations 要求\n item[1][:] = sorted(item[1])\n return dic\n\n\ndef remove_dup_items(lst):\n new_lst = []\n for item in lst:\n if item not in new_lst:\n new_lst.append(item)\n return new_lst\n\n\ndef parse_pinyins(fp):\n pinyin_map = {}\n for line in fp:\n line = line.strip()\n if line.startswith('#') or not line:\n continue\n code, pinyin = line.split('#')[0].split(':')\n pinyin = ','.join([x.strip() for x in pinyin.split() if x.strip()])\n pinyin_map[code.strip()] = pinyin.split(',')\n return pinyin_map\n\n\ndef merge(raw_pinyin_map, adjust_pinyin_map, overwrite_pinyin_map):\n new_pinyin_map = {}\n for code, pinyins in raw_pinyin_map.items():\n if code in overwrite_pinyin_map:\n pinyins = overwrite_pinyin_map[code]\n elif code in adjust_pinyin_map:\n pinyins = adjust_pinyin_map[code] + pinyins\n new_pinyin_map[code] = remove_dup_items(pinyins)\n\n return new_pinyin_map\n\n\ndef save_data(pinyin_map, writer):\n for code, pinyins in pinyin_map.items():\n hanzi = code_to_hanzi(code)\n line = '{code}: {pinyin} # {hanzi}\\n'.format(\n code=code, pinyin=','.join(pinyins), hanzi=hanzi\n )\n writer.write(line)\n\ndef pinyin_to_ascii(pinyin):\n py = re.sub('[āáǎà]', 'a', pinyin)\n py = re.sub('[ēéěèếề]|ê̄|ê̌', 'e', py) # ê̄=ê+̄ , ê̌=ê+̌ \n py = re.sub('[īíǐì]', 'i', py)\n py = re.sub('[ōóǒò]', 'o', py)\n py = re.sub('[ūúǔù]', 'u', py)\n py = re.sub('[üǘǚǜ]', 'v', py)\n py = re.sub('[ńňǹ]', 'n', py)\n py = re.sub('[ḿ]|m̀', 'm', py)\n return py\n\ndef pinyin_to_ascii_num(pinyin):\n py = pinyin_to_ascii(pinyin)\n if re.search('[āēīōū]|ê̄', pinyin): # ü\n return py + '1'\n if re.search('[áéếíóúǘḿń]', pinyin):\n return py + '2'\n if re.search('[ǎěǐǒǔǚň]|ê̌', pinyin):\n return py + '3'\n if re.search('[àèềìòùǜǹ]|m̀', pinyin):\n return py + '4'\n return py + '5' # 0不好输入\n\ndef pinyin_convert(pinyin: str, pinyin_map: dict, initial_map: dict, final_map: dict):\n # https://en.wikipedia.org/wiki/Pinyin\n '''\n initials = {\n 'b', 'p', 'm', 'f',\n 'd', 't', 'n', 'z', 'c', 's', 'l',\n 'zh', 'ch', 'sh', 'r',\n 'j', 'q', 'x',\n 'g', 'k', 'h',\n 'y', 
'w'\n }\n finals = {\n 'i', 'u', 'v',\n 'e', 'ie', 'o', 'uo', 'ue', 've',\n 'a', 'ia', 'ua',\n 'ei', 'ui',\n 'ai', 'uai',\n 'ou', 'iu',\n 'ao', 'iao',\n 'in', 'un', 'vn',\n 'en',\n 'an', 'ian', 'uan', 'van',\n 'ing',\n 'ong', 'iong',\n 'eng',\n 'ang', 'iang', 'uang',\n 'er'\n }\n '''\n # https://zh.wikipedia.org/wiki/汉语拼音\n '''\n finals = {\n 'a', 'o', 'e', 'er',\n 'i', 'ia', 'ie',\n 'u', 'ua', 'uo',\n 'v', 've', 'ue',\n\n 'ai', 'ei', 'ao', 'ou',\n 'iao', 'iou', 'iu'\n 'uai', 'uei', 'ui'\n\n 'an', 'en', 'ang', 'eng',\n 'ian', 'in', 'iang', 'ing',\n 'uan', 'uen', 'uang', 'ueng', 'ong',\n 'van', 'vn', 'un', 'iong'\n\n # 'uei', 'uen', 'ueng'\n }\n '''\n ascii = pinyin_to_ascii(pinyin)\n if ascii == 'hm': # 噷\n ascii = 'hen'\n elif ascii == 'hng': # 哼\n ascii = 'heng'\n elif ascii == 'm': # 呒呣嘸\n ascii = 'mu'\n elif ascii == 'n' or ascii == 'ng': # 唔嗯 㕶 𠮾\n ascii = 'en'\n \n if py := pinyin_map.get(ascii):\n return py\n \n result = ''\n for initial in sorted(initial_map, key=lambda x: -len(x)):\n if ascii.startswith(initial):\n ascii = ascii[len(initial):]\n result = initial_map[initial]\n break\n \n if final := final_map.get(ascii):\n result += final\n else:\n raise ValueError\n\n return result\n\n# 小鹤双拼\ndef pinyin_to_double_pinyin_xiaohe(pinyin):\n pinyin_map = {\n 'e': 'ee', 'o': 'oo',\n 'a': 'aa',\n 'ei': 'ei',\n 'ai': 'ai',\n 'ou': 'ou',\n 'ao': 'ao',\n 'en': 'en',\n 'an': 'an',\n 'eng': 'eg',\n 'ang': 'ah'\n }\n initial_map = {\n 'b': 'b', 'p': 'p', 'm': 'm', 'f': 'f',\n 'd': 'd', 't': 't', 'n': 'n', 'z': 'z', 'c': 'c', 's': 's', 'l': 'l',\n 'zh': 'v', 'ch': 'i', 'sh': 'u', 'r': 'r',\n 'j': 'j', 'q': 'q', 'x': 'x',\n 'g': 'g', 'k': 'k', 'h': 'h',\n 'y': 'y', 'w': 'w'\n }\n final_map = {\n 'i': 'i', 'u': 'u', 'v': 'v',\n 'e': 'e', 'ie': 'p', 'o': 'o', 'uo': 'o', 'ue': 't', 've': 't',\n 'a': 'a', 'ia': 'x', 'ua': 'x',\n 'ei': 'w', 'ui': 'v',\n 'ai': 'd', 'uai': 'k',\n 'ou': 'z', 'iu': 'q',\n 'ao': 'c', 'iao': 'n',\n 'in': 'b', 'un': 'y', 'vn': 'y',\n 'en': 'f',\n 'an': 'j', 'ian': 'm', 'uan': 'r', 'van': 'r',\n 'ing': 'k',\n 'ong': 's', 'iong': 's',\n 'eng': 'g',\n 'ang': 'h', 'iang': 'l', 'uang': 'l',\n 'er': 'er'\n }\n return pinyin_convert(pinyin, pinyin_map, initial_map, final_map)\n\ndef save_data2(pinyin_map):\n all_pinyins = set()\n pinyin_combinations = set()\n for pinyins in pinyin_map.values():\n for pinyin in pinyins:\n all_pinyins.add(pinyin)\n pinyin_combinations.add(' '.join(pinyins))\n all_pinyins = sorted(all_pinyins, key=lambda x: (pinyin_to_ascii_num(x), x))\n pinyin_combinations = sorted(pinyin_combinations, key=lambda x: (x.count(' '), x))\n\n pinyin_multi_combination_map = {}\n for pinyins in pinyin_map.values():\n if len(pinyins) > 1:\n pinyin_multi_combination_map[' '.join(pinyins)] = sorted([ all_pinyins.index(pinyin) for pinyin in pinyins ])\n pinyin_multi_combinations = sorted(pinyin_multi_combination_map.values())\n for key, val in pinyin_multi_combination_map.items():\n pinyin_multi_combination_map[key] = pinyin_multi_combinations.index(val)\n\n # pinyin_compact.txt\n tables = {\n # 粗略匹配有拼音的汉字:\n # [〇-礼][𠀀-𰻞]\n # [〇㐀-鿭-礼][𠀀-𭀖灰𰻝𰻞]\n range(0x3400, 0x9FED+1): [], # .{1017}\\0\n range(0x20000, 0x2D016+1): [],\n range(0x3007, 0x3007+1): [],\n range(0xE815, 0xE864+1): [], # .{18472}\\0\n range(0xFA18, 0xFA18+1): [], # .{4532}\\0\n range(0x2F835, 0x2F835+1): [], # .{10271}\\0\n range(0x30EDD, 0x30EDE+1): [] # .{5800}\\0\n }\n for rng, lst in tables.items():\n lst[:] = [0xFFFF] * (rng.stop - rng.start)\n for code, pinyins in pinyin_map.items():\n hanzi = 
int(code.replace('U+', '0x'), 16)\n for rng, lst in tables.items():\n if hanzi in rng:\n if len(pinyins) == 1:\n lst[hanzi - rng.start] = all_pinyins.index(pinyins[0])\n else:\n lst[hanzi - rng.start] = len(all_pinyins) + pinyin_multi_combination_map[' '.join(pinyins)]\n\n with open('pinyin_compact.txt', 'w', encoding='utf8') as f:\n f.write(f'''pinyins:\n{ chr(10).join(','.join((pinyin, pinyin_to_ascii(pinyin), pinyin_to_ascii_num(pinyin), pinyin_to_double_pinyin_xiaohe(pinyin))) for pinyin in all_pinyins) }\n\npinyin_combinations:\n{ chr(10).join(','.join(str(v) for v in combinations) for combinations in pinyin_multi_combinations) }\n\npinyin_tables:\n{ chr(10).join(f'0x{ rng.start :X}, 0x{ rng.stop - 1 :X}:{ chr(10) }{ \",\".join(str(v) for v in lst) }' for rng, lst in tables.items()) }''')\n\n\n # all_pinyin.md\n with open('all_pinyins.md', 'w', encoding='utf8') as f:\n f.write(f'''## All Pinyins\n{ len(all_pinyins) }\n```\n{ ' '.join(sorted(all_pinyins)) }\n```\n\n## All Pinyin Combinations\n{ len(pinyin_combinations) - len(pinyin_multi_combinations) } + { len(pinyin_multi_combinations) } = { len(pinyin_combinations) }\n```\n{ chr(10).join(pinyin_combinations) }\n```''')\n\n\ndef extend_pinyins(old_map, new_map, only_no_exists=False):\n for code, pinyins in new_map.items():\n if only_no_exists: # 只当 code 不存在时才更新\n if code not in old_map:\n old_map[code] = pinyins\n else:\n old_map.setdefault(code, []).extend(pinyins)\n\n\nif __name__ == '__main__':\n raw_pinyin_map = {}\n '''\n with open('kHanyuPinyin.txt', encoding='utf8') as fp:\n khanyupinyin = parse_pinyins(fp)\n raw_pinyin_map.update(khanyupinyin)\n with open('kXHC1983.txt', encoding='utf8') as fp:\n kxhc1983 = parse_pinyins(fp)\n extend_pinyins(raw_pinyin_map, kxhc1983)\n '''\n with open('kXHC1983.txt', encoding='utf8') as fp:\n kxhc1983 = parse_pinyins(fp)\n raw_pinyin_map.update(kxhc1983)\n with open('nonCJKUI.txt', encoding='utf8') as fp:\n noncjkui = parse_pinyins(fp)\n extend_pinyins(raw_pinyin_map, noncjkui)\n with open('kMandarin_8105.txt', encoding='utf8') as fp:\n adjust_pinyin_map = parse_pinyins(fp)\n extend_pinyins(raw_pinyin_map, adjust_pinyin_map)\n with open('kMandarin_overwrite.txt', encoding='utf8') as fp:\n _map = parse_pinyins(fp)\n extend_pinyins(adjust_pinyin_map, _map)\n extend_pinyins(raw_pinyin_map, adjust_pinyin_map)\n with open('kMandarin.txt', encoding='utf8') as fp:\n _map = parse_pinyins(fp)\n extend_pinyins(adjust_pinyin_map, _map)\n extend_pinyins(raw_pinyin_map, adjust_pinyin_map)\n with open('kTGHZ2013.txt', encoding='utf8') as fp:\n _map = parse_pinyins(fp)\n extend_pinyins(adjust_pinyin_map, _map)\n extend_pinyins(raw_pinyin_map, adjust_pinyin_map)\n with open('kHanyuPinlu.txt', encoding='utf8') as fp:\n khanyupinyinlu = parse_pinyins(fp)\n extend_pinyins(adjust_pinyin_map, _map)\n extend_pinyins(raw_pinyin_map, adjust_pinyin_map)\n with open('GBK_PUA.txt', encoding='utf8') as fp:\n pua_pinyin_map = parse_pinyins(fp)\n extend_pinyins(raw_pinyin_map, pua_pinyin_map)\n with open('kanji.txt', encoding='utf8') as fp:\n _map = parse_pinyins(fp)\n extend_pinyins(raw_pinyin_map, _map, only_no_exists=True)\n\n with open('overwrite.txt', encoding='utf8') as fp:\n overwrite_pinyin_map = parse_pinyins(fp)\n extend_pinyins(raw_pinyin_map, overwrite_pinyin_map)\n\n new_pinyin_map = merge(raw_pinyin_map, adjust_pinyin_map,\n overwrite_pinyin_map)\n new_pinyin_map = sort_pinyin_dict(new_pinyin_map)\n\n assert len(new_pinyin_map) == len(raw_pinyin_map)\n code_set = set(new_pinyin_map.keys())\n #assert 
set(khanyupinyin.keys()) - code_set == set()\n assert set(khanyupinyinlu.keys()) - code_set == set()\n assert set(kxhc1983.keys()) - code_set == set()\n assert set(adjust_pinyin_map.keys()) - code_set == set()\n assert set(overwrite_pinyin_map.keys()) - code_set == set()\n assert set(pua_pinyin_map.keys()) - code_set == set()\n with open('pinyin.txt', 'w', encoding='utf8') as fp:\n fp.write('# version: 0.11.0\\n')\n fp.write('# source: https://github.com/mozillazg/pinyin-data\\n')\n save_data(new_pinyin_map, fp)\n save_data2(new_pinyin_map)", "id": "2335772", "language": "Python", "matching_score": 2.121657133102417, "max_stars_count": 0, "path": "merge_unihan.py" }, { "content": "pinyins = []\npinyin_combinations = []\npinyin_tables = []\n\ndef read_pinyins(f):\n while line := f.readline()[:-1]:\n lst = line.split(',')\n pinyins.append(tuple(lst[i] for i in (0,2,3)))\n\ndef read_pinyin_combinations(f):\n while line := f.readline()[:-1]:\n pinyin_combinations.append(line)\n\ndef read_pinyin_tables(f):\n while line := f.readline()[:-1]:\n rng = line[:-1].split(',')\n pinyin_tables.append((range(int(rng[0], 16), int(rng[1], 16) + 1), f.readline()[:-1]))\n\ndef output():\n print(f'extern Pinyin pinyins[{len(pinyins)}];')\n pinyin_code = f'Pinyin pinyins[{len(pinyins)}] {{\\n'\n for pinyin in pinyins:\n '''\n s = \",\".join(f'IB_PINYIN_LITERAL(\"{ py }\")' for py in pinyin)\n pinyin_code += f'{{{ s }}},\\n'\n '''\n pinyin_code += f'P({pinyin[0]})'\n pinyin_code += '};\\n'\n\n max_comb = max(comb.count(\",\") for comb in pinyin_combinations) + 1 # 10\n print(f'extern PinyinCombination<{max_comb}> pinyin_combinations[{len(pinyin_combinations)}];')\n comb_code = f'PinyinCombination<{max_comb}> pinyin_combinations[{len(pinyin_combinations)}] {{\\n'\n for comb in pinyin_combinations:\n comb_code += f'{{{ comb.count(\",\") + 1 },{{{ comb }}}}},'\n comb_code += '};\\n'\n\n print(f'extern PinyinRange pinyin_ranges[{len(pinyin_tables)}];')\n table_code = ''\n range_code = f'PinyinRange pinyin_ranges[{len(pinyin_tables)}] {{\\n'\n for table in pinyin_tables:\n table_name = f'pinyin_table_{ table[0].start :X}_{ table[0].stop - 1 :X}'\n table_code += f'uint16_t { table_name }[] = {{{ table[1].replace(\",65535\", \",F\") }}};\\n'\n range_code += f'{{0x{ table[0].start :X}, 0x{ table[0].stop - 1 :X}, { table_name }}},\\n'\n range_code += '};'\n\n return f'''#include <IbPinyin/pinyin.hpp>\n\n#define P(s) {{IB_PINYIN_LITERAL(#s)}},\n#define F 65535\n\nnamespace pinyin {{\n{pinyin_code}\n{comb_code}\n{table_code}\n{range_code}\n}}\n'''\n\nwith open('data/pinyin_compact.txt', encoding='utf8') as f:\n if f.readline() == 'pinyins:\\n':\n read_pinyins(f)\n if f.readline() == 'pinyin_combinations:\\n':\n read_pinyin_combinations(f)\n if f.readline() == 'pinyin_tables:\\n':\n read_pinyin_tables(f)\n with open('data.cpp', 'w', encoding='utf-8-sig') as f:\n f.write(output())", "id": "12306479", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "data/generate.py" }, { "content": "from peak.util.proxies import CallbackProxy\n\n# shorthand\nibase = CallbackProxy(idaapi.get_imagebase)", "id": "7584849", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "IDAUSR/idapythonrc.py" } ]
2.121657
AnirudhVm
[ { "content": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom PixelP.PixelP import Process\n\n", "id": "11514485", "language": "Python", "matching_score": 1.6165974140167236, "max_stars_count": 0, "path": "PixelP/__init__.py" }, { "content": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nfrom distutils.core import setup\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name = 'PixelP', \n packages = ['PixelP'], \n version = '0.3', \n license='MIT', \n description = 'Image augmeantation and preprocessing tool', \n long_description=long_description,\n long_description_content_type='text/markdown',\n author = '<NAME>, <NAME>, <NAME>, <NAME>, <NAME>' , \n author_email = '<EMAIL>', \n url = 'https://github.com/AnirudhVm/PixelP', \n download_url = 'https://github.com/AnirudhVm/PixelP/archive/0.3.tar.gz', \n keywords = ['Image', 'Augment', 'Reshape','Color','Gray'], \n install_requires=[ \n 'validators',\n 'beautifulsoup4',\n 'numpy','pandas','opencv-python','keras'\n ],\n classifiers=[\n 'Development Status :: 3 - Alpha', \n 'Intended Audience :: Developers', \n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: MIT License', \n 'Programming Language :: Python :: 3', \n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n\n", "id": "862591", "language": "Python", "matching_score": 2.685990810394287, "max_stars_count": 0, "path": "setup.py" }, { "content": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[6]:\n\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport cv2\nfrom keras.preprocessing.image import ImageDataGenerator,array_to_img,img_to_array,load_img\n\n\n# In[3]:\n\nclass Process:\n def create_color_data(self,IMG_SIZE,training_data,Data_train,CATEGORIES):\n \n for category in CATEGORIES:\n c_path = os.path.join(Data_train,category)\n class_num = CATEGORIES.index(category)\n for img in os.listdir(c_path):\n try:\n img_array = cv2.imread(os.path.join(c_path,img))\n new_array = cv2.resize(img_array,(IMG_SIZE,IMG_SIZE)) \n training_data.append([new_array,class_num])\n except Exception as e:\n pass\n\n\n # In[47]:\n\n\n def create_gray_data(self,IMG_SIZE,training_data,Data_train,CATEGORIES):\n for category in CATEGORIES:\n path = os.path.join(Data_train,category)\n class_num = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path,img),cv2.IMREAD_GRAYSCALE)\n new_array = cv2.resize(img_array,(IMG_SIZE,IMG_SIZE)) \n training_data.append([new_array,class_num])\n except Exception as e:\n pass\n\n\n # In[48]:\n\n\n def idg(self,Data_path,CATEGORIES,Data_out,rotrange,wshift,hshift,srange,zoom,hflip,fill):\n datagen = ImageDataGenerator(rotation_range=rotrange,\n width_shift_range=wshift,\n height_shift_range=hshift,\n shear_range=srange,\n zoom_range=zoom,\n horizontal_flip=flip,\n fill_mode=fill)\n for category in CATEGORIES:\n path = os.path.join(Data_path,category)\n out_path=os.path.join(Data_out,category)\n for img in os.listdir(path):\n image = load_img(os.path.join(path,img))\n x = img_to_array(image)\n x = x.reshape((1,)+x.shape)\n i=0\n for batch in datagen.flow(x,batch_size=1,save_to_dir=out_path,save_prefix='gen',save_format='jpeg'):\n i=i+1\n if i>10:\n break\n\n\n", "id": "1566619", "language": "Python", "matching_score": 1.4002586603164673, 
"max_stars_count": 0, "path": "PixelP/PixelP.py" } ]
1.616597
ZacharyJohnson1
[ { "content": "import discord\n\n#create client\nintents = discord.Intents.default()\nintents.members = True\ndiscord_client = discord.Client(intents=intents)\n\n#read token from file\nwith open('token.txt') as token_file_client:\n token = token_file_client.readline(-1)\n\nemoji_happy_unicodes = [\n '\\U0001F600',\n '\\U0001F603',\n '\\U0001F604',\n '\\U0001F601',\n '\\U0001F642',\n '\\U0001F643',\n '\\U0001F60A'\n]\n\nemoji_sad_unicodes = [\n '\\U0001F62D'\n]\n\n'''event to debug ready state of bot'''\n@discord_client.event\nasync def on_ready():\n\n print('bot is online')\n\n\n'''assigning roles with reactions'''\n@discord_client.event\nasync def on_raw_reaction_add(payload):\n\n payload_message_id = payload.message_id\n target_message_id = 922283125843845241\n guild_id = payload.guild_id\n guild = discord_client.get_guild(guild_id)\n\n if payload_message_id == target_message_id:\n if payload.emoji.name in emoji_happy_unicodes:\n role = discord.utils.get(guild.roles, name='Happy')\n await payload.member.add_roles(role)\n elif payload.emoji.name in emoji_sad_unicodes:\n role = discord.utils.get(guild.roles, name='Sad')\n await payload.member.add_roles(role)\n\n\n'''remove roles with reactions'''\n@discord_client.event\nasync def on_raw_reaction_remove(payload):\n\n payload_message_id = payload.message_id\n target_message_id = 922283125843845241\n guild_id = payload.guild_id\n guild = discord_client.get_guild(guild_id)\n\n if payload_message_id == target_message_id:\n member = discord.utils.find(lambda m : m.id == payload.user_id, guild.members)\n if payload.emoji.name in emoji_happy_unicodes:\n role = discord.utils.get(guild.roles, name='Happy')\n await member.remove_roles(role)\n elif payload.emoji.name in emoji_sad_unicodes:\n role = discord.utils.get(guild.roles, name='Sad')\n await member.remove_roles(role)\n\n\n''' main '''\nif __name__ == \"__main__\":\n\n #run client\n discord_client.run(token)", "id": "631327", "language": "Python", "matching_score": 2.9499943256378174, "max_stars_count": 0, "path": "src/reaction-roles.py" }, { "content": "import discord\nimport re\n\n#create client\nintents = discord.Intents.default()\nintents.members = True\ndiscord_client = discord.Client(intents=intents)\n\n#read token from file\nwith open('token.txt') as token_file_client:\n token = token_file_client.readline(-1)\n\n\n'''event to debug ready state of bot'''\n@discord_client.event\nasync def on_ready():\n\n print('bot is online')\n\n\n'''event to handle messages addressed at bot'''\n@discord_client.event\nasync def on_message(msg):\n\n if msg.author == discord_client.user or msg == None:\n return\n\n user_msg_greeting = r'\\bhello\\b|\\bhi\\b|\\bhey\\b'\n user_msg_content = str(msg.content).lower()\n\n match = re.search(user_msg_greeting, user_msg_content)\n if match:\n await msg.channel.send('Hello World!')\n await msg.add_reaction('\\U0001F44B')\n\n\n'''echo user reactions'''\n@discord_client.event\nasync def on_reaction_add(reaction, user):\n\n if user != discord_client.user:\n await reaction.message.channel.send(f'{user} reacted with {reaction.emoji}')\n\n\n'''track edited messages'''\n@discord_client.event\nasync def on_message_edit(before, after):\n\n await before.channel.send(f'{before.author} edited a message:\\nBefore: {before.content}\\nAfter: {after.content}')\n\n\n''' main '''\nif __name__ == \"__main__\":\n\n #run client\n discord_client.run(token)", "id": "2821656", "language": "Python", "matching_score": 2.3459033966064453, "max_stars_count": 0, "path": "src/HelloWorld.py" }, { 
"content": "import discord\nfrom discord.ext import commands\nfrom functools import reduce\n\n#create client\ndiscord_client = commands.Bot(command_prefix='!')\n\n#read token from file\nwith open('token.txt') as token_file_client:\n token = token_file_client.readline(-1)\n\n\n'''event to debug ready state of bot'''\n@discord_client.event\nasync def on_ready():\n\n print('bot is online')\n\n\n'''hello command'''\n@discord_client.command()\nasync def hello(context, *args):\n if args != None:\n for arg in args:\n print(arg, sep=' ')\n await context.send(f'Hello {context.author}')\n\n\n''' simple addition '''\n@discord_client.command()\nasync def add(context, *args):\n\n if not check_list_digits(args):\n await context.send('Error: Invalid Arguments. Only Numbers Allowed.')\n\n values_str = ''\n if args != None and len(args) > 1:\n for arg in range(len(args)-1):\n values_str += f'{args[arg]} + '\n values_str += f'{args[-1]} '\n\n sum = reduce(lambda x, y: x + y, list(map(float, args)))\n await context.send(f'{values_str} = {sum}')\n else:\n await context.send(f' @{context.author} Invalid arguments')\n\n\n''' simple subtraction '''\n@discord_client.command()\nasync def subtract(context, *args):\n\n if not check_list_digits(args):\n await context.send('Error: Invalid Arguments. Only Numbers Allowed.')\n\n values_str = ''\n if args != None and len(args) > 1:\n for arg in range(len(args)-1):\n values_str += f'{args[arg]} - '\n values_str += f'{args[-1]} '\n\n diff = reduce(lambda x, y: x - y, list(map(float, args)))\n await context.send(f'{values_str} = {diff}')\n else:\n await context.send(f' @{context.author} Invalid arguments')\n\n''' simple multiplication '''\n@discord_client.command()\nasync def multiply(context, *args):\n\n if not check_list_digits(args):\n await context.send('Error: Invalid Arguments. Only Numbers Allowed.')\n\n values_str = ''\n if args != None and len(args) > 1:\n for arg in range(len(args)-1):\n values_str += f'{args[arg]} * '\n values_str += f'{args[-1]} '\n\n sum = reduce(lambda x, y: x * y, list(map(float, args)))\n await context.send(f'{values_str} = {sum}')\n else:\n await context.send(f' @{context.author} Invalid arguments')\n\n\n''' simple division '''\n@discord_client.command()\nasync def divide(context, *args):\n\n if not check_list_digits(args):\n await context.send('Error: Invalid Arguments. 
Only Numbers Allowed.')\n\n values_str = ''\n if args != None and len(args) > 1:\n for arg in range(len(args)-1):\n values_str += f'{args[arg]} / '\n values_str += f'{args[-1]} '\n\n diff = reduce(lambda x, y: x / y, list(map(float, args)))\n await context.send(f'{values_str} = {diff}')\n else:\n await context.send(f' @{context.author} Invalid arguments')\n\n\ndef check_list_digits(n):\n\n return all(element.isdigit() for element in n)\n\n\n''' main '''\nif __name__ == \"__main__\":\n\n #run client\n discord_client.run(token)", "id": "8110648", "language": "Python", "matching_score": 0.09315738081932068, "max_stars_count": 0, "path": "src/commands.py" }, { "content": "from Vertex import Vertex\nfrom Graph import Graph\n\ntime = 0\n\ndef dfs(G):\n \n for v in G.vertices:\n v.set_color('WHITE')\n v.set_parent(None)\n #time = 0\n \n for v in G.vertices:\n if v.get_color() == 'WHITE':\n dfs_visit(G, v)\n\n\ndef dfs_visit(G, u):\n\n global time\n time += 1\n u.set_distance(time)\n u.set_color('GRAY')\n\n for v in G.vertices[G.vertices.index(u)].adjacency_list:\n if v.get_color() == 'WHITE':\n v.set_parent(u)\n dfs_visit(G, v)\n u.set_color('BLACK')\n time += 1\n u.set_finish_time(time)\n print(f'{u.get_id()} - distance: {u.get_distance()}, finish: {u.get_finish_time()}')\n\nif __name__ == '__main__':\n \n u = Vertex('U')\n v = Vertex('V')\n w = Vertex('W')\n x = Vertex('X')\n y = Vertex('Y')\n z = Vertex('Z')\n\n G = Graph()\n\n G.insert_vertex(u)\n G.insert_vertex(v)\n G.insert_vertex(w)\n G.insert_vertex(x)\n G.insert_vertex(y)\n G.insert_vertex(z)\n\n G.define_edge(u, v)\n G.define_edge(w, x)\n G.define_edge(x, y)\n G.define_edge(y, z)\n G.define_edge(v, w)\n\n dfs(G)", "id": "3107243", "language": "Python", "matching_score": 3.869971752166748, "max_stars_count": 0, "path": "src/DFS.py" }, { "content": "from Vertex import Vertex\nfrom Graph import Graph\nimport math\n\ndef bfs(G, v):\n \n for u in G.vertices:\n if u is not v:\n u.set_color('WHITE')\n u.set_distance(math.inf)\n u.set_parent(None)\n \n v.set_color('GRAY')\n v.set_distance(0)\n v.set_parent(None)\n\n q = []\n q.append(v)\n\n while len(q) != 0:\n\n u = q.pop(0)\n index = G.vertices.index(u)\n for vertex in G.vertices[index].adjacency_list:\n if vertex.get_color() == 'WHITE':\n vertex.set_color('GRAY')\n vertex.set_distance(u.get_distance() + 1)\n vertex.set_parent(u)\n q.append(vertex)\n u.set_color('BLACK')\n print(f'id: {u.get_id()}, distance: {u.get_distance()}')\n\n\nif __name__ == '__main__':\n \n u = Vertex('U')\n v = Vertex('V')\n w = Vertex('W')\n x = Vertex('X')\n y = Vertex('Y')\n z = Vertex('Z')\n\n G = Graph()\n\n G.insert_vertex(u)\n G.insert_vertex(v)\n G.insert_vertex(w)\n G.insert_vertex(x)\n G.insert_vertex(y)\n G.insert_vertex(z)\n\n G.define_edge(u, v)\n #G.define_edge(w, x)\n G.define_edge(z, w)\n G.define_edge(x, y)\n G.define_edge(y, z)\n G.define_edge(u, x)\n G.define_edge(v, w)\n # for v in G.vertices: print(v.id)\n # for e in G.edges:\n # for v in e:\n # print(v.id)\n #bfs(G, u)\n bfs(G, z)", "id": "7980103", "language": "Python", "matching_score": 2.3231515884399414, "max_stars_count": 0, "path": "src/BFS.py" }, { "content": "from Edge import Edge\n\nclass Graph:\n\n def __init__(self, vertices=[], edges=[]):\n\n self.vertices = vertices\n self.edges = edges\n\n\n def insert_vertex(self, vertex):\n \n for v in self.vertices:\n if v.get_id() == vertex.get_id():\n v = vertex\n return\n self.vertices.append(vertex)\n\n \n def get_vertex(self, id):\n\n for v in self.vertices:\n if v.get_id() == id:\n return 
v\n return None\n \n\n def define_edge(self, vertex_one, vertex_two, weight=0):\n \n edge = Edge(vertex_one, vertex_two, weight)\n if edge not in self.edges:\n edge.u.set_adjacency_list(edge.v)\n edge.v.set_adjacency_list(edge.u)\n self.insert_vertex(edge.u)\n self.insert_vertex(edge.v)\n self.edges.append(edge)\n\n else:\n self.edges[self.edges.index(edge)].w = weight\n \n\n def add_edge(self, edge):\n\n if edge not in self.edges:\n edge.u.set_adjacency_list(edge.v)\n edge.v.set_adjacency_list(edge.u)\n self.insert_vertex(edge.u)\n self.insert_vertex(edge.v)\n self.edges.append(edge)\n else:\n self.edges[self.edges.index(edge)].w = edge.weight\n\n\n @staticmethod\n def is_acyclic(self, G):\n \n pass\n ", "id": "4225724", "language": "Python", "matching_score": 1.2120670080184937, "max_stars_count": 0, "path": "src/Graph.py" }, { "content": "class Edge:\n\n def __init__(self, u, v, w):\n\n self.u = u\n self.v = v\n self.w = w\n\n\n def __eq__(self, edge):\n\n if self.u.id == edge.u.id and self.v.id == edge.v.id:\n return True\n return False\n\n\n def __ge__(self, v):\n \n return True if self.w > v.w else False\n\n\n def __lt__(self, v):\n\n return True if self.w < v.w else False\n\n\n @staticmethod\n def weight(edge):\n\n return edge.w\n ", "id": "12491024", "language": "Python", "matching_score": 0.8271869421005249, "max_stars_count": 0, "path": "src/Edge.py" }, { "content": "from InitializeSingleSource import initialize_single_source\nfrom Relax import relax\nfrom Graph import Graph\n\nclass BellmanFord:\n\n def BellmanFord(G, w, s):\n\n initialize_single_source(G,s)\n \n for _ in range(0, len(G.vertices) - 1):\n for edge in G.edges:\n relax(edge.u, edge.v, w(edge))\n \n for edge in G.edges:\n if edge.v.get_distance() > edge.u.get_distance() + w(edge):\n return False\n return True\n", "id": "4556853", "language": "Python", "matching_score": 2.610600471496582, "max_stars_count": 0, "path": "src/BellmanFord.py" }, { "content": "from InitializeSingleSource import initialize_single_source\nfrom ExtractMinimum import extract_min\nfrom Relax import relax\n\nclass Dijkstra:\n\n def Dijkstra( G, w, s):\n\n initialize_single_source(G, s)\n S = []\n q = []\n q.append(G.vertices)\n \n while q:\n u = extract_min(q)\n S.append(u)\n for v in G.vertices[u].adjacency_list:\n relax(u, v, w)\n return S", "id": "9432757", "language": "Python", "matching_score": 2.0838382244110107, "max_stars_count": 0, "path": "src/Dijkstra.py" }, { "content": "from Vertex import Vertex\n\nclass ExtractMinimum:\n\n @staticmethod\n def extract_min(q):\n\n return min(q)\n", "id": "8693332", "language": "Python", "matching_score": 0.07180413603782654, "max_stars_count": 0, "path": "src/ExtractMinimum.py" }, { "content": "import math\n\nclass Vertex:\n\n def __init__(self, id, color='WHITE', distance=math.inf, discovery_time=0, finish_time=0, parent=None):\n\n self.id = id\n self.color = color\n self.distance = distance\n self.discovery_time = discovery_time\n self.finish_time = finish_time\n self.parent = parent\n self.adjacency_list = []\n\n\n def __ge__(self, v):\n \n return True if self.distance > v.distance else False\n\n\n def __lt__(self, v):\n\n return True if self.distance < v.distance else False\n\n\n def __eq__(self, v):\n \n if v == None: return False\n return True if self.id == v.id else False\n\n \n def get_id(self):\n\n return self.id\n\n \n def set_id(self, id):\n\n self.id = id\n\n\n def get_color(self):\n\n return self.color\n\n\n def set_color(self, color):\n\n self.color = color\n\n \n def 
get_distance(self):\n\n return self.distance\n\n \n def set_distance(self, distance):\n\n self.distance = distance\n\n \n def get_discovery_time(self):\n\n return self.discovery_time\n\n \n def set_discovery_time(self, discovery_time):\n\n self.discovery_time = discovery_time\n\n \n def get_finish_time(self):\n\n return self.finish_time\n\n \n def set_finish_time(self, finish_time):\n\n self.finish_time = finish_time\n\n \n def get_parent(self):\n\n return self.parent\n\n \n def set_parent(self, parent):\n\n self.parent = parent\n \n\n def set_adjacency_list(self, vertex):\n\n if vertex not in self.adjacency_list:\n self.adjacency_list.append(vertex)\n\n", "id": "5929078", "language": "Python", "matching_score": 1.9347740411758423, "max_stars_count": 0, "path": "src/Vertex.py" }, { "content": "class Relax:\n\n @staticmethod\n def relax(u, v, w):\n\n if v.get_distance() > u.get_distance() + w:\n v.set_distance(u.get_distance() + w)\n v.set_parent(u)", "id": "7605035", "language": "Python", "matching_score": 1.75840163230896, "max_stars_count": 0, "path": "src/Relax.py" }, { "content": "import math\nfrom ExtractMinimum import extract_min\n\nclass Prim:\n\n def prim(G, w, r):\n\n for v in G.vertices:\n v.set_distance(math.inf)\n v.set_parent(None)\n \n r.set_distance(0)\n q = []\n q.append(G.vertices)\n\n while q:\n u = extract_min(q)\n for v in G.vertices[u].adjacency_list:\n if v in q and w < v.get_distance():\n v.set_parent(u)\n v.set_distance(w)\n\nif __name__=='__main__':\n pass", "id": "10420946", "language": "Python", "matching_score": 2.0425615310668945, "max_stars_count": 0, "path": "src/Prim.py" }, { "content": "import math\n\nclass InitializeSingleSource:\n\n @staticmethod\n def initialize_single_source(G,s):\n \n for v in G.vertices:\n v.set_distance(math.inf)\n v.set_parent(None)\n s.set_distance(0)", "id": "6500601", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "src/InitializeSingleSource.py" }, { "content": "class Kruskal:\n\n def kruskal(G, w):\n\n A = []\n for v in G.vertices:\n pass\n ", "id": "4548012", "language": "Python", "matching_score": 0.1297982931137085, "max_stars_count": 0, "path": "src/Kruskal.py" } ]
1.934774
aryawicaksana
[ { "content": "from time import sleep\nfrom selenium import webdriver\nimport names\nimport clipboard\nimport random\nfrom selenium.webdriver.support.ui import Select\nchars = \"qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890!@$\"\nnum = \"123456789\"\nfor p in range(1):\n password = ''\n for c in range(8):\n password += random.choice(chars)\nbrowser = webdriver.Chrome(\"./chromedriver\")\nbrowser.implicitly_wait(5)\nmail ='https://generator.email/'\nbrowser.get(mail)\nemailgen = browser.find_element_by_id(\"copbtn\")\nemailgen.click()\nname = names.get_full_name()\nemail_str = clipboard.paste()\nprint(email_str,name,password)\nbrowser.execute_script(\"window.open()\")\nbrowser.switch_to.window(browser.window_handles[1])\nbrowser.get('https://www.instagram.com/')\nsleep(5)\nbrowser.refresh()\nlogin_link = browser.find_element_by_link_text(\"Sign up\")\nlogin_link.click()\nemailorphone = browser.find_element_by_name('emailOrPhone')\nf_name = browser.find_element_by_name('fullName')\npass_word = browser.find_element_by_name('password')\n\nemailorphone.send_keys(email_str)\nf_name.send_keys(name)\npass_word.send_keys(password)\nsleep(2)\nusername = browser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div[1]/div/form/div[5]/div/div/div/button')\nusername.click()\nsignup = browser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div[1]/div/form/div[7]/div/button')\nsignup.click()\nmonth = Select(browser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div[1]/div/div[4]/div/div/span/span[1]/select') )\nmonth.select_by_index(random.choice(num))\ndays = Select (browser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div[1]/div/div[4]/div/div/span/span[2]/select'))\ndays.select_by_index(random.choice(num))\nyear = Select (browser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div[1]/div/div[4]/div/div/span/span[3]/select') )\nyear.select_by_value('1998')\nnext = browser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div[1]/div/div[6]/button')\nnext.click()\nbrowser.switch_to.window(browser.window_handles[0])\nsleep(25)\ncode = browser.find_element_by_css_selector('#email-table > div.e7m.list-group-item.list-group-item-info > div.e7m.subj_div_45g45gg')\nfixcode = code.text.split()\nprint(fixcode[0])\nbrowser.switch_to.window(browser.window_handles[1])\nenter_code = browser.find_element_by_name('email_confirmation_code')\nenter_code.send_keys(fixcode[0])\nverify = browser.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div[1]/div[2]/form/div/div[2]/button')\nverify.click()\n", "id": "11708569", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "main.py" } ]
0
tyh2333
[ { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport unittest\n\nfrom etw import common\n\n\nclass TestCOMMON(unittest.TestCase):\n\n def test_rel_ptr_to_ptr(self):\n \"\"\"\n Tests conversion of RVA to absolute address\n\n :return: None\n \"\"\"\n\n assert(common.rel_ptr_to_ptr(0x1000, 0x234).value == 0x1234)\n return\n\n def test_convert_bool_str(self):\n \"\"\"\n Tests conversion of boolean string to boolean type\n\n :return: None\n \"\"\"\n\n assert(common.convert_bool_str('True') is True)\n return\n\n def test_args(self):\n \"\"\"\n Tests setting base arguments\n\n :return: None\n \"\"\"\n parser = common.set_base_args('test')\n args = common.parse_base_args(parser)\n assert(len(args) == 11)\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "4521231", "language": "Python", "matching_score": 0.8318173289299011, "max_stars_count": 247, "path": "tests/test_common.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport argparse\nimport subprocess\n\n\ndef write_to_log(filename, data):\n with open(filename, 'a') as file:\n file.write(data)\n\n\ndef format_data(data):\n # for each data provider, truncate data if PIDs are listed.\n lines = data.split('\\r\\n')\n formatted_data = ['\\n-------------------------------------------------------------------------------']\n for line in lines:\n if 'PID' in line or 'The command completed successfully' in line:\n break\n formatted_data.append('{:s}\\r'.format(line))\n return ''.join(formatted_data)\n\n\ndef list_all_providers(filename):\n\n # first, get list of all providers\n cmd = 'logman query providers'\n out = None\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, _ = proc.communicate()\n except:\n print('exception occurred while trying to run {:s}'.format(cmd))\n exit(-1)\n\n provs = out.decode('utf-8').split('---------------------------------------'\n '----------------------------------------')[1]\n provs = provs.split('The command completed successfully.')[0]\n provs = provs.split('\\r\\n')\n\n # for each provider on system get list of properties for each\n for i in range(len(provs)):\n prov_name = 
'\\\"{:s}\\\"'.format(provs[i].split('{')[0].strip())\n if prov_name != '':\n cmd = 'logman query providers {:s}'.format(prov_name)\n try:\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, _ = proc.communicate()\n except:\n print('exception occurred while trying to run {:s}'.format(cmd))\n exit(-1)\n write_to_log(filename, format_data(out.decode('utf-8')))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-o',\n '--outfile',\n help=\"file to write output to\",\n required=True)\n\n args = parser.parse_args()\n list_all_providers(args.outfile)\n", "id": "7571160", "language": "Python", "matching_score": 1.625563144683838, "max_stars_count": 247, "path": "utils/list_providers.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport argparse\nimport logging\nimport ctypes as ct\nimport ctypes.wintypes as wt\n\n\nANYSIZE_ARRAY = 1\n\nRETURN_RAW_DATA_ONLY = 0x1\nRETURN_RAW_DATA_ON_ERROR = 0x2\nRETURN_ONLY_RAW_DATA_ON_ERROR = 0x4\nRETURN_RAW_UNFORMATTED_DATA = 0x8\n\nif ct.sizeof(ct.c_void_p) == 8:\n ULONG_PTR = ct.c_ulonglong\nelse:\n ULONG_PTR = ct.c_ulong\n\nMAX_UINT = (2 ** 32) - 1\n\n# Defs for Microsoft's BOOL/BOOLEAN type\nTRUE = 1\nFALSE = 0\n\n# Defs for the privilege functions\nSE_PRIVILEGE_ENABLED = 2\n\n# Defs for Token Permissions\nTOKEN_ASSIGN_PRIMARY = 0x1\nTOKEN_DUPLICATE = 0x2\nTOKEN_IMPERSONATE = 0x4\nTOKEN_QUERY = 0x8\nTOKEN_QUERY_SOURCE = 0x10\nTOKEN_ADJUST_PRIVILEGES = 0x20\nTOKEN_ADJUST_GROUPS = 0x40\nTOKEN_ADJUST_DEFAULT = 0x80\nTOKEN_ADJUST_SESSIONID = 0x100\n\n# Defs for WIN32 error codes\nERROR_NOT_ALL_ASSIGNED = 0x514\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.ERROR)\n\n\nclass SYSTEMTIME(ct.Structure):\n _fields_ = [('wYear', wt.WORD),\n ('wMonth', wt.WORD),\n ('wDayOfWeek', wt.WORD),\n ('wDay', wt.WORD),\n ('wHour', wt.WORD),\n ('wMinute', wt.WORD),\n ('wSecond', wt.WORD),\n ('wMilliseconds', wt.WORD)]\n\n\nclass TIME_ZONE_INFORMATION(ct.Structure):\n _fields_ = [('Bias', ct.c_long),\n ('StandardName', ct.c_wchar * 32),\n ('StandardDate', SYSTEMTIME),\n ('StandardBias', ct.c_long),\n ('DaylightName', ct.c_wchar * 32),\n ('DaylightDate', SYSTEMTIME),\n ('DaylightBias', ct.c_long)]\n\n\nclass ETWException(Exception):\n \"\"\"\n Raise for an ETW exception\n \"\"\"\n\n\ndef rel_ptr_to_str(base, offset):\n \"\"\"\n Helper function to convert a relative offset to a string to the actual string.\n \"\"\"\n return ct.cast(rel_ptr_to_ptr(base, offset), ct.c_wchar_p).value\n\n\ndef rel_ptr_to_ptr(base, offset):\n \"\"\"\n Helper function to convert a relative offset to a void pointer.\n \"\"\"\n return ct.cast((ct.cast(base, ct.c_voidp).value + offset), ct.c_voidp)\n\n\ndef convert_bool_str(input_string):\n \"\"\"\n Helper to convert a string representation of a boolean to a real bool(tm).\n \"\"\"\n if 
input_string.lower() in ('1', 'true'):\n return True\n return False\n\n\ndef set_base_args(name):\n \"\"\"\n Sets base arguments for command line.\n\n :return: Instance of arg parser after adding base arguments.\n \"\"\"\n\n description_format = 'Use ETW (Event Tracing for Windows) to capture {:s} events'.format(name)\n parser = argparse.ArgumentParser(description=description_format)\n\n parser.add_argument('--ring-buffer-size', dest='ring_buf_size', default=1024, type=int,\n help='The size of the ring buffer used for capturing events')\n parser.add_argument('--max-str-len', default=1024, type=int,\n help='The maximum length of the strings that proceed the structure')\n parser.add_argument('--min-buffers', default=0, type=int,\n help='The minimum number of buffers for an event tracing session')\n parser.add_argument('--max-buffers', default=0, type=int,\n help='The maximum number of buffers for an event tracing session')\n parser.add_argument('--filters', default=None, nargs='+',\n help='A whitelist of task_names that we want to handle post-capture')\n parser.add_argument('--logfile', default=None,\n help='Name of file to store events')\n parser.add_argument('--no-conout', action='store_true',\n help='Output live capture to console')\n parser.add_argument('--level',\n default='information',\n choices=['critical', 'error', 'warning', 'information', 'verbose'],\n help='Information level of the capture. Options are critical, error, warning,\\\n information(default), and verbose')\n parser.add_argument('--any-keywords', default=None, nargs='+',\n help='Keywords to filter on pre-capture (can match any)')\n parser.add_argument('--all-keywords', default=None, nargs='+',\n help='Keywords to filter on pre-capture (must match all)')\n parser.add_argument('--default-filters', action='store_true',\n help='Apply default set of filters')\n return parser\n\n\ndef parse_base_args(parser):\n \"\"\"\n parses base arguments\n\n :return: dict of parsed base args.\n \"\"\"\n\n parsed_args = parser.parse_args()\n\n from etw import evntrace as et\n\n level = {'critical': et.TRACE_LEVEL_CRITICAL,\n 'error': et.TRACE_LEVEL_ERROR,\n 'warning': et.TRACE_LEVEL_WARNING,\n 'information': et.TRACE_LEVEL_INFORMATION,\n 'verbose': et.TRACE_LEVEL_VERBOSE,\n 'reserved6': et.TRACE_LEVEL_RESERVED6,\n 'reserved7': et.TRACE_LEVEL_RESERVED7,\n 'reserved8': et.TRACE_LEVEL_RESERVED8,\n 'reserved9': et.TRACE_LEVEL_RESERVED9}\n parsed_args.level = level[parsed_args.level]\n\n if parsed_args.default_filters is True and parsed_args.filters is not None:\n raise ETWException('Cannot specify use default filters and set filters')\n\n if parsed_args.no_conout is True and parsed_args.logfile is None:\n raise ETWException('Either console output or logfile must be specified')\n\n return vars(parsed_args)\n\n\ndef run(name, filters=None):\n \"\"\"\n Starts the capture using ETW.\n\n :param name: Name of the capture class to be displayed to the user.\n :param filters: List of filters to apply to capture.\n :return: Does not return anything.\n \"\"\"\n\n logger.setLevel(logging.INFO)\n logger.info('{:s} - Started (filters = {!s:s})'.format(name, filters))\n\n logger.info('Press ENTER to stop capture')\n input()\n\n logger.info('{:s} - Stopped'.format(name))\n\n\ndef on_event_callback(event_tufo, logfile=None, no_conout=False):\n \"\"\"\n Starts the capture using ETW.\n\n :param event_tufo: tufo containing event information\n :param logfile: Path to logfile.\n :param no_conout: If true does not output live capture to console.\n :return: Does not 
return anything.\n \"\"\"\n\n import pprint\n from collections import Mapping, Iterable\n\n def encode(data, encoding='utf-8'):\n if isinstance(data, str):\n return data.encode(encoding, 'ignore')\n elif isinstance(data, Mapping):\n return dict(map(encode, data.items()))\n elif isinstance(data, Iterable):\n return type(data)(map(encode, data))\n else:\n return data\n\n event_id, event = event_tufo\n if no_conout is False:\n logger.info('{:d} ({:s})\\n{:s}\\n'.format(event_id, event[\"Task Name\"], pprint.pformat(encode(event))))\n\n if logfile is not None:\n with open(logfile, 'a') as file:\n file.write('{:d} ({:s})\\n{:s}\\n'.format(event_id, event[\"Task Name\"], pprint.pformat(encode(event))))\n", "id": "3543413", "language": "Python", "matching_score": 4.016393661499023, "max_stars_count": 247, "path": "etw/common.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nfrom etw import ETW, ProviderInfo\nfrom etw.GUID import GUID\nfrom etw import common\nfrom etw import evntrace as et\n\n\nclass RDPETW(ETW):\n\n def __init__(\n self,\n ring_buf_size=1024,\n max_str_len=1024,\n min_buffers=0,\n max_buffers=0,\n level=et.TRACE_LEVEL_INFORMATION,\n any_keywords=None,\n all_keywords=None,\n filters=None,\n event_callback=None,\n logfile=None,\n no_conout=False):\n \"\"\"\n Initializes an instance of RDPETW. 
The default parameters represent a very typical use case and should not be\n overridden unless the user knows what they are doing.\n\n :param ring_buf_size: The size of the ring buffer used for capturing events.\n :param max_str_len: The maximum length of the strings the proceed the structure.\n Unless you know what you are doing, do not modify this value.\n :param min_buffers: The minimum number of buffers for an event tracing session.\n Unless you know what you are doing, do not modify this value.\n :param max_buffers: The maximum number of buffers for an event tracing session.\n Unless you know what you are doing, do not modify this value.\n :param level: Logging level\n :param any_keywords: List of keywords to match\n :param all_keywords: List of keywords that all must match\n :param filters: List of filters to apply to capture.\n :param event_callback: Callback for processing events\n :param logfile: Path to logfile.\n :param no_conout: If true does not output live capture to console.\n \"\"\"\n\n self.logfile = logfile\n self.no_conout = no_conout\n if event_callback:\n self.event_callback = event_callback\n else:\n self.event_callback = self.on_event\n\n providers = [ProviderInfo('Microsoft-Windows-TerminalServices-RemoteConnectionManager',\n GUID(\"{C76BAA63-AE81-421C-B425-340B4B24157F}\"),\n level,\n any_keywords,\n all_keywords),\n ProviderInfo('Microsoft-Windows-TerminalServices-LocalSessionManager',\n GUID(\"{5D896912-022D-40AA-A3A8-4FA5515C76D7}\"),\n level,\n any_keywords,\n all_keywords)]\n\n super().__init__(\n ring_buf_size=ring_buf_size,\n max_str_len=max_str_len,\n min_buffers=min_buffers,\n max_buffers=max_buffers,\n event_callback=self.event_callback,\n task_name_filters=filters,\n providers=providers)\n\n def on_event(self, event_tufo):\n '''\n Callback for ETW events\n\n :param event_tufo: tufo containing event information\n :return: Does not return anything\n '''\n\n common.on_event_callback(event_tufo, logfile=self.logfile, no_conout=self.no_conout)\n\n\ndef main(args):\n \"\"\"\n Main function of script. 
Creates object based on input parameters and calls common main.\n\n :param args: a dict of all args.\n :return: Does not return anything.\n \"\"\"\n\n if args['default_filters'] is True:\n args['filters'] = ['MICROSOFT-WINDOWS-TERMINALSERVICES-REMOTECONNECTIONMANAGER',\n 'SESSIONARBITRATION',\n 'NOTIFYLOGONTOLICENSING']\n args.pop('default_filters')\n\n # Create an RDPETW instance with the parameters provided.\n with RDPETW(**args):\n common.run('rdp_etw', args['filters'])\n\n\nif __name__ == '__main__':\n main(common.parse_base_args(common.set_base_args('RDP')))\n", "id": "12059746", "language": "Python", "matching_score": 0.9123473167419434, "max_stars_count": 247, "path": "examples/providers/rdpetw.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport time\nimport unittest\nimport subprocess as sp\n\nfrom examples.providers import procetw\n\n\nclass TestPROCETW(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Prior to running each of our tests, we should start the ETW code, create and delete a share, and capture the\n subsequent response.\n\n :return: None\n \"\"\"\n\n # Instantiate our list where all of the results will be stored\n cls.event_tufo_list = list()\n cls.context_fields = {'Description', 'Task Name'}\n\n # Instantiate an PROCETW object\n capture = procetw.PROCETW(event_callback=lambda event_tufo: cls.event_tufo_list.append(event_tufo),\n any_keywords=['WINEVENT_KEYWORD_PROCESS', 'WINEVENT_KEYWORD_THREAD'])\n capture.start()\n\n # start notepad\n args = ['notepad.exe']\n p = sp.Popen(args, stdout=sp.DEVNULL, stderr=sp.DEVNULL)\n time.sleep(5)\n p.kill()\n time.sleep(5)\n\n # Stop the PROCETW instance\n capture.stop()\n\n return\n\n def find_event(self, name):\n \"\"\"\n Retrieves an event from the event_tufo_list with the user's specified name. While the event\n itself is a TuFo, we only return the dictionary portion since the name is only needed during the search.\n\n :param name: The name of the event we want to find.\n :return: An event matching the name specified or None if no events match.\n \"\"\"\n return next((tufo[1] for tufo in self.event_tufo_list if tufo[1]['Task Name'] == name), None)\n\n def find_all_events(self, name):\n \"\"\"\n Retrieves all events matching the user's specified name from the event_tufo list. While the events themselves\n are TuFos, we only return the dictionary portion since the name is only needed during the search.\n\n :param name: The name of the events we want to find\n :return: A list of all events matching the name. If no events are found, an empty list is returned.\n \"\"\"\n return [tufo[1] for tufo in self.event_tufo_list if tufo[1]['Task Name'] == name]\n\n def trim_fields(self, event):\n \"\"\"\n We add additional fields for contextual information. 
In order to accurately test that we are parsing\n the correct fields as reported by the event, we need to trim these off.\n\n :return: A copy of the event without the contextual fields\n \"\"\"\n return {key: event[key] for key in event.keys() if key not in self.context_fields}\n\n def test_thread_start(self):\n \"\"\"\n Test a THREADSTART event.\n\n :return: None\n \"\"\"\n\n event = self.find_event('THREADSTART')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n keys = event.keys()\n\n # This event should have 11 field\n self.assertEqual(len(event), 11)\n\n self.assertIn('ProcessID', keys)\n self.assertIn('StackBase', keys)\n self.assertIn('StackLimit', keys)\n self.assertIn('StartAddr', keys)\n self.assertIn('SubProcessTag', keys)\n self.assertIn('TebBase', keys)\n self.assertIn('ThreadID', keys)\n self.assertIn('UserStackBase', keys)\n self.assertIn('UserStackLimit', keys)\n self.assertIn('Win32StartAddr', keys)\n\n return\n\n def test_thread_stop(self):\n \"\"\"\n Test a THREADSTOP event.\n\n :return: None\n \"\"\"\n\n event = self.find_event('THREADSTOP')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n keys = event.keys()\n\n # This event should have 12 fields\n self.assertEqual(len(event), 12)\n\n self.assertIn('ProcessID', keys)\n self.assertIn('StackBase', keys)\n self.assertIn('StackLimit', keys)\n self.assertIn('StartAddr', keys)\n self.assertIn('SubProcessTag', keys)\n self.assertIn('TebBase', keys)\n self.assertIn('ThreadID', keys)\n self.assertIn('UserStackBase', keys)\n self.assertIn('UserStackLimit', keys)\n self.assertIn('Win32StartAddr', keys)\n\n return\n\n def test_process_start(self):\n \"\"\"\n Test a PROCESSSTART event.\n\n :return: None\n \"\"\"\n\n event = self.find_event('PROCESSSTART')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n keys = event.keys()\n\n # This event should have 6 fields\n self.assertEqual(len(event), 6)\n\n self.assertIn('ImageName', keys)\n self.assertIn('ParentProcessID', keys)\n self.assertIn('ProcessID', keys)\n self.assertIn('SessionID', keys)\n\n return\n\n def test_process_stop(self):\n \"\"\"\n Test a PROCESSSTOP event.\n\n :return: None\n \"\"\"\n\n event = self.find_event('PROCESSSTOP')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n keys = event.keys()\n\n # This event should have 16 fields\n self.assertEqual(len(event), 16)\n\n self.assertIn('ExitCode', keys)\n self.assertIn('ExitTime', keys)\n self.assertIn('HandleCount', keys)\n self.assertIn('HardFaultCount', keys)\n self.assertIn('ImageName', keys)\n self.assertIn('ProcessID', keys)\n self.assertIn('ReadOperationCount', keys)\n self.assertIn('ReadTransferKiloBytes', keys)\n self.assertIn('TokenElevationType', keys)\n self.assertIn('WriteOperationCount', keys)\n self.assertIn('WriteTransferKiloBytes', keys)\n\n return\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "10227729", "language": "Python", "matching_score": 5.20667028427124, "max_stars_count": 247, "path": "tests/test_procetw.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport unittest\nimport time\nimport ctypes as ct\nimport ctypes.wintypes as wt\nimport subprocess as sp\n\nfrom etw import ETW, ProviderInfo\nfrom etw.etw import TraceProperties, ProviderParameters, EventConsumer\nfrom etw.GUID import GUID\nfrom etw import evntrace as et\nfrom etw import evntprov as ep\nfrom etw.etw import get_keywords_bitmask\nfrom .helpers import wininet as wi\nfrom etw import common\n\n\nclass TestETW(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Prior to running each of our tests, we should start the ETW code, create and delete a share, and capture the\n subsequent response.\n\n :return: None\n \"\"\"\n\n # Instantiate our list where all of the results will be stored\n cls.event_tufo_list = list()\n cls.context_fields = {'Description', 'Task Name'}\n cls.user_agent = 'Test<PASSWORD>'\n cls.url = 'www.gmail.com'\n cls.port = 80\n cls.verb = 'GET'\n cls.size = 1337\n return\n\n def makeRequest(cls):\n \"\"\"\n Issue a WININET request based on the class parameters.\n\n :return: None\n \"\"\"\n hInternet = wi.InternetOpenW(\n cls.user_agent,\n wi.INTERNET_OPEN_TYPE_DIRECT, None, None, 0)\n if hInternet is None:\n raise ct.WinError()\n\n hSession = wi.InternetConnectW(hInternet, cls.url, cls.port, None, None, wi.INTERNET_SERVICE_HTTP, 0, 0)\n if hSession is None:\n raise ct.WinError()\n\n hRequest = wi.HttpOpenRequestW(hSession, cls.verb, '', None, None, None, 0, 0)\n if hRequest is None:\n raise ct.WinError()\n\n request_sent = wi.HttpSendRequestW(hRequest, None, 0, None, 0)\n if request_sent == 0:\n raise ct.WinError()\n\n # Setup the necessary parameters to read the server's response\n buff_size = wt.DWORD(cls.size)\n buf = (ct.c_char * buff_size.value)()\n keep_reading = 1\n bytes_read = wt.DWORD(-1)\n response_str = str()\n\n while keep_reading == 1 and bytes_read.value != 0:\n # Read the entire response.\n keep_reading = wi.InternetReadFile(hRequest, buf, buff_size, ct.byref(bytes_read))\n response_str += str(buf.value)\n\n return response_str\n\n def find_event(self, name):\n \"\"\"\n Retrieves an event from the event_tufo_list with the user's specified name. While the event\n itself is a TuFo, we only return the dictionary portion since the name is only needed during the search.\n\n :param name: The name of the event we want to find.\n :return: An event matching the name specified or None if no events match.\n \"\"\"\n return next((tufo[1] for tufo in self.event_tufo_list if tufo[1]['Task Name'] == name), None)\n\n def find_all_events(self, name):\n \"\"\"\n Retrieves all events matching the user's specified name from the event_tufo list. While the events themselves\n are TuFos, we only return the dictionary portion since the name is only needed during the search.\n\n :param name: The name of the events we want to find\n :return: A list of all events matching the name. If no events are found, an empty list is returned.\n \"\"\"\n return [tufo[1] for tufo in self.event_tufo_list if tufo[1]['Task Name'] == name]\n\n def trim_fields(self, event):\n \"\"\"\n We add additional fields for contextual information. 
In order to accurately test that we are parsing\n the correct fields as reported by the event, we need to trim these off.\n\n :return: A copy of the event without the contextual fields\n \"\"\"\n return {key: event[key] for key in event.keys() if key not in self.context_fields}\n\n def test_etw_capture(self):\n \"\"\"\n Tests the etw capture\n\n :return: None\n \"\"\"\n\n # Instantiate an ETW object\n capture = ETW(providers=[ProviderInfo('Microsoft-Windows-WinINet',\n GUID(\"{43D1A55C-76D6-4F7E-995C-64C711E5CAFE}\"))],\n event_callback=lambda event_tufo: self.event_tufo_list.append(event_tufo))\n capture.start()\n\n self.makeRequest()\n\n # Ensure that we have a chance for all the events to come back\n time.sleep(5)\n\n # Stop the ETW instance\n capture.stop()\n event = self.find_event('WININET_READDATA')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n # This event should have 3 fields\n self.assertEqual(len(event), 3)\n self.event_tufo = []\n\n return\n\n def test_etw_capture_multi_providers(self):\n \"\"\"\n Tests the etw capture class using multiple providers\n\n :return: None\n \"\"\"\n\n # Instantiate an ETW object\n providers = [ProviderInfo('Microsoft-Windows-WinINet',\n GUID(\"{43D1A55C-76D6-4F7E-995C-64C711E5CAFE}\")),\n ProviderInfo('Microsoft-Windows-Kernel-Process',\n GUID(\"{22FB2CD6-0E7B-422B-A0C7-2FAD1FD0E716}\"))]\n\n capture = ETW(providers=providers,\n event_callback=lambda event_tufo: self.event_tufo_list.append(event_tufo))\n\n capture.start()\n\n # start ping\n args = ['ping.exe']\n p = sp.Popen(args, stdout=sp.DEVNULL, stderr=sp.DEVNULL)\n time.sleep(5)\n p.kill()\n\n self.makeRequest()\n\n # Stop the ETW instance\n capture.stop()\n\n # check for process start\n event = self.find_event('PROCESSSTART')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n # This event should have 6 fields\n self.assertEqual(len(event), 6)\n\n event = self.find_event('WININET_READDATA')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n # This event should have 3 fields\n self.assertEqual(len(event), 3)\n\n self.event_tufo = []\n\n return\n\n def test_etw_multi_providers_bitmask(self):\n \"\"\"\n Tests the etw capture class using multiple providers\n\n :return: None\n \"\"\"\n\n # Instantiate an ProviderInfo object\n provider = ProviderInfo('Microsoft-Windows-Kernel-Process',\n GUID(\"{22FB2CD6-0E7B-422B-A0C7-2FAD1FD0E716}\"),\n any_keywords=['WINEVENT_KEYWORD_PROCESS'],\n all_keywords=['WINEVENT_KEYWORD_THREAD'])\n\n assert(provider.any_bitmask == 0x0000000000000010)\n assert(provider.all_bitmask == 0x0000000000000020)\n\n # add provider\n provider = ProviderInfo('Microsoft-Windows-WinINet',\n GUID(\"{43D1A55C-76D6-4F7E-995C-64C711E5CAFE}\"),\n any_keywords=['WININET_KEYWORD_HANDLES'],\n all_keywords=['WININET_KEYWORD_HTTP'])\n\n assert(provider.any_bitmask == 0x0000000000000001)\n assert(provider.all_bitmask == 0x0000000000000002)\n\n return\n\n def test_etw_get_keywords_bitmask(self):\n \"\"\"\n Tests to ensure the correct bitmask is found for the provider (Windows Kernel Trace)\n\n :return: None\n \"\"\"\n\n assert(get_keywords_bitmask(\n GUID('{9E814AAD-3204-11D2-9A82-006008A86939}'),\n ['process']) == 0x0000000000000001)\n\n return\n\n def test_etw_nt_logger(self):\n \"\"\"\n Tests to ensure nt kernel logger capture works properly\n\n :return: None\n \"\"\"\n\n capture = ETW(session_name='NT Kernel Logger',\n providers=[ProviderInfo('Windows Kernel Trace',\n GUID(\"{9E814AAD-3204-11D2-9A82-006008A86939}\"),\n any_keywords=['process'])],\n 
event_callback=lambda event_tufo: self.event_tufo_list.append(event_tufo))\n capture.start()\n\n # start ping.exe\n args = ['ping.exe']\n p = sp.Popen(args, stdout=sp.DEVNULL, stderr=sp.DEVNULL)\n time.sleep(2)\n p.kill()\n capture.stop()\n\n event = self.find_event('PROCESS')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n # This event should have 10 fields\n self.assertEqual(len(event), 10)\n self.event_tufo = []\n return\n\n def test_etw_eq(self):\n \"\"\"\n Test container classes comparision\n\n :return: None\n \"\"\"\n\n params = et.ENABLE_TRACE_PARAMETERS()\n params.Version = 1\n other_params = et.ENABLE_TRACE_PARAMETERS()\n other_params.Version = 1\n\n provider = ProviderInfo('Microsoft-Windows-Kernel-Process',\n GUID(\"{22FB2CD6-0E7B-422B-A0C7-2FAD1FD0E716}\"),\n any_keywords=['WINEVENT_KEYWORD_PROCESS'],\n params=ct.pointer(params))\n\n other_provider = ProviderInfo('Microsoft-Windows-Kernel-Process',\n GUID(\"{22FB2CD6-0E7B-422B-A0C7-2FAD1FD0E716}\"),\n any_keywords=['WINEVENT_KEYWORD_PROCESS'],\n params=ct.pointer(other_params))\n self.assertEqual(provider, other_provider)\n other_params.Version = 2\n self.assertNotEqual(provider, other_provider)\n event_id_list = [54]\n event_filter = ep.EVENT_FILTER_EVENT_ID(common.TRUE, event_id_list).get()\n event_filters = [ep.EVENT_FILTER_DESCRIPTOR(ct.addressof(event_filter.contents),\n ct.sizeof(event_filter.contents) +\n ct.sizeof(wt.USHORT) * len(event_id_list),\n ep.EVENT_FILTER_TYPE_EVENT_ID)]\n properties = ProviderParameters(0, event_filters)\n other_properties = ProviderParameters(0, event_filters)\n self.assertEqual(properties, other_properties)\n\n other_properties.get().contents.Version = 1\n self.assertNotEqual(properties, other_properties)\n\n params = TraceProperties(1024, 1024, 0, 10)\n other_params = TraceProperties(1024, 1024, 0, 10)\n self.assertEqual(params, other_params)\n other_params.get().contents.BufferSize = 1025\n\n self.assertNotEqual(params, other_params)\n\n return\n\n def test_callback_flag_good(self):\n \"\"\"\n Test to check good flag value\n\n :return: None\n \"\"\"\n self.assertNotEqual(EventConsumer('test', None, None, None, common.RETURN_RAW_DATA_ONLY), None)\n self.assertNotEqual(EventConsumer('test', None, None, None, common.RETURN_RAW_DATA_ON_ERROR), None)\n self.assertNotEqual(EventConsumer('test', None, None, None, common.RETURN_ONLY_RAW_DATA_ON_ERROR), None)\n self.assertNotEqual(EventConsumer('test', None, None, None, common.RETURN_RAW_UNFORMATTED_DATA), None)\n\n def test_callback_flag_bad(self):\n \"\"\"\n Test to check bad flag value\n\n :return: None\n \"\"\"\n consumer = None\n try:\n consumer = EventConsumer('test', None, None, None, 1234)\n except:\n pass\n self.assertEqual(consumer, None)\n\n def test_etw_callback_wait(self):\n \"\"\"\n Tests the etw capture wait time\n\n :return: None\n \"\"\"\n\n # Instantiate an ETW object\n capture = ETW(providers=[ProviderInfo('Microsoft-Windows-Kernel-Process',\n GUID(\"{22FB2CD6-0E7B-422B-A0C7-2FAD1FD0E716}\"))],\n event_callback=lambda event_tufo: self.event_tufo_list.append(event_tufo),\n callback_wait_time=0.0025)\n capture.start()\n # start ping\n args = ['ping.exe']\n p = sp.Popen(args, stdout=sp.DEVNULL, stderr=sp.DEVNULL)\n time.sleep(5)\n p.kill()\n\n # Stop the ETW instance\n capture.stop()\n\n # check for process start\n event = self.find_event('PROCESSSTART')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n # This event should have 6 fields\n self.assertEqual(len(event), 6)\n self.event_tufo = []\n\n 
return\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "5458680", "language": "Python", "matching_score": 5.504612922668457, "max_stars_count": 247, "path": "tests/test_etw.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport unittest\nimport time\nimport ipaddress\nimport ctypes as ct\nimport ctypes.wintypes as wt\n\nfrom .helpers import wininet as wi\nfrom examples.providers import inetetw\nfrom .helpers import httpstatus\n\n# Constants\nMAX_INT32 = 2**32 - 1\nMAX_INT16 = 2**16 - 1\nMAX_INT8 = 2**8 - 1\n\n\ndef validate_http_status(status_str):\n \"\"\"\n Enumerates the possible HTTP status codes and ensures that the supplied\n status is valid.\n \"\"\"\n if int(status_str, 10) in map(int, httpstatus.HTTPStatus):\n return True\n\n return False\n\n\ndef validate_verb(verb_str):\n \"\"\"\n Determines whether or not the supplied verb is a valid HTTP verb.\n \"\"\"\n valid_verbs = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE']\n\n if verb_str in valid_verbs:\n return True\n\n return False\n\n\ndef value_between(value, low, high):\n \"\"\"\n Determines whether the value is in the acceptable range. low and high are inclusive. This is\n just for convenience and consistency regarding inclusive or exclusive bounds.\n \"\"\"\n if low > value < high:\n return False\n\n return True\n\n\ndef validate_ip(ip_str):\n \"\"\"\n Validates the entire IP Address string by ensuring there are 4 octets and\n that each octet's\n value is between 0 and 255.\n \"\"\"\n try:\n ipaddress.ip_address(ip_str)\n except ValueError:\n return False\n\n return True\n\n\ndef validate_port(port_str):\n \"\"\"\n Ensures that we ahve a valid port. 
In other words, the port number is\n between 0 and 65535.\n This is just for convenience as it simply wraps value_between().\n \"\"\"\n return value_between(int(port_str, 10), 0, MAX_INT16)\n\n\nclass TestINETETW(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Prior to running each of our tests, we should start the ETW code, issue\n a request, and capture the response.\n\n :return: None\n \"\"\"\n\n # Instantiate our list where all of the results will be stored\n cls.event_tufo_list = []\n\n # The parameters for the HTTP request\n cls.user_agent = 'TestAgent'\n cls.url = 'www.gmail.com'\n cls.port = 80\n cls.verb = 'GET'\n cls.size = 1337\n cls.context_fields = {'Description', 'Task Name'}\n\n # Instantiate an INETETW object\n p = inetetw.INETETW(event_callback=lambda event_tufo: cls.event_tufo_list.append(event_tufo))\n p.start()\n\n # Make a WinINet request and save the actual response.\n cls.wininet_response = cls.makeRequest()\n\n # Ensure that we have a chance for all the events to come back\n time.sleep(5)\n\n # Stop capturing the WinINet provider and processing with the consumer.\n p.stop()\n\n @classmethod\n def makeRequest(cls):\n \"\"\"\n Issue a WININET request based on the class parameters.\n\n :return: None\n \"\"\"\n hInternet = wi.InternetOpenW(\n cls.user_agent,\n wi.INTERNET_OPEN_TYPE_DIRECT, None, None, 0)\n if hInternet is None:\n raise ct.WinError()\n\n hSession = wi.InternetConnectW(hInternet, cls.url, cls.port, None, None, wi.INTERNET_SERVICE_HTTP, 0, 0)\n if hSession is None:\n raise ct.WinError()\n\n hRequest = wi.HttpOpenRequestW(hSession, cls.verb, '', None, None, None, 0, 0)\n if hRequest is None:\n raise ct.WinError()\n\n request_sent = wi.HttpSendRequestW(hRequest, None, 0, None, 0)\n if request_sent == 0:\n raise ct.WinError()\n\n # Setup the necessary parameters to read the server's response\n buff_size = wt.DWORD(cls.size)\n buf = (ct.c_char * buff_size.value)()\n keep_reading = 1\n bytes_read = wt.DWORD(-1)\n response_str = str()\n\n while keep_reading == 1 and bytes_read.value != 0:\n # Read the entire response.\n keep_reading = wi.InternetReadFile(hRequest, buf, buff_size, ct.byref(bytes_read))\n response_str += str(buf.value)\n\n return response_str\n\n def find_event(self, name):\n \"\"\"\n Retrieves an event from the event_tufo_list with the user's specified name. While the event\n itself is a TuFo, we only return the dictionary portion since the name is only needed during the search.\n\n :param name: The name of the event we want to find.\n :return: An event matching the name specified or None if no events match.\n \"\"\"\n return next((tufo[1] for tufo in self.event_tufo_list if tufo[1]['Task Name'] == name), None)\n\n def find_all_events(self, name):\n \"\"\"\n Retrieves all events matching the user's specified name from the event_tufo list. While the events themselves\n are TuFos, we only return the dictionary portion since the name is only needed during the search.\n\n :param name: The name of the events we want to find\n :return: A list of all events matching the name. If no events are found, an empty list is returned.\n \"\"\"\n return [tufo[1] for tufo in self.event_tufo_list if tufo[1]['Task Name'] == name]\n\n def trim_fields(self, event):\n \"\"\"\n We add additional fields for contextual information. 
In order to accurately test that we are parsing\n the correct fields as reported by the event, we need to trim these off.\n\n :return: A copy of the event without the contextual fields\n \"\"\"\n return {key: event[key] for key in event.keys() if key not in self.context_fields}\n\n def test_wininet_usagelogrequest(self):\n \"\"\"\n Test a WinINet_UsageLogRequest event.\n\n :return: None\n \"\"\"\n event = self.find_event('WININET_USAGELOGREQUEST')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n event_length = len(event)\n keys = event.keys()\n\n self.assertTrue(value_between(event_length, 6, 24))\n\n # This target contains exactly 6 fields. Despite this, the RequestHeaders and\n # ResponseHeaders fields can contain an unknown amount of additional fields. So, if we have\n # over 6 fields, we ensure that they are both present.\n if event_length > 6:\n self.assertIn('RequestHeaders', keys)\n self.assertIn('ResponseHeaders', keys)\n\n # Assert that we captured a request with the verb that we sent.\n self.assertEqual(event['Verb'], self.verb)\n\n # Assert that we captured a request with user agent we supplied\n if 'User-Agent' in keys:\n self.assertEqual(event['User-Agent'], self.user_agent)\n\n return\n\n def test_wininet_readdata(self):\n \"\"\"\n Test a WinINet_ReadData event.\n\n :return: None\n \"\"\"\n event = self.find_event('WININET_READDATA')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n keys = event.keys()\n\n # This target should have 3 fields\n self.assertEqual(len(event), 3)\n\n # Ensure these are the 2 fields that should be there\n self.assertIn('Request', keys)\n self.assertIn('Size', keys)\n\n # Values like this appear to be a handle. They should be 32-bits.\n self.assertTrue(value_between(int(event['Request'], 16), 0, MAX_INT32))\n\n # In testing, 16384 (0x4000) appeared to be the max value for this target\n self.assertTrue(value_between(int(str(event['Size']), 10), 0, 16384))\n\n return\n\n def test_wininet_connect(self):\n \"\"\"\n Test a WinINet_Connect event.\n\n :return:\n \"\"\"\n event = self.find_event('WININET_CONNECT')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n event_length = len(event)\n keys = event.keys()\n\n # There appear to be 2 types of WININET_CONNECT targets with 2 and 8 fields respectively.\n self.assertTrue(value_between(event_length, 2, 8))\n\n if event_length == 2:\n self.assertIn('Request', keys)\n self.assertTrue(value_between(int(event['Request'], 16), 0, MAX_INT32))\n return\n\n # Ensure that the Socket field exists and is valid.\n self.assertIn('Socket', keys)\n self.assertTrue(validate_port(event['Socket']))\n\n # Ensure that the Protocol is exists. We don't know how many different protocols are\n # supported in this type of target.\n self.assertIn('Protocol', keys)\n\n # Validate that LocalAddressLength and LocalAddress both exists. They should not reflect one\n # another. THe length always appears to be 16 regardless of the address itself.\n self.assertIn('LocalAddressLength', keys)\n self.assertIn('LocalAddress', event)\n\n # Validate that Socket and the port number are the same. Validate the IP address.\n if int(event['LocalAddressLength'], 10) != 0:\n addr, port = event['LocalAddress'].split(':')\n self.assertEqual(port, event['Socket'])\n self.assertTrue(validate_ip(addr))\n\n # Validate that RemoteAddressLength and RemoteAddress both exist. They should not reflect\n # one another. 
The length always appears to be 16 regardless of the address itself.\n self.assertIn('RemoteAddressLength', keys)\n self.assertIn('RemoteAddress', keys)\n\n if int(event['RemoteAddressLength'], 10) != 0:\n addr, port = event['RemoteAddress'].split(':')\n self.assertTrue(validate_ip(addr))\n self.assertTrue(validate_port(port))\n\n return\n\n def test_wininet_dns_query(self):\n \"\"\"\n Test a WinINet_DNS_Query event.\n\n :return:\n \"\"\"\n\n event = self.find_event('WININET_DNS_QUERY')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n event_length = len(event)\n keys = event.keys()\n\n # There are 3 separate instances of WININET_DNS_QUERY targets with 3, 4, and 5 fields\n # respectively.\n self.assertTrue(value_between(event_length, 3, 5))\n\n # Every type of WININET_DNS_QUERY target has a _HostNameLength, and HostName field.\n # Ensure that they both exist and accurately reflects each other. RFC 1035 states that the\n # maximum length for a hostname is 255 bytes -- ensure that our values are valid.\n self.assertIn('_HostNameLength', keys)\n self.assertIn('HostName', keys)\n self.assertEqual(int(event['_HostNameLength'], 10), len(event['HostName']))\n self.assertTrue(value_between(int(event['_HostNameLength'], 10), 0, MAX_INT8))\n\n # Every type of WININET_DNS_QUERY has a RequestHandle field. We believe this should be at\n # most 4 bytes.\n self.assertIn('RequestHandle', keys)\n self.assertTrue(value_between(int(event['RequestHandle'], 16), 0, MAX_INT32))\n\n # The WININET_DNS_QUERY target that has 4 fields, exclusively has the Error field. We do\n # not know what the acceptable values are here.\n if event_length == 5:\n self.assertIn('Error', keys)\n\n # If the WININET_DNS_QUERY target has 6 fields, the _AddressListLength and AddressList\n # fields must be present. Ensure that they accurately reflect each other and that the\n # address list is a valid IP address.\n if event_length == 6:\n self.assertIn('_AddressListLength', keys)\n self.assertIn('AddressList', keys)\n self.assertEqual(int(event['_AddressListLength'], 10), len(event['AddressList']))\n\n for address in event['AddressList'].rstrip(';').split(';'):\n self.assertTrue(validate_ip(address))\n\n return\n\n def test_wininet_http_response(self):\n event = self.find_event('WININET_HTTP_RESPONSE')\n self.assertTrue(event)\n event = self.trim_fields(event)\n\n keys = event.keys()\n\n # There are precisely 8 fields in this structure\n self.assertEqual(len(event), 8)\n\n # Ensure that the ResponseCode field is present and is a valid code\n self.assertIn('ResponseCode', keys)\n self.assertTrue(validate_http_status(str(event['ResponseCode'])))\n\n # We believe that the RequestHandle field should be 4 bytes at most.\n self.assertIn('RequestHandle', keys)\n self.assertTrue(value_between(int(event['RequestHandle'], 16), 0, MAX_INT32))\n\n # We do not know what the acceptable values for the SocketHandle field\n self.assertIn('SocketHandle', keys)\n\n # Ensure that the Verb length and Verb are present, reflect one another, and are valid.\n self.assertIn('_VerbLength', keys)\n self.assertIn('Verb', keys)\n self.assertEqual(int(event['_VerbLength'], 0), len(event['Verb']))\n self.assertTrue(validate_verb(event['Verb']))\n\n # Ensure the _ContentLengthStrLength and ContentLength fields are present and that they\n # reflect one another. 
RFC-2616 does not place a maximum value on ContentLength -- it must\n # simply be >0.\n self.assertIn('_ContentLengthStrLength', keys)\n self.assertIn('ContentLength', keys)\n self.assertEqual(int(event['_ContentLengthStrLength'], 10), len(event['ContentLength']))\n self.assertGreaterEqual(int(event['ContentLength'], 10), 0)\n\n def test_wininet_capture(self):\n event_list = self.find_all_events('MICROSOFT-WINDOWS-WININET-CAPTURE')\n if not event_list:\n self.skipTest('This OS does not support WININET-CAPTURE')\n\n for event in event_list:\n event = self.trim_fields(event)\n keys = event.keys()\n\n # There are 4 fields in the WININET_CAPTURE target or 5 fields if PayloadByteLength is\n # not 0.\n self.assertTrue(value_between(len(event), 4, 5))\n\n # Ensure that the SessionId, SequenceNumber, and Flags fields are all present.\n self.assertIn('SessionId', keys)\n self.assertIn('SequenceNumber', keys)\n self.assertIn('Flags', keys)\n\n # Ensure that PayloadByteLength is present. If it is, ensure that Payload is present and\n # that PayloadByteLength is no more than the value specified globally here.\n self.assertIn('PayloadByteLength', keys)\n\n payload_byte_length = int(event['PayloadByteLength'], 10)\n self.assertLessEqual(payload_byte_length, self.size)\n\n if payload_byte_length > 0:\n self.assertIn('Payload', keys)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "5953319", "language": "Python", "matching_score": 4.159433841705322, "max_stars_count": 247, "path": "tests/test_inetetw.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport ctypes as ct\nimport ctypes.wintypes as wt\n\nfrom etw.common import ULONG_PTR\n\n\n# Types\nHINTERNET = wt.HANDLE\nDWORD_PTR = ULONG_PTR\nINTERNET_PORT = wt.USHORT\n\n# Definitions\nINTERNET_OPEN_TYPE_PRECONFIG = 0\nINTERNET_OPEN_TYPE_DIRECT = 1\nINTERNET_OPEN_TYPE_PROXY = 3\n\nINTERNET_SERVICE_FTP = 1\nINTERNET_SERVICE_GOPHER = 2\nINTERNET_SERVICE_HTTP = 3\n\nInternetOpenW = ct.windll.Wininet.InternetOpenW\nInternetOpenW.argtypes = [wt.LPCWSTR,\n wt.DWORD,\n wt.LPCWSTR,\n wt.LPCWSTR,\n wt.DWORD]\nInternetOpenW.restype = HINTERNET\n\n\nInternetConnectW = ct.windll.Wininet.InternetConnectW\nInternetConnectW.argtypes = [HINTERNET,\n wt.LPCWSTR,\n INTERNET_PORT,\n wt.LPCWSTR,\n wt.LPCWSTR,\n wt.DWORD,\n wt.DWORD,\n DWORD_PTR]\nInternetConnectW.restype = HINTERNET\n\nHttpOpenRequestW = ct.windll.Wininet.HttpOpenRequestW\nHttpOpenRequestW.argtypes = [HINTERNET,\n wt.LPCWSTR,\n wt.LPCWSTR,\n wt.LPCWSTR,\n wt.LPCWSTR,\n ct.POINTER(wt.LPCWSTR),\n wt.DWORD,\n DWORD_PTR]\nHttpOpenRequestW.restype = HINTERNET\n\nHttpSendRequestW = ct.windll.Wininet.HttpSendRequestW\nHttpSendRequestW.argtypes = [HINTERNET,\n wt.LPCWSTR,\n wt.DWORD,\n wt.LPVOID,\n wt.DWORD]\nHttpSendRequestW.restype = wt.BOOL\n\nInternetReadFile = 
ct.windll.Wininet.InternetReadFile\nInternetReadFile.argtypes = [HINTERNET,\n wt.LPVOID,\n wt.DWORD,\n wt.LPDWORD]\nInternetReadFile.restype = wt.BOOL\n", "id": "2323478", "language": "Python", "matching_score": 0.32872679829597473, "max_stars_count": 247, "path": "tests/helpers/wininet.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport ctypes as ct\nimport ctypes.wintypes as wt\n\nfrom etw.common import convert_bool_str\nfrom etw.GUID import GUID\nfrom etw import evntcons as ec\nfrom etw import evntprov as ep\n\nERROR_SUCCESS = 0x0\nERROR_INSUFFICIENT_BUFFER = 0x7A\nERROR_NOT_FOUND = 0x490\nERROR_EVT_INVALID_EVENT_DATA = 0x3A9D\nERROR_ALREADY_EXISTS = 0xB7\n\n# enum _TDH_IN_TYPE {\n# TDH_INTYPE_NULL,\n# TDH_INTYPE_UNICODESTRING,\n# TDH_INTYPE_ANSISTRING,\n# TDH_INTYPE_INT8,\n# TDH_INTYPE_UINT8,\n# TDH_INTYPE_INT16,\n# TDH_INTYPE_UINT16,\n# TDH_INTYPE_INT32,\n# TDH_INTYPE_UINT32,\n# TDH_INTYPE_INT64,\n# TDH_INTYPE_UINT64,\n# TDH_INTYPE_FLOAT,\n# TDH_INTYPE_DOUBLE,\n# TDH_INTYPE_BOOLEAN,\n# TDH_INTYPE_BINARY,\n# TDH_INTYPE_GUID,\n# TDH_INTYPE_POINTER,\n# TDH_INTYPE_FILETIME,\n# TDH_INTYPE_SYSTEMTIME,\n# TDH_INTYPE_SID,\n# TDH_INTYPE_HEXINT32,\n# TDH_INTYPE_HEXINT64, // End of winmeta intypes.\n# TDH_INTYPE_COUNTEDSTRING = 300, // Start of TDH intypes for WBEM.\n# TDH_INTYPE_COUNTEDANSISTRING,\n# TDH_INTYPE_REVERSEDCOUNTEDSTRING,\n# TDH_INTYPE_REVERSEDCOUNTEDANSISTRING,\n# TDH_INTYPE_NONNULLTERMINATEDSTRING,\n# TDH_INTYPE_NONNULLTERMINATEDANSISTRING,\n# TDH_INTYPE_UNICODECHAR,\n# TDH_INTYPE_ANSICHAR,\n# TDH_INTYPE_SIZET,\n# TDH_INTYPE_HEXDUMP,\n# TDH_INTYPE_WBEMSID\n# };\n\nTDH_INTYPE_NULL = 0\nTDH_INTYPE_UNICODESTRING = 1\nTDH_INTYPE_ANSISTRING = 2\nTDH_INTYPE_INT8 = 3\nTDH_INTYPE_UINT8 = 4\nTDH_INTYPE_INT16 = 5\nTDH_INTYPE_UINT16 = 6\nTDH_INTYPE_INT32 = 7\nTDH_INTYPE_UINT32 = 8\nTDH_INTYPE_INT64 = 9\nTDH_INTYPE_UINT64 = 10\nTDH_INTYPE_FLOAT = 11\nTDH_INTYPE_DOUBLE = 12\nTDH_INTYPE_BOOLEAN = 13\nTDH_INTYPE_BINARY = 14\nTDH_INTYPE_GUID = 15\nTDH_INTYPE_POINTER = 16\nTDH_INTYPE_FILETIME = 17\nTDH_INTYPE_SYSTEMTIME = 18\nTDH_INTYPE_SID = 19\nTDH_INTYPE_HEXINT32 = 20\nTDH_INTYPE_HEXINT64 = 21\nTDH_INTYPE_COUNTEDSTRING = 300\nTDH_INTYPE_COUNTEDANSISTRING = 301\nTDH_INTYPE_REVERSEDCOUNTEDSTRING = 302\nTDH_INTYPE_REVERSEDCOUNTEDANSISTRING = 303\nTDH_INTYPE_NONNULLTERMINATEDSTRING = 304\nTDH_INTYPE_NONNULLTERMINATEDANSISTRING = 305\nTDH_INTYPE_UNICODECHAR = 306\nTDH_INTYPE_ANSICHAR = 307\nTDH_INTYPE_SIZET = 308\nTDH_INTYPE_HEXDUMP = 309\nTDH_INTYPE_WBEMSID = 310\n\n# enum _TDH_OUT_TYPE {\n# TDH_OUTTYPE_NULL,\n# TDH_OUTTYPE_STRING,\n# TDH_OUTTYPE_DATETIME,\n# TDH_OUTTYPE_BYTE,\n# TDH_OUTTYPE_UNSIGNEDBYTE,\n# TDH_OUTTYPE_SHORT,\n# TDH_OUTTYPE_UNSIGNEDSHORT,\n# TDH_OUTTYPE_INT,\n# TDH_OUTTYPE_UNSIGNEDINT,\n# TDH_OUTTYPE_LONG,\n# TDH_OUTTYPE_UNSIGNEDLONG,\n# TDH_OUTTYPE_FLOAT,\n# 
TDH_OUTTYPE_DOUBLE,\n# TDH_OUTTYPE_BOOLEAN,\n# TDH_OUTTYPE_GUID,\n# TDH_OUTTYPE_HEXBINARY,\n# TDH_OUTTYPE_HEXINT8,\n# TDH_OUTTYPE_HEXINT16,\n# TDH_OUTTYPE_HEXINT32,\n# TDH_OUTTYPE_HEXINT64,\n# TDH_OUTTYPE_PID,\n# TDH_OUTTYPE_TID,\n# TDH_OUTTYPE_PORT,\n# TDH_OUTTYPE_IPV4,\n# TDH_OUTTYPE_IPV6,\n# TDH_OUTTYPE_SOCKETADDRESS,\n# TDH_OUTTYPE_CIMDATETIME,\n# TDH_OUTTYPE_ETWTIME,\n# TDH_OUTTYPE_XML,\n# TDH_OUTTYPE_ERRORCODE,\n# TDH_OUTTYPE_WIN32ERROR,\n# TDH_OUTTYPE_NTSTATUS,\n# TDH_OUTTYPE_HRESULT, // End of winmeta outtypes.\n# TDH_OUTTYPE_CULTURE_INSENSITIVE_DATETIME, //Culture neutral datetime string.\n# TDH_OUTTYPE_JSON,\n# TDH_OUTTYPE_REDUCEDSTRING = 300, // Start of TDH outtypes for WBEM.\n# TDH_OUTTYPE_NOPRINT\n# }\n\nTDH_OUTTYPE_NULL = 0\nTDH_OUTTYPE_STRING = 1\nTDH_OUTTYPE_DATETIME = 2\nTDH_OUTTYPE_BYTE = 3\nTDH_OUTTYPE_UNSIGNEDBYTE = 4\nTDH_OUTTYPE_SHORT = 5\nTDH_OUTTYPE_UNSIGNEDSHORT = 6\nTDH_OUTTYPE_INT = 7\nTDH_OUTTYPE_UNSIGNEDINT = 8\nTDH_OUTTYPE_LONG = 9\nTDH_OUTTYPE_UNSIGNEDLONG = 10\nTDH_OUTTYPE_FLOAT = 11\nTDH_OUTTYPE_DOUBLE = 12\nTDH_OUTTYPE_BOOLEAN = 13\nTDH_OUTTYPE_GUID = 14\nTDH_OUTTYPE_HEXBINARY = 15\nTDH_OUTTYPE_HEXINT8 = 16\nTDH_OUTTYPE_HEXINT16 = 17\nTDH_OUTTYPE_HEXINT32 = 18\nTDH_OUTTYPE_HEXINT64 = 19\nTDH_OUTTYPE_PID = 20\nTDH_OUTTYPE_TID = 21\nTDH_OUTTYPE_PORT = 22\nTDH_OUTTYPE_IPV4 = 23\nTDH_OUTTYPE_IPV6 = 24\nTDH_OUTTYPE_SOCKETADDRESS = 25\nTDH_OUTTYPE_CIMDATETIME = 26\nTDH_OUTTYPE_ETWTIME = 27\nTDH_OUTTYPE_XML = 28\nTDH_OUTTYPE_ERRORCODE = 29\nTDH_OUTTYPE_WIN32ERROR = 30\nTDH_OUTTYPE_NTSTATUS = 31\nTDH_OUTTYPE_HRESULT = 32\nTDH_OUTTYPE_CULTURE_INSENSITIVE_DATETIME = 33\nTDH_OUTTYPE_JSON = 34\nTDH_OUTTYPE_REDUCEDSTRING = 300\nTDH_OUTTYPE_NOPRIN = 301\n\nTDH_CONVERTER_LOOKUP = {\n TDH_OUTTYPE_INT: int,\n TDH_OUTTYPE_UNSIGNEDINT: int,\n TDH_OUTTYPE_LONG: int,\n TDH_OUTTYPE_UNSIGNEDLONG: int,\n TDH_OUTTYPE_FLOAT: float,\n TDH_OUTTYPE_DOUBLE: float,\n TDH_OUTTYPE_BOOLEAN: convert_bool_str\n}\n\n\nclass PROPERTY_DATA_DESCRIPTOR(ct.Structure):\n _fields_ = [('PropertyName', ct.c_ulonglong),\n ('ArrayIndex', ct.c_ulong),\n ('Reserved', ct.c_ulong)]\n\n\nPropertyStruct = 0x1\nPropertyParamLength = 0x2\nPropertyParamCount = 0x4\nPropertyWBEMXmlFragment = 0x8\nPropertyParamFixedLength = 0x10\nPropertyParamFixedCount = 0x20\nPropertyHasTags = 0x40\nPropertyHasCustomSchema = 0x80\nPROPERTY_FLAGS = ct.c_uint\n\n# typedef enum _TDH_CONTEXT_TYPE {\n# TDH_CONTEXT_WPP_TMFFILE = 0,\n# TDH_CONTEXT_WPP_TMFSEARCHPATH = 1,\n# TDH_CONTEXT_WPP_GMT = 2,\n# TDH_CONTEXT_POINTERSIZE = 3,\n# TDH_CONTEXT_PDB_PATH = 4,\n# TDH_CONTEXT_MAXIMUM = 5\n# } TDH_CONTEXT_TYPE;\nTDH_CONTEXT_TYPE = ct.c_uint\n\n\nclass TDH_CONTEXT(ct.Structure):\n _fields_ = [('ParameterValue', ct.c_ulonglong),\n ('ParameterType', TDH_CONTEXT_TYPE),\n ('ParameterSize', ct.c_ulong)]\n\n\n# typedef enum _DECODING_SOURCE {\n# DecodingSourceXMLFile = 0,\n# DecodingSourceWbem = 1,\n# DecodingSourceWPP = 2,\n# DecodingSourceTlg = 3\n# } DECODING_SOURCE;\nDECODING_SOURCE = ct.c_uint\n\n# typedef struct _EVENT_PROPERTY_INFO {\n# PROPERTY_FLAGS Flags;\n# ULONG NameOffset;\n# union {\n# struct _nonStructType {\n# USHORT InType;\n# USHORT OutType;\n# ULONG MapNameOffset;\n# } nonStructType;\n# struct _structType {\n# USHORT StructStartIndex;\n# USHORT NumOfStructMembers;\n# ULONG padding;\n# } structType;\n# };\n# union {\n# USHORT count;\n# USHORT countPropertyIndex;\n# };\n# union {\n# USHORT length;\n# USHORT lengthPropertyIndex;\n# };\n# union {\n# ULONG Reserved;\n# struct {\n# ULONG Tags : 28;\n# };\n# };\n# } 
EVENT_PROPERTY_INFO;\n\n\nclass nonStructType(ct.Structure):\n _fields_ = [('InType', ct.c_ushort),\n ('OutType', ct.c_ushort),\n ('MapNameOffset', ct.c_ulong)]\n\n\nclass structType(ct.Structure):\n _fields_ = [('StructStartIndex', wt.USHORT),\n ('NumOfStructMembers', wt.USHORT),\n ('padding', wt.ULONG)]\n\n\nclass epi_u1(ct.Union):\n _fields_ = [('nonStructType', nonStructType),\n ('structType', structType)]\n\n\nclass epi_u2(ct.Union):\n _fields_ = [('count', wt.USHORT),\n ('countPropertyIndex', wt.USHORT)]\n\n\nclass epi_u3(ct.Union):\n _fields_ = [('length', wt.USHORT),\n ('lengthPropertyIndex', wt.USHORT)]\n\n\nclass epi_u4(ct.Union):\n _fields_ = [('Reserved', wt.ULONG),\n ('Tags', wt.ULONG)]\n\n\nclass EVENT_PROPERTY_INFO(ct.Structure):\n _fields_ = [('Flags', PROPERTY_FLAGS),\n ('NameOffset', ct.c_ulong),\n ('epi_u1', epi_u1),\n ('epi_u2', epi_u2),\n ('epi_u3', epi_u3),\n ('epi_u4', epi_u4)]\n\n\nclass TRACE_EVENT_INFO(ct.Structure):\n _fields_ = [('ProviderGuid', GUID),\n ('EventGuid', GUID),\n ('EventDescriptor', ep.EVENT_DESCRIPTOR),\n ('DecodingSource', DECODING_SOURCE),\n ('ProviderNameOffset', ct.c_ulong),\n ('LevelNameOffset', ct.c_ulong),\n ('ChannelNameOffset', ct.c_ulong),\n ('KeywordsNameOffset', ct.c_ulong),\n ('TaskNameOffset', ct.c_ulong),\n ('OpcodeNameOffset', ct.c_ulong),\n ('EventMessageOffset', ct.c_ulong),\n ('ProviderMessageOffset', ct.c_ulong),\n ('BinaryXMLOffset', ct.c_ulong),\n ('BinaryXMLSize', ct.c_ulong),\n ('ActivityIDNameOffset', ct.c_ulong),\n ('RelatedActivityIDNameOffset', ct.c_ulong),\n ('PropertyCount', ct.c_ulong),\n ('TopLevelPropertyCount', ct.c_ulong),\n ('Flags', ct.c_ulong),\n ('EventPropertyInfoArray', EVENT_PROPERTY_INFO * 0)]\n\n\n# typedef enum {\n# EVENTMAP_INFO_FLAG_MANIFEST_VALUEMAP = 1,\n# EVENTMAP_INFO_FLAG_MANIFEST_BITMAP = 2,\n# EVENTMAP_INFO_FLAG_MANIFEST_PATTERNMAP = 4,\n# EVENTMAP_INFO_FLAG_WBEM_VALUEMAP = 8,\n# EVENTMAP_INFO_FLAG_WBEM_BITMAP = 16,\n# EVENTMAP_INFO_FLAG_WBEM_FLAG = 32,\n# EVENTMAP_INFO_FLAG_WBEM_NO_MAP = 64\n# } MAP_FLAGS;\nMAP_FLAGS = ct.c_uint\n\n\nclass EVENT_MAP_ENTRY(ct.Structure):\n _fields_ = [('OutputOffset', ct.c_ulong),\n ('InputOffset', ct.c_ulong)]\n\n\nclass EVENT_MAP_INFO(ct.Structure):\n _fields_ = [('NameOffset', ct.c_ulong),\n ('Flag', MAP_FLAGS),\n ('EntryCount', ct.c_ulong),\n ('FormatStringOffset', ct.c_ulong),\n ('MapEntryArray', EVENT_MAP_ENTRY * 0)]\n\n\nTdhGetEventInformation = ct.windll.Tdh.TdhGetEventInformation\nTdhGetEventInformation.argtypes = [ct.POINTER(ec.EVENT_RECORD),\n ct.c_ulong,\n ct.POINTER(TDH_CONTEXT),\n ct.POINTER(TRACE_EVENT_INFO),\n ct.POINTER(ct.c_ulong)]\nTdhGetEventInformation.restype = ct.c_ulong\n\nTdhGetPropertySize = ct.windll.Tdh.TdhGetPropertySize\nTdhGetPropertySize.argtypes = [ct.POINTER(ec.EVENT_RECORD),\n ct.c_ulong,\n ct.POINTER(TDH_CONTEXT),\n ct.c_ulong,\n ct.POINTER(PROPERTY_DATA_DESCRIPTOR),\n ct.POINTER(ct.c_ulong)]\nTdhGetPropertySize.restype = ct.c_ulong\n\nTdhGetProperty = ct.windll.Tdh.TdhGetProperty\nTdhGetProperty.argtypes = [ct.POINTER(ec.EVENT_RECORD),\n ct.c_ulong,\n ct.POINTER(TDH_CONTEXT),\n ct.c_ulong,\n ct.POINTER(PROPERTY_DATA_DESCRIPTOR),\n ct.c_ulong,\n ct.POINTER(ct.c_byte)]\nTdhGetProperty.restype = ct.c_ulong\n\nTdhGetEventMapInformation = ct.windll.Tdh.TdhGetEventMapInformation\nTdhGetEventMapInformation.argtypes = [ct.POINTER(ec.EVENT_RECORD),\n wt.LPWSTR,\n ct.POINTER(EVENT_MAP_INFO),\n ct.POINTER(ct.c_ulong)]\nTdhGetEventMapInformation.restype = ct.c_ulong\n\nTdhFormatProperty = 
ct.windll.Tdh.TdhFormatProperty\nTdhFormatProperty.argtypes = [ct.POINTER(TRACE_EVENT_INFO),\n ct.POINTER(EVENT_MAP_INFO),\n ct.c_ulong,\n ct.c_ushort,\n ct.c_ushort,\n ct.c_ushort,\n ct.c_ushort,\n ct.POINTER(ct.c_byte),\n ct.POINTER(ct.c_ulong),\n ct.c_wchar_p,\n ct.POINTER(ct.c_ushort)]\nTdhFormatProperty.restype = ct.c_ulong\n\n# typedef enum _EVENT_FIELD_TYPE {\n# EventKeywordInformation = 0,\n# EventLevelInformation = 1,\n# EventChannelInformation = 2,\n# EventTaskInformation = 3,\n# EventOpcodeInformation = 4,\n# EventInformationMax = 5\n# } EVENT_FIELD_TYPE;\n\nEventKeywordInformation = 0\nEventLevelInformation = 1\nEventChannelInformation = 2\nEventTaskInformation = 3\nEventOpcodeInformation = 4\nEventInformationMax = 5\n\nEVENT_FIELD_TYPE = ct.c_uint\n\n# typedef struct _PROVIDER_FIELD_INFO {\n# ULONG NameOffset;\n# ULONG DescriptionOffset;\n# ULONGLONG Value;\n# } PROVIDER_FIELD_INFO;\n\n\nclass PROVIDER_FIELD_INFO(ct.Structure):\n _fields_ = [('NameOffset', wt.ULONG),\n ('DescriptionOffset', wt.ULONG),\n ('Value', ct.c_ulonglong)]\n\n\n# typedef struct _PROVIDER_FIELD_INFOARRAY {\n# ULONG NumberOfElements;\n# EVENT_FIELD_TYPE FieldType;\n# PROVIDER_FIELD_INFO FieldInfoArray[ANYSIZE_ARRAY];\n# } PROVIDER_FIELD_INFOARRAY;\n\n\nclass PROVIDER_FIELD_INFOARRAY(ct.Structure):\n _fields_ = [('NumberOfElements', wt.LONG),\n ('FieldType', EVENT_FIELD_TYPE),\n ('FieldInfoArray', PROVIDER_FIELD_INFO * 0)]\n\n\n# ULONG __stdcall TdhEnumerateProviderFieldInformation(\n# _In_ LPGUID pGuid,\n# _In_ EVENT_FIELD_TYPE EventFieldType,\n# _Out_opt_ PPROVIDER_FIELD_INFOARRAY pBuffer,\n# _Inout_ ULONG *pBufferSize\n# );\n\nTdhEnumerateProviderFieldInformation = ct.windll.Tdh.TdhEnumerateProviderFieldInformation\nTdhEnumerateProviderFieldInformation.argtypes = [ct.POINTER(GUID),\n EVENT_FIELD_TYPE,\n ct.POINTER(PROVIDER_FIELD_INFOARRAY),\n ct.POINTER(wt.ULONG)]\nTdhEnumerateProviderFieldInformation.restype = ct.c_ulong\n", "id": "8142505", "language": "Python", "matching_score": 5.632635593414307, "max_stars_count": 247, "path": "etw/tdh.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\n# Public packages\nimport threading\nimport logging\nimport uuid\nimport time\nimport traceback\nimport ctypes as ct\nimport ctypes.wintypes as wt\n\n# Custom packages\nfrom etw import evntrace as et\nfrom etw import evntprov as ep\nfrom etw import in6addr as ia\nfrom etw import evntcons as ec\nfrom etw import wmistr as ws\nfrom etw import tdh as tdh\nfrom etw.common import rel_ptr_to_str, MAX_UINT, ETWException, RETURN_RAW_DATA_ONLY, RETURN_RAW_DATA_ON_ERROR, \\\n RETURN_ONLY_RAW_DATA_ON_ERROR, RETURN_RAW_UNFORMATTED_DATA\n\nlogger = logging.getLogger(__name__)\n\n\nclass TraceProperties:\n \"\"\"\n The TraceProperties class represents the EVENT_TRACE_PROPERTIES structure. 
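A session that needs different buffering can build this structure up front and hand it to the capture (a minimal sketch; 4096 is only an illustrative value):

        props = TraceProperties(ring_buf_size=4096)  # copied straight into EVENT_TRACE_PROPERTIES.BufferSize
        # hand it to the capture via ETW(properties=props) or EventProvider(session_name, props, providers)
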
The class wraps\n this structure to make it easier to interact with.\n \"\"\"\n\n def __init__(self, ring_buf_size=1024, max_str_len=1024, min_buffers=0, max_buffers=0, props=None):\n \"\"\"\n Initializes an EVENT_TRACE_PROPERTIES structure.\n\n :param ring_buf_size: The size of the ring buffer used for capturing events.\n :param max_str_len: The maximum length of the strings the proceed the structure.\n Unless you know what you are doing, do not modify this value.\n :param min_buffers: The minimum number of buffers for an event tracing session.\n Unless you know what you are doing, do not modify this value.\n :param max_buffers: The maximum number of buffers for an event tracing session.\n Unless you know what you are doing, do not modify this value.\n :param props: pointer to a EVENT_TRACE_PROPERTIES structure to use for the capture session.\n Use this when you wish to set all trace properties. If this is used then ring_buf_size,\n min_buffers, and max_buffers are ignored.\n \"\"\"\n # In this structure, the LoggerNameOffset and other string fields reside immediately\n # after the EVENT_TRACE_PROPERTIES structure. So allocate enough space for the\n # structure and any strings we are using.\n buf_size = ct.sizeof(et.EVENT_TRACE_PROPERTIES) + 2 * ct.sizeof(ct.c_wchar) * max_str_len\n\n # noinspection PyCallingNonCallable\n self._buf = (ct.c_char * buf_size)()\n self._props = ct.cast(ct.pointer(self._buf), ct.POINTER(et.EVENT_TRACE_PROPERTIES))\n\n if props:\n ct.memmove(self._props, props, ct.sizeof(et.EVENT_TRACE_PROPERTIES))\n else:\n self._props.contents.BufferSize = ring_buf_size\n\n if min_buffers != 0:\n self._props.contents.MinimumBuffers = min_buffers\n\n if max_buffers != 0:\n self._props.contents.MaximumBuffers = max_buffers\n\n self._props.contents.Wnode.Flags = ws.WNODE_FLAG_TRACED_GUID\n self._props.contents.LogFileMode = et.EVENT_TRACE_REAL_TIME_MODE\n\n self._props.contents.Wnode.BufferSize = buf_size\n self._props.contents.LoggerNameOffset = ct.sizeof(et.EVENT_TRACE_PROPERTIES)\n\n def __eq__(self, other):\n for field in self.get().contents._fields_:\n attr_name = field[0]\n a, b = getattr(self.get().contents, attr_name), getattr(other.get().contents, attr_name)\n is_wnode = isinstance(a, ws.WNODE_HEADER)\n if is_wnode is True:\n for wnode_field in a._fields_:\n wnode_attr_name = wnode_field[0]\n a_wnode, b_wnode = getattr(a, wnode_attr_name), getattr(b, wnode_attr_name)\n if a_wnode != b_wnode:\n return False\n else:\n if a != b:\n return False\n return True\n\n def get(self):\n \"\"\"\n This class wraps the construction of a struct for ctypes. As a result, in order to properly use it as a ctypes\n structure, you must use the private field _props. To maintain proper encapsulation, this getter is used to\n retrieve this value when needed.\n\n :return: The _props field needed for using this class as a ctypes EVENT_TRACE_PROPERTIES structure.\n \"\"\"\n return self._props\n\n\nclass EventProvider:\n \"\"\"\n Wraps all interactions with Event Tracing for Windows (ETW) event providers. This includes\n starting and stopping them.\n\n N.B. If using this class, do not call start() and stop() directly. 
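A provider session is normally run as a context manager; for example (a hedged sketch: the provider name and GUID are placeholders, and building a GUID from a string is assumed to be supported by etw.GUID):

        from etw.GUID import GUID

        providers = [ProviderInfo('Some Provider',
                                  GUID('{00000000-0000-0000-0000-000000000000}'))]
        with EventProvider('my-session', TraceProperties(), providers):
            pass  # providers are enabled on entry and disabled/stopped on exit
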
Only use through via ctxmgr\n \"\"\"\n\n def __init__(\n self,\n session_name,\n session_properties,\n providers):\n \"\"\"\n Sets the appropriate values for an ETW provider.\n\n :param session_name: The name of the provider session.\n :param session_properties: A TraceProperties instance used to specify the parameters for the provider\n :param providers: A list of ProviderInfo instances to use in the capture. Do not reuse providers.\n \"\"\"\n\n # check if the session name is \"NT Kernel Logger\"\n self.kernel_trace = False\n self.kernel_trace_was_running = False\n if session_name.lower() == et.KERNEL_LOGGER_NAME_LOWER:\n self.session_name = et.KERNEL_LOGGER_NAME\n self.kernel_trace = True\n else:\n self.session_name = session_name\n\n self.providers = providers\n self.session_properties = session_properties\n self.session_handle = et.TRACEHANDLE()\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, exc, ex, tb):\n self.stop()\n\n def start(self):\n \"\"\"\n Wraps the necessary processes needed for starting an ETW provider session.\n\n :return: Does not return anything.\n \"\"\"\n self.kernel_trace_was_running = False\n if self.kernel_trace is True:\n provider = self.providers[0] # there should only be one provider\n self.session_properties.get().contents.Wnode.Guid = provider.guid\n self.session_properties.get().contents.LogFileMode |= et.EVENT_TRACE_SYSTEM_LOGGER_MODE\n\n if provider.any_bitmask:\n self.session_properties.get().contents.EnableFlags = provider.any_bitmask\n else:\n self.session_properties.get().contents.EnableFlags = et.DEFAULT_NT_KERNEL_LOGGER_FLAGS\n\n status = et.StartTraceW(ct.byref(self.session_handle), self.session_name, self.session_properties.get())\n if status != tdh.ERROR_SUCCESS:\n if self.kernel_trace is True and status == tdh.ERROR_ALREADY_EXISTS:\n self.kernel_trace_was_running = True\n raise ct.WinError(status)\n\n if self.kernel_trace is False:\n for provider in self.providers:\n\n if provider.params:\n provider.params.contents.SourceId = self.session_properties.get().contents.Wnode.Guid\n\n status = et.EnableTraceEx2(self.session_handle,\n ct.byref(provider.guid),\n et.EVENT_CONTROL_CODE_ENABLE_PROVIDER,\n provider.level,\n provider.any_bitmask,\n provider.all_bitmask,\n 0,\n provider.params)\n if status != tdh.ERROR_SUCCESS:\n raise ct.WinError(status)\n\n def stop(self):\n \"\"\"\n Wraps the necessary processes needed for stopping an ETW provider session.\n\n :return: Does not return anything\n \"\"\"\n # don't stop if we don't have a handle, or it's the kernel trace and we started it ourself\n if (\n (self.session_handle.value == 0 and self.kernel_trace is False)\n or (self.kernel_trace is True and self.kernel_trace_was_running is True)\n ):\n return\n\n if self.kernel_trace is False:\n for provider in self.providers:\n\n status = et.EnableTraceEx2(self.session_handle,\n ct.byref(provider.guid),\n et.EVENT_CONTROL_CODE_DISABLE_PROVIDER,\n provider.level,\n provider.any_bitmask,\n provider.all_bitmask,\n 0,\n None)\n if status != tdh.ERROR_SUCCESS:\n raise ct.WinError(status)\n\n status = et.ControlTraceW(self.session_handle,\n self.session_name,\n self.session_properties.get(),\n et.EVENT_TRACE_CONTROL_STOP)\n if status != tdh.ERROR_SUCCESS:\n raise ct.WinError(status)\n\n et.CloseTrace(self.session_handle)\n\n\nclass EventConsumer:\n \"\"\"\n Wraps all interactions with Event Tracing for Windows (ETW) event consumers. This includes\n starting and stopping the consumer. 
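A consumer is usually pointed at an already-running session by name and given a callback; each event is delivered to that callback as an (event_id, parsed_dict) tuple (a minimal sketch; 'my-session' is a placeholder session name):

        def on_event(event):
            event_id, parsed = event
            print(event_id, parsed.get('Task Name'))

        with EventConsumer('my-session', event_callback=on_event):
            pass  # events are handled on a worker thread while the block runs
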
Additionally, each consumer begins processing events in\n a separate thread and uses a callback to process any events it receives in this thread -- those\n methods are implemented here as well.\n\n N.B. If using this class, do not call start() and stop() directly. Only use through via ctxmgr\n \"\"\"\n\n def __init__(self,\n logger_name,\n event_callback=None,\n task_name_filters=None,\n event_id_filters=None,\n providers_event_id_filters=None,\n pid_whitelist=None,\n pid_blacklist=None,\n callback_data_flag=0,\n callback_wait_time=0.0,\n trace_logfile=None):\n \"\"\"\n Initializes a real time event consumer object.\n\n :param logger_name: The name of the session that we want to consume events from.\n :param event_callback: The optional callback function which can be used to return the values.\n :param task_name_filters: List of filters to apply to the ETW capture\n :param event_id_filters: List of event ids to filter on.\n :param providers_event_id_filters: Dict of provider/ list of ids to filter on.\n :param pid_whitelist: List of PID for which we want to receive events (only events for those PIDs will be processed).\n :param pid_blacklist: List of PID for which we don't want to receive events (events for all PIDs except those will be processed).\n :param callback_data_flag: Determines how to format data passed into callback.\n :param callback_wait_time: Time callback will sleep when called. If used, this may cause events to be dropped.\n :param trace_logfile: EVENT_TRACE_LOGFILE structure.\n \"\"\"\n self.trace_handle = None\n self.process_thread = None\n self.logger_name = logger_name\n self.end_capture = threading.Event()\n self.event_callback = event_callback\n self.vfield_length = None\n self.index = 0\n self.task_name_filters = task_name_filters if task_name_filters else []\n self.event_id_filters = event_id_filters if event_id_filters else []\n self.providers_event_id_filters = providers_event_id_filters if providers_event_id_filters else {}\n self.callback_data_flag = callback_data_flag if not callback_data_flag else self.check_callback_flag(callback_data_flag) # NOQA\n self.callback_wait_time = callback_wait_time\n\n self.pid_whitelist = set(pid_whitelist) if pid_whitelist else set()\n self.pid_blacklist = set(pid_blacklist) if pid_blacklist else set()\n\n # check if the logger name is \"NT Kernel Logger\"\n self.kernel_trace = False\n if logger_name.lower() == et.KERNEL_LOGGER_NAME_LOWER:\n self.kernel_trace = True\n\n if not trace_logfile:\n # Construct the EVENT_TRACE_LOGFILE structure\n self.trace_logfile = et.EVENT_TRACE_LOGFILE()\n self.trace_logfile.ProcessTraceMode = (ec.PROCESS_TRACE_MODE_REAL_TIME | ec.PROCESS_TRACE_MODE_EVENT_RECORD)\n self.trace_logfile.LoggerName = logger_name\n else:\n self.trace_logfile = trace_logfile\n\n if not self.trace_logfile.EventRecordCallback and \\\n self.trace_logfile.ProcessTraceMode & (ec.PROCESS_TRACE_MODE_REAL_TIME | ec.PROCESS_TRACE_MODE_EVENT_RECORD):\n self.trace_logfile.EventRecordCallback = et.EVENT_RECORD_CALLBACK(self._processEvent)\n\n def add_pid_whitelist(self, pid):\n self.pid_whitelist.add(pid)\n\n def remove_pid_whitelist(self, pid):\n self.pid_whitelist.discard(pid)\n\n def reset_whitelist(self):\n self.pid_whitelist = set()\n\n def add_pid_blacklist(self, pid):\n self.pid_blacklist.add(pid)\n\n def remove_pid_blacklist(self, pid):\n self.pid_blacklist.discard(pid)\n\n def reset_blacklist(self):\n self.pid_blacklist = set()\n\n def __enter__(self):\n self.start()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n 
self.stop()\n\n def start(self):\n \"\"\"\n Starts a trace consumer.\n\n :return: Returns True on Success or False on Failure\n \"\"\"\n self.trace_handle = et.OpenTraceW(ct.byref(self.trace_logfile))\n if self.trace_handle == et.INVALID_PROCESSTRACE_HANDLE:\n raise ct.WinError()\n\n # For whatever reason, the restype is ignored\n self.trace_handle = et.TRACEHANDLE(self.trace_handle)\n self.process_thread = threading.Thread(target=self._run, args=(self.trace_handle, self.end_capture))\n self.process_thread.daemon = True\n self.process_thread.start()\n\n def stop(self):\n \"\"\"\n Stops a trace consumer.\n\n :return: Returns True on Success or False on Failure\n \"\"\"\n # Signal to the thread that we are reading to stop processing events.\n self.end_capture.set()\n\n # Call CloseTrace to cause ProcessTrace to return (unblock)\n et.CloseTrace(self.trace_handle)\n\n # If ProcessThread is actively parsing an event, we want to give it a chance to finish\n # before pulling the rug out from underneath it.\n self.process_thread.join()\n\n @staticmethod\n def check_callback_flag(flag):\n \"\"\"\n Checks callback flags.\n\n :return: Returns flags on success, on failure raises exception\n \"\"\"\n flags = [RETURN_RAW_DATA_ONLY,\n RETURN_RAW_DATA_ON_ERROR,\n RETURN_ONLY_RAW_DATA_ON_ERROR,\n RETURN_RAW_UNFORMATTED_DATA]\n if flag not in flags:\n raise Exception('Callback flag value {:d} passed into EventConsumer is invalid'.format(flag))\n return flag\n\n @staticmethod\n def _run(trace_handle, end_capture):\n \"\"\"\n Because ProcessTrace() blocks, this function is used to spin off new threads.\n\n :param trace_handle: The handle for the trace consumer that we want to begin processing.\n :param end_capture: A callback function which determines what should be done with the results.\n :return: Does not return a value.\n \"\"\"\n while True:\n if tdh.ERROR_SUCCESS != et.ProcessTrace(ct.byref(trace_handle), 1, None, None):\n end_capture.set()\n\n if end_capture.isSet():\n break\n\n @staticmethod\n def _getEventInformation(record):\n \"\"\"\n Initially we are handed an EVENT_RECORD structure. While this structure technically contains\n all of the information necessary, TdhGetEventInformation parses the structure and simplifies it\n so we can more effectively parse and handle the various fields.\n\n :param record: The EventRecord structure for the event we are parsing\n :return: Returns a pointer to a TRACE_EVENT_INFO structure or None on error.\n \"\"\"\n info = ct.POINTER(tdh.TRACE_EVENT_INFO)()\n buffer_size = wt.DWORD()\n\n # Call TdhGetEventInformation once to get the required buffer size and again to actually populate the structure.\n status = tdh.TdhGetEventInformation(record, 0, None, None, ct.byref(buffer_size))\n if tdh.ERROR_INSUFFICIENT_BUFFER == status:\n info = ct.cast((ct.c_byte * buffer_size.value)(), ct.POINTER(tdh.TRACE_EVENT_INFO))\n status = tdh.TdhGetEventInformation(record, 0, None, info, ct.byref(buffer_size))\n\n if tdh.ERROR_SUCCESS != status:\n raise ct.WinError(status)\n\n return info\n\n @staticmethod\n def _getArraySize(record, info, event_property):\n \"\"\"\n Some of the properties encountered when parsing represent an array of values. 
This function\n will retrieve the size of the array.\n\n :param record: The EventRecord structure for the event we are parsing\n :param info: The TraceEventInfo structure for the event we are parsing\n :param event_property: The EVENT_PROPERTY_INFO structure for the TopLevelProperty of the event we are parsing\n :return: Returns a DWORD representing the size of the array or None on error.\n \"\"\"\n event_property_array = ct.cast(info.contents.EventPropertyInfoArray, ct.POINTER(tdh.EVENT_PROPERTY_INFO))\n flags = event_property.Flags\n\n if flags & tdh.PropertyParamCount:\n data_descriptor = tdh.PROPERTY_DATA_DESCRIPTOR()\n j = event_property.epi_u2.countPropertyIndex\n property_size = wt.DWORD()\n count = wt.DWORD()\n\n data_descriptor.PropertyName = info + event_property_array[j].NameOffset\n data_descriptor.ArrayIndex = MAX_UINT\n\n status = tdh.TdhGetPropertySize(record, 0, None, 1, ct.byref(data_descriptor), ct.byref(property_size))\n if tdh.ERROR_SUCCESS != status:\n raise ct.WinError(status)\n\n status = tdh.TdhGetProperty(record, 0, None, 1, ct.byref(data_descriptor), property_size, ct.byref(count))\n if tdh.ERROR_SUCCESS != status:\n raise ct.WinError(status)\n return count\n\n if flags & tdh.PropertyParamFixedCount:\n raise ETWException('PropertyParamFixedCount not supported')\n\n return event_property.epi_u2.count\n\n @staticmethod\n def _getPropertyLength(record, info, event_property):\n \"\"\"\n Each property encountered when parsing the top level property has an associated length. If the\n length is available, retrieve it here. In some cases, the length is 0. This can signify that\n we are dealing with a variable length field such as a structure, an IPV6 data, or a string.\n\n :param record: The EventRecord structure for the event we are parsing\n :param info: The TraceEventInfo structure for the event we are parsing\n :param event_property: The EVENT_PROPERTY_INFO structure for the TopLevelProperty of the event we are parsing\n :return: Returns the length of the property as a c_ulong() or None on error\n \"\"\"\n flags = event_property.Flags\n\n if flags & tdh.PropertyParamLength:\n data_descriptor = tdh.PROPERTY_DATA_DESCRIPTOR()\n event_property_array = ct.cast(info.contents.EventPropertyInfoArray, ct.POINTER(tdh.EVENT_PROPERTY_INFO))\n j = wt.DWORD(event_property.epi_u3.length)\n property_size = ct.c_ulong()\n length = wt.DWORD()\n\n # Setup the PROPERTY_DATA_DESCRIPTOR structure\n data_descriptor.PropertyName = (ct.cast(info, ct.c_voidp).value + event_property_array[j.value].NameOffset)\n data_descriptor.ArrayIndex = MAX_UINT\n\n status = tdh.TdhGetPropertySize(record, 0, None, 1, ct.byref(data_descriptor), ct.byref(property_size))\n if tdh.ERROR_SUCCESS != status:\n raise ct.WinError(status)\n\n status = tdh.TdhGetProperty(record,\n 0,\n None,\n 1,\n ct.byref(data_descriptor),\n property_size,\n ct.cast(ct.byref(length), ct.POINTER(ct.c_byte)))\n if tdh.ERROR_SUCCESS != status:\n raise ct.WinError(status)\n return length.value\n\n in_type = event_property.epi_u1.nonStructType.InType\n out_type = event_property.epi_u1.nonStructType.OutType\n\n # This is a special case in which the input and output types dictate the size\n if (in_type == tdh.TDH_INTYPE_BINARY) and (out_type == tdh.TDH_OUTTYPE_IPV6):\n return ct.sizeof(ia.IN6_ADDR)\n\n return event_property.epi_u3.length\n\n @staticmethod\n def _getMapInfo(record, info, event_property):\n \"\"\"\n When parsing a field in the event property structure, there may be a mapping between a given\n name and the structure it 
represents. If it exists, we retrieve that mapping here.\n\n Because this may legitimately return a NULL value we return a tuple containing the success or\n failure status as well as either None (NULL) or an EVENT_MAP_INFO pointer.\n\n :param record: The EventRecord structure for the event we are parsing\n :param info: The TraceEventInfo structure for the event we are parsing\n :param event_property: The EVENT_PROPERTY_INFO structure for the TopLevelProperty of the event we are parsing\n :return: A tuple of the map_info structure and boolean indicating whether we succeeded or not\n \"\"\"\n map_name = rel_ptr_to_str(info, event_property.epi_u1.nonStructType.MapNameOffset)\n map_size = wt.DWORD()\n map_info = ct.POINTER(tdh.EVENT_MAP_INFO)()\n\n status = tdh.TdhGetEventMapInformation(record, map_name, None, ct.byref(map_size))\n if tdh.ERROR_INSUFFICIENT_BUFFER == status:\n map_info = ct.cast((ct.c_char * map_size.value)(), ct.POINTER(tdh.EVENT_MAP_INFO))\n status = tdh.TdhGetEventMapInformation(record, map_name, map_info, ct.byref(map_size))\n\n if tdh.ERROR_SUCCESS == status:\n return map_info, True\n\n # ERROR_NOT_FOUND is actually a perfectly acceptable status\n if tdh.ERROR_NOT_FOUND == status:\n return None, True\n\n # We actually failed.\n raise ct.WinError()\n\n def _unpackSimpleType(self, record, info, event_property):\n \"\"\"\n This method handles dumping all simple types of data (i.e., non-struct types).\n\n :param record: The EventRecord structure for the event we are parsing\n :param info: The TraceEventInfo structure for the event we are parsing\n :param event_property: The EVENT_PROPERTY_INFO structure for the TopLevelProperty of the event we are parsing\n :return: Returns a key-value pair as a dictionary. If we fail, the dictionary is {}\n \"\"\"\n # Get the EVENT_MAP_INFO, if it is present.\n map_info, success = self._getMapInfo(record, info, event_property)\n if not success:\n return {}\n\n # Get the length of the value of the property we are dealing with.\n property_length = self._getPropertyLength(record, info, event_property)\n if property_length is None:\n return {}\n # The version of the Python interpreter may be different than the system architecture.\n if record.contents.EventHeader.Flags & ec.EVENT_HEADER_FLAG_32_BIT_HEADER:\n ptr_size = 4\n else:\n ptr_size = 8\n\n name_field = rel_ptr_to_str(info, event_property.NameOffset)\n if property_length == 0 and self.vfield_length is not None:\n if self.vfield_length == 0:\n self.vfield_length = None\n return {name_field: None}\n\n # If vfield_length isn't 0, we should be able to parse the property.\n property_length = self.vfield_length\n\n # After calling the TdhFormatProperty function, use the UserDataConsumed parameter value to set the new values\n # of the UserData and UserDataLength parameters (Subtract UserDataConsumed from UserDataLength and use\n # UserDataLength to increment the UserData pointer).\n\n # All of the variables needed to actually use TdhFormatProperty retrieve the value\n user_data = record.contents.UserData + self.index\n user_data_remaining = record.contents.UserDataLength - self.index\n\n # if there is no data remaining then return\n if user_data_remaining <= 0:\n logger.warning('No more user data left, returning none for field {:s}'.format(name_field))\n return {name_field: None}\n\n in_type = event_property.epi_u1.nonStructType.InType\n out_type = event_property.epi_u1.nonStructType.OutType\n formatted_data_size = wt.DWORD()\n formatted_data = wt.LPWSTR()\n user_data_consumed = 
ct.c_ushort()\n\n status = tdh.TdhFormatProperty(info,\n map_info,\n ptr_size,\n in_type,\n out_type,\n ct.c_ushort(property_length),\n user_data_remaining,\n ct.cast(user_data, ct.POINTER(ct.c_byte)),\n ct.byref(formatted_data_size),\n None,\n ct.byref(user_data_consumed))\n\n if status == tdh.ERROR_INSUFFICIENT_BUFFER:\n formatted_data = ct.cast((ct.c_char * formatted_data_size.value)(), wt.LPWSTR)\n status = tdh.TdhFormatProperty(info,\n map_info,\n ptr_size,\n in_type,\n out_type,\n ct.c_ushort(property_length),\n user_data_remaining,\n ct.cast(user_data, ct.POINTER(ct.c_byte)),\n ct.byref(formatted_data_size),\n formatted_data,\n ct.byref(user_data_consumed))\n\n if status != tdh.ERROR_SUCCESS:\n # We can handle this error and still capture the data.\n logger.warning('Failed to get data field data for {:s}, incrementing by reported size'.format(name_field))\n self.index += property_length\n return {name_field: None}\n\n # Increment where we are in the user data segment that we are parsing.\n self.index += user_data_consumed.value\n\n if name_field.lower().endswith('length'):\n try:\n self.vfield_length = int(formatted_data.value, 10)\n except ValueError:\n logger.warning('Setting vfield_length to None')\n self.vfield_length = None\n\n data = formatted_data.value\n # Convert the formatted data if necessary\n if out_type in tdh.TDH_CONVERTER_LOOKUP and type(data) == tdh.TDH_CONVERTER_LOOKUP[out_type]:\n data = tdh.TDH_CONVERTER_LOOKUP[out_type](data)\n\n return {name_field: data}\n\n def _parseExtendedData(self, record):\n \"\"\"\n This method handles dumping all extended data from the record\n\n :param record: The EventRecord structure for the event we are parsing\n :return: Returns a key-value pair as a dictionary.\n \"\"\"\n result = {}\n for i in range(record.contents.ExtendedDataCount):\n ext_type = record.contents.ExtendedData[i].ExtType\n data_ptr = record.contents.ExtendedData[i].DataPtr\n data_size = record.contents.ExtendedData[i].DataSize\n try:\n if ext_type == ec.EVENT_HEADER_EXT_TYPE_RELATED_ACTIVITYID:\n d = ct.cast(data_ptr, ct.POINTER(ec.EVENT_EXTENDED_ITEM_RELATED_ACTIVITYID))\n result['RelatedActivityID'] = str(d.contents.RelatedActivityId)\n elif ext_type == ec.EVENT_HEADER_EXT_TYPE_SID:\n buff = ct.create_string_buffer(data_size)\n ct.memmove(buff, data_ptr, data_size)\n sid_string = wt.LPWSTR()\n res = et.ConvertSidToStringSidW(ct.cast(buff, ct.c_void_p), ct.byref(sid_string))\n if res > 0:\n result['SID'] = str(sid_string.value)\n et.LocalFree(sid_string)\n elif ext_type == ec.EVENT_HEADER_EXT_TYPE_TS_ID:\n d = ct.cast(data_ptr, ct.POINTER(ec.EVENT_EXTENDED_ITEM_TS_ID))\n result['TSID'] = d.contents.SessionId\n elif ext_type == ec.EVENT_HEADER_EXT_TYPE_INSTANCE_INFO:\n d = ct.cast(data_ptr, ct.POINTER(ec.EVENT_EXTENDED_ITEM_INSTANCE))\n instance = {\n 'InstanceId': d.contents.InstanceId,\n 'ParentInstanceId': d.contents.ParentInstanceId,\n 'ParentGuid': str(d.contents.ParentGuid)\n }\n result['InstanceInfo'] = instance\n elif ext_type == ec.EVENT_HEADER_EXT_TYPE_STACK_TRACE32:\n nb_address = int((data_size - ct.sizeof(ct.c_ulonglong)) / ct.sizeof(ct.c_ulong))\n d = ct.cast(data_ptr, ct.POINTER(ec.EVENT_EXTENDED_ITEM_STACK_TRACE32))\n match_id = d.contents.MatchId\n addr_buf = ct.cast(ct.addressof(d.contents.Address), ct.POINTER((ct.c_ulong * nb_address)))\n addr_list = []\n for j in range(nb_address):\n addr_list.append(addr_buf.contents[j])\n result['StackTrace32'] = {\n 'MatchId': match_id,\n 'Address': addr_list\n }\n elif ext_type == 
ec.EVENT_HEADER_EXT_TYPE_STACK_TRACE64:\n nb_address = int((data_size - ct.sizeof(ct.c_ulonglong)) / ct.sizeof(ct.c_ulonglong))\n d = ct.cast(data_ptr, ct.POINTER(ec.EVENT_EXTENDED_ITEM_STACK_TRACE64))\n match_id = d.contents.MatchId\n addr_buf = ct.cast(ct.addressof(d.contents.Address), ct.POINTER((ct.c_ulonglong * nb_address)))\n addr_list = []\n for j in range(nb_address):\n addr_list.append(addr_buf.contents[j])\n result['StackTrace64'] = {\n 'MatchId': match_id,\n 'Address': addr_list\n }\n elif ext_type == ec.EVENT_HEADER_EXT_TYPE_PEBS_INDEX:\n d = ct.cast(data_ptr, ct.POINTER(ec.EVENT_EXTENDED_ITEM_PEBS_INDEX))\n result['PebsIndex'] = d.contents.PebsIndex\n elif ext_type == ec.EVENT_HEADER_EXT_TYPE_PMC_COUNTERS:\n nb_counters = int(data_size / ct.sizeof(ct.c_ulonglong))\n counters_buf = ct.cast(data_ptr, ct.POINTER((ct.c_ulonglong * nb_counters)))\n counters_list = []\n for j in range(nb_counters):\n counters_list.append(counters_buf.contents[j])\n result['PMCCounters'] = counters_list\n elif ext_type == ec.EVENT_HEADER_EXT_TYPE_PSM_KEY:\n pass\n elif ext_type == ec.EVENT_HEADER_EXT_TYPE_EVENT_KEY:\n d = ct.cast(data_ptr, ct.POINTER(ec.EVENT_EXTENDED_ITEM_EVENT_KEY))\n result['EventKey'] = d.contents.Key\n elif ext_type == ec.EVENT_HEADER_EXT_TYPE_EVENT_SCHEMA_TL:\n pass\n elif ext_type == ec.EVENT_HEADER_EXT_TYPE_PROV_TRAITS:\n pass\n elif ext_type == ec.EVENT_HEADER_EXT_TYPE_PROCESS_START_KEY:\n d = ct.cast(data_ptr, ct.POINTER(ec.EVENT_EXTENDED_ITEM_PROCESS_START_KEY))\n result['StartKey'] = d.contents.ProcessStartKey\n except Exception as e:\n logger.warning('Extended data parse error (type %d, size %d) : %s' % (ext_type, data_size, str(e)))\n return result\n\n def _unpackComplexType(self, record, info, event_property):\n \"\"\"\n A complex type (e.g., a structure with sub-properties) can only contain simple types. Loop over all\n sub-properties and dump the property name and value.\n\n :param record: The EventRecord structure for the event we are parsing\n :param info: The TraceEventInfo structure for the event we are parsing\n :param event_property: The EVENT_PROPERTY_INFO structure for the TopLevelProperty of the event we are parsing\n :return: A dictionary of the property and value for the event we are parsing\n \"\"\"\n out = {}\n\n array_size = self._getArraySize(record, info, event_property)\n if array_size is None:\n return {}\n\n for _ in range(array_size):\n start_index = event_property.epi_u1.structType.StructStartIndex\n last_member = start_index + event_property.epi_u1.structType.NumOfStructMembers\n\n for j in range(start_index, last_member):\n # Because we are no longer dealing with the TopLevelProperty, we need to get the event_property_array\n # again so we can get the EVENT_PROPERTY_INFO structure of the sub-property we are currently parsing.\n event_property_array = ct.cast(info.contents.EventPropertyInfoArray,\n ct.POINTER(tdh.EVENT_PROPERTY_INFO))\n\n key, value = self._unpackSimpleType(record, info, event_property_array[j])\n if key is None and value is None:\n break\n\n out[key] = value\n\n return out\n\n def _processEvent(self, record):\n \"\"\"\n This is a callback function that fires whenever an event needs handling. It iterates through the structure to\n parse the properties of each event. 
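The parsed result is a plain dictionary: the fixed header fields sit under 'EventHeader', with 'Task Name', 'Description' and the decoded top-level properties beside them. An illustrative (not exhaustive) shape, with made-up values:

        parsed = {
            'EventHeader': {'ProcessId': 1234, 'ThreadId': 5678},  # plus the remaining header fields
            'Task Name': 'SOMETASK',
            'Description': 'decoded event message, when present',
            # decoded top-level properties follow, keyed by property name
        }
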
If a user defined callback is specified it then passes the parsed data to\n it.\n\n\n :param record: The EventRecord structure for the event we are parsing\n :return: Nothing\n \"\"\"\n\n if self.callback_wait_time:\n time.sleep(self.callback_wait_time)\n\n parsed_data = {}\n record_parse_error = True\n field_parse_error = False\n\n if self.callback_data_flag == RETURN_RAW_UNFORMATTED_DATA:\n event_id = 0\n out = record\n else:\n # event ID is in \"Opcode\" field in kernel events, Id is always 0\n if self.kernel_trace:\n event_id = record.contents.EventHeader.EventDescriptor.Opcode\n else:\n event_id = record.contents.EventHeader.EventDescriptor.Id\n if self.event_id_filters and event_id not in self.event_id_filters:\n return\n # set task name to provider guid for the time being\n task_name = str(record.contents.EventHeader.ProviderId)\n\n # filter event ID in provider if requested (otherwise, we handle all events)\n task_name_upper = task_name.upper()\n if task_name_upper in self.providers_event_id_filters and event_id not in self.providers_event_id_filters[task_name_upper]:\n return\n\n pid = record.contents.EventHeader.ProcessId\n # if we have a whitelist set, keep only events for those PIDs\n # don't look at blacklist in that case\n if self.pid_whitelist:\n if pid not in self.pid_whitelist:\n return\n # no whitelist, check for a blacklist\n else:\n if self.pid_blacklist and pid in self.pid_blacklist:\n return\n\n # add all header fields from EVENT_HEADER structure\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa363759(v=vs.85).aspx\n out = {'EventHeader': {\n 'Size': record.contents.EventHeader.Size,\n 'HeaderType': record.contents.EventHeader.HeaderType,\n 'Flags': record.contents.EventHeader.Flags,\n 'EventProperty': record.contents.EventHeader.EventProperty,\n 'ThreadId': record.contents.EventHeader.ThreadId,\n 'ProcessId': record.contents.EventHeader.ProcessId,\n 'TimeStamp': record.contents.EventHeader.TimeStamp,\n 'ProviderId': task_name,\n 'EventDescriptor': {'Id': event_id,\n 'Version': record.contents.EventHeader.EventDescriptor.Version,\n 'Channel': record.contents.EventHeader.EventDescriptor.Channel,\n 'Level': record.contents.EventHeader.EventDescriptor.Level,\n 'Opcode': record.contents.EventHeader.EventDescriptor.Opcode,\n 'Task': record.contents.EventHeader.EventDescriptor.Task,\n 'Keyword':\n record.contents.EventHeader.EventDescriptor.Keyword},\n 'KernelTime': record.contents.EventHeader.KernelTime,\n 'UserTime': record.contents.EventHeader.UserTime,\n 'ActivityId': str(record.contents.EventHeader.ActivityId)},\n 'Task Name': task_name}\n\n if self.callback_data_flag != RETURN_RAW_DATA_ONLY:\n try:\n info = self._getEventInformation(record)\n\n # Some events do not have an associated task_name value. In this case, we should use the provider\n # name instead.\n if info.contents.TaskNameOffset == 0:\n task_name = rel_ptr_to_str(info, info.contents.ProviderNameOffset)\n else:\n task_name = rel_ptr_to_str(info, info.contents.TaskNameOffset)\n\n task_name = task_name.strip().upper()\n\n # Add a description for the event, if present\n if info.contents.EventMessageOffset:\n description = rel_ptr_to_str(info, info.contents.EventMessageOffset)\n else:\n description = ''\n\n # Windows 7 does not support predicate filters. 
Instead, we use a whitelist to filter things on the\n # consumer.\n if self.task_name_filters and task_name not in self.task_name_filters:\n return\n\n user_data = record.contents.UserData\n if user_data is None:\n user_data = 0\n\n end_of_user_data = user_data + record.contents.UserDataLength\n self.index = 0\n self.vfield_length = None\n property_array = ct.cast(info.contents.EventPropertyInfoArray, ct.POINTER(tdh.EVENT_PROPERTY_INFO))\n\n for i in range(info.contents.TopLevelPropertyCount):\n # If the user_data is the same value as the end_of_user_data, we are ending with a 0-length\n # field. Though not documented, this is completely valid.\n if user_data == end_of_user_data:\n break\n\n # Determine whether we are processing a simple type or a complex type and act accordingly\n if property_array[i].Flags & tdh.PropertyStruct:\n field = self._unpackComplexType(record, info, property_array[i])\n else:\n field = self._unpackSimpleType(record, info, property_array[i])\n\n if field == {} or None in field.values():\n field_parse_error = True\n parsed_data.update(field)\n\n # Add the description field in\n parsed_data['Description'] = description\n parsed_data['Task Name'] = task_name\n # Add ExtendedData if any\n if record.contents.EventHeader.Flags & ec.EVENT_HEADER_FLAG_EXTENDED_INFO:\n parsed_data['EventExtendedData'] = self._parseExtendedData(record)\n\n record_parse_error = False\n except Exception as e:\n logger.warning('Unable to parse event: {}'.format(e))\n\n try:\n if self.callback_data_flag == RETURN_RAW_DATA_ONLY or \\\n ((self.callback_data_flag == RETURN_RAW_DATA_ON_ERROR or\n self.callback_data_flag == RETURN_ONLY_RAW_DATA_ON_ERROR) and\n (field_parse_error or record_parse_error)):\n out['UserData'] = b''.join([ct.cast(record.contents.UserData + i, wt.PBYTE).contents\n for i in range(record.contents.UserDataLength)])\n\n if (self.callback_data_flag == RETURN_ONLY_RAW_DATA_ON_ERROR and field_parse_error is False) or \\\n self.callback_data_flag == RETURN_RAW_DATA_ON_ERROR or self.callback_data_flag == 0:\n\n out.update(parsed_data)\n\n # Call the user's specified callback function\n if self.event_callback:\n self.event_callback((event_id, out))\n\n except Exception as e:\n logger.error('Exception during callback: {}'.format(e))\n logger.error(traceback.format_exc())\n\n\nclass ETW:\n \"\"\"\n Serves as a base class for each capture trace type.\n \"\"\"\n\n def __init__(\n self,\n session_name=None,\n ring_buf_size=1024,\n max_str_len=1024,\n min_buffers=0,\n max_buffers=0,\n event_callback=None,\n task_name_filters=None,\n properties=None,\n providers=None,\n ignore_exists_error=True,\n event_id_filters=None,\n providers_event_id_filters=None,\n pid_whitelist=None,\n pid_blacklist=None,\n callback_data_flag=0,\n callback_wait_time=0.0,\n trace_logfile=None):\n \"\"\"\n Initializes an instance of the ETW class. 
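End to end, a capture built on this class looks roughly like the following (a hedged sketch: the provider name and GUID are placeholders, and constructing a GUID from a string is assumed to be supported by etw.GUID):

        from etw.etw import ETW, ProviderInfo
        from etw.GUID import GUID

        def on_event(event):
            event_id, parsed = event
            print(event_id, parsed.get('Task Name'))

        provider = ProviderInfo('Some Provider',
                                GUID('{00000000-0000-0000-0000-000000000000}'))
        with ETW(providers=[provider], event_callback=on_event):
            input('capturing, press enter to stop')
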
The default buffer parameters represent a very typical use case and\n should not be overridden unless the user knows what they are doing.\n\n :param session_name: Session name for the ETW capture session\n :param ring_buf_size: The size of the ring buffer used for capturing events.\n :param max_str_len: The maximum length of the strings the proceed the structure.\n Unless you know what you are doing, do not modify this value.\n :param min_buffers: The minimum number of buffers for an event tracing session.\n Unless you know what you are doing, do not modify this value.\n :param max_buffers: The maximum number of buffers for an event tracing session.\n Unless you know what you are doing, do not modify this value.\n :param event_callback: An optional parameter allowing the caller to specify a callback function for each event\n that is parsed.\n :param task_name_filters: List of filters to apply to the ETW capture\n :param properties: An instance of TraceProperties class to use for the capture\n :param providers: A list of ProviderInfo class instances that will be used for the capture session. Do not reuse\n providers.\n :param ignore_exists_error: If true (default), the library will ignore an ERROR_ALREADY_EXISTS on the\n EventProvider start.\n :param event_id_filters: List of event ids to filter on.\n :param providers_event_id_filters: Dict of provider/ list of ids to filter on.\n :param pid_whitelist: List of PID for which we want to receive events (only events for those PIDs will be processed).\n :param pid_blacklist: List of PID for which we don't want to receive events (events for all PIDs except those will be processed).\n :param callback_data_flag: Determines how to format data passed into callback.\n :param callback_wait_time: Time callback will sleep when called. 
If used, this may cause events to be dropped.\n :param trace_logfile: EVENT_TRACE_LOGFILE structure to be passed to the consumer.\n \"\"\"\n\n if task_name_filters is None:\n self.task_name_filters = []\n else:\n self.task_name_filters = task_name_filters\n\n if event_id_filters is None:\n self.event_id_filters = []\n else:\n self.event_id_filters = event_id_filters\n\n if providers_event_id_filters is None:\n self.providers_event_id_filters = {}\n else:\n self.providers_event_id_filters = providers_event_id_filters\n\n if pid_whitelist is None:\n self.pid_whitelist = set()\n else:\n self.pid_whitelist = set(pid_whitelist)\n\n if pid_blacklist is None:\n self.pid_blacklist = set()\n else:\n self.pid_blacklist = set(pid_blacklist)\n\n if providers is None:\n self.providers = []\n else:\n self.providers = providers\n\n if properties is None:\n self.properties = TraceProperties(ring_buf_size,\n max_str_len,\n min_buffers,\n max_buffers)\n else:\n self.properties = properties\n\n if session_name is None:\n self.session_name = '{:s}'.format(str(uuid.uuid4()))\n else:\n self.session_name = session_name\n\n self.provider = None\n self.consumer = None\n self.running = False\n self.event_callback = event_callback\n self.ignore_exists_error = ignore_exists_error\n self.callback_data_flag = callback_data_flag\n self.callback_wait_time = callback_wait_time\n self.trace_logfile = trace_logfile\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, exc, ex, tb):\n self.stop()\n\n def start(self):\n \"\"\"\n Starts the providers and the consumers for capturing data using ETW.\n\n :return: Does not return anything.\n \"\"\"\n\n if self.provider is None:\n self.provider = EventProvider(self.session_name, self.properties, self.providers)\n\n if self.running is False:\n self.running = True\n try:\n self.provider.start()\n except WindowsError as wex:\n if (wex.winerror == tdh.ERROR_ALREADY_EXISTS and not self.ignore_exists_error) or \\\n wex.winerror != tdh.ERROR_ALREADY_EXISTS:\n raise wex\n\n # Start the consumer\n self.consumer = EventConsumer(self.session_name,\n self.event_callback,\n self.task_name_filters,\n self.event_id_filters,\n self.providers_event_id_filters,\n self.pid_whitelist,\n self.pid_blacklist,\n self.callback_data_flag,\n self.callback_wait_time,\n self.trace_logfile)\n self.consumer.start()\n\n def stop(self):\n \"\"\"\n Stops the current consumer and provider.\n\n :return: Does not return anything.\n \"\"\"\n\n if self.provider:\n self.running = False\n self.provider.stop()\n self.consumer.stop()\n\n def add_provider(self, provider):\n '''\n Adds a ProviderInfo instance to the capture\n\n :param provider: ProviderInfo class instance to add\n :return: Does not return anything\n '''\n\n self.providers.append(provider)\n\n def query(self):\n props = TraceProperties()\n et.ControlTraceW(et.TRACEHANDLE(0),\n self.session_name,\n props.get(),\n et.EVENT_TRACE_CONTROL_QUERY)\n return props.get().contents\n\n def update(self, trace_properties):\n '''\n Update the trace session properties on the fly\n\n :param trace_properties: TraceProperties class instance to use\n :return: Does not return anything\n '''\n et.ControlTraceW(et.TRACEHANDLE(0),\n self.session_name,\n trace_properties.get(),\n et.EVENT_TRACE_CONTROL_UPDATE)\n\n def control_stop(self, trace_properties):\n '''\n stop the trace session properties on the fly\n\n :param trace_properties: TraceProperties class instance to use\n :return: Does not return anything\n '''\n et.ControlTraceW(et.TRACEHANDLE(0),\n 
self.session_name,\n trace_properties.get(),\n et.EVENT_TRACE_CONTROL_STOP)\n\n def add_pid_whitelist(self, pid):\n '''\n add a PID to the whitelisted list of PIDs\n\n :param pid: pid to whitelist\n :return: Does not return anything\n '''\n # keep in our current list\n self.pid_whitelist.add(pid)\n # if consumer is started, update the list in the consumer\n if self.consumer:\n self.consumer.add_pid_whitelist(pid)\n\n def remove_pid_whitelist(self, pid):\n '''\n remove a PID from the whitelisted list of PIDs\n\n :param pid: pid to un-whitelist\n :return: Does not return anything\n '''\n # remove from our list\n self.pid_whitelist.discard(pid)\n # if consumer is started, update the list in the consumer\n if self.consumer:\n self.consumer.remove_pid_whitelist(pid)\n\n def reset_whitelist(self):\n '''\n reset the list of whitelisted PIDs\n\n :return: Does not return anything\n '''\n self.pid_whitelist = set()\n # if consumer is started, update the list in the consumer\n if self.consumer:\n self.consumer.reset_whitelist()\n\n def add_pid_blacklist(self, pid):\n '''\n add a PID to the blacklisted list of PIDs\n\n :param pid: pid to blacklist\n :return: Does not return anything\n '''\n # keep in our current list\n self.pid_blacklist.add(pid)\n # if consumer is started, update the list in the consumer\n if self.consumer:\n self.consumer.add_pid_blacklist(pid)\n\n def remove_pid_blacklist(self, pid):\n '''\n remove a PID from the blacklisted list of PIDs\n\n :param pid: pid to un-blacklist\n :return: Does not return anything\n '''\n # remove from our list\n self.pid_blacklist.discard(pid)\n # if consumer is started, update the list in the consumer\n if self.consumer:\n self.consumer.remove_pid_blacklist(pid)\n\n def reset_blacklist(self):\n '''\n reset the list of blacklisted PIDs\n\n :return: Does not return anything\n '''\n self.pid_blacklist = set()\n # if consumer is started, update the list in the consumer\n if self.consumer:\n self.consumer.reset_blacklist()\n\n\nclass ProviderInfo:\n \"\"\" Container class for provider info \"\"\"\n def __init__(self, name, guid, level=et.TRACE_LEVEL_INFORMATION, any_keywords=None, all_keywords=None, params=None):\n \"\"\"\n Initializes an instance of the ProviderInfo class.\n\n :param name: Name of the provider.\n :param guid: GUID of the provider.\n :param level: The info level for the provider.\n :param any_keywords: list of any keywords to add for provider, or a bitmask\n :param all_keywords: list of all keywords to add for provider, or a bitmask\n Unless you know what you are doing, do not modify this value.\n :param params: pointer to optional ENABLE_TRACE_PARAMETERS structure\n \"\"\"\n self.name = name\n self.guid = guid\n self.level = level\n if type(any_keywords) is list or any_keywords is None:\n self.any_bitmask = get_keywords_bitmask(guid, any_keywords)\n else:\n self.any_bitmask = any_keywords\n\n if type(all_keywords) is list or all_keywords is None:\n self.all_bitmask = get_keywords_bitmask(guid, all_keywords)\n else:\n self.all_bitmask = all_keywords\n self.params = params\n\n def __eq__(self, other):\n result = True\n self_dict = self.__dict__\n other_dict = other.__dict__\n self_params = self_dict.pop('params')\n other_params = other_dict.pop('params')\n\n if self_params:\n if other_params:\n for field in self_params.contents._fields_:\n attr_name = field[0]\n a, b = getattr(self_params.contents, attr_name), getattr(other_params.contents, attr_name)\n is_desc = isinstance(a, ct.POINTER(ep.EVENT_FILTER_DESCRIPTOR))\n if is_desc is True:\n 
if a:\n for desc_field in a.contents._fields_:\n desc_attr_name = desc_field[0]\n a_desc, b_desc = getattr(a.contents, desc_attr_name),\\\n getattr(b.contents, desc_attr_name)\n if a_desc != b_desc:\n result = False\n break\n else:\n if a != b:\n result = False\n break\n else:\n result = False\n\n result = self_dict == other_dict and result\n self_dict['params'] = self_params\n other_dict['params'] = other_params\n return result\n\n\nclass ProviderParameters:\n \"\"\"\n The ProviderParameters class represents the ENABLE_TRACE_PARAMETERS structure. The class wraps\n this structure to make it easier to interact with.\n \"\"\"\n\n def __init__(self, event_property, event_filters):\n \"\"\"\n Initializes an ENABLE_TRACE_PARAMETERS structure.\n\n :param event_property: Property to enable.\n See https://msdn.microsoft.com/en-us/library/windows/desktop/dd392306(v=vs.85).aspx\n :param event_filters: List of EVENT_FILTER_DESCRIPTOR structures\n \"\"\"\n\n self._props = ct.pointer(et.ENABLE_TRACE_PARAMETERS())\n\n filter_buf_size = ct.sizeof(ep.EVENT_FILTER_DESCRIPTOR) * len(event_filters)\n # noinspection PyCallingNonCallable\n filter_buf = (ct.c_char * filter_buf_size)()\n # copy contents to buffer\n for i in range(len(event_filters)):\n ct.memmove(ct.cast(ct.addressof(filter_buf) + (ct.sizeof(ep.EVENT_FILTER_DESCRIPTOR) * i), ct.c_void_p),\n ct.byref(event_filters[i]),\n ct.sizeof(ep.EVENT_FILTER_DESCRIPTOR))\n\n self._props.contents.Version = et.ENABLE_TRACE_PARAMETERS_VERSION_2\n self._props.contents.EnableProperty = event_property\n self._props.contents.ControlFlags = 0\n self._props.contents.EnableFilterDesc = ct.cast(ct.pointer(filter_buf), ct.POINTER(ep.EVENT_FILTER_DESCRIPTOR))\n self._props.contents.FilterDescCount = len(event_filters)\n\n def __eq__(self, other):\n for field in self.get().contents._fields_:\n attr_name = field[0]\n a, b = getattr(self.get().contents, attr_name), getattr(other.get().contents, attr_name)\n is_desc = isinstance(a, ct.POINTER(ep.EVENT_FILTER_DESCRIPTOR))\n if is_desc is True:\n if a:\n for desc_field in a.contents._fields_:\n desc_attr_name = desc_field[0]\n a_desc, b_desc = getattr(a.contents, desc_attr_name), getattr(b.contents, desc_attr_name)\n if a_desc != b_desc:\n return False\n else:\n if a != b:\n return False\n return True\n\n def get(self):\n \"\"\"\n This class wraps the construction of a struct for ctypes. As a result, in order to properly use it as a ctypes\n structure, you must use the private field _props. 
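For instance, the pointer returned by this getter is what ProviderInfo expects for its params argument (a sketch: the GUID is a placeholder, GUID-from-string construction is assumed, and the stack-trace property is only one possible choice):

        from etw import evntcons as ec
        from etw.GUID import GUID

        guid = GUID('{00000000-0000-0000-0000-000000000000}')  # placeholder provider GUID
        params = ProviderParameters(ec.EVENT_ENABLE_PROPERTY_STACK_TRACE, [])
        provider = ProviderInfo('Some Provider', guid, params=params.get())
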
To maintain proper encapsulation, this getter is used to\n retrieve this value when needed.\n\n :return: The _props field needed for using this class as a ctypes EVENT_FILTER_DESCRIPTOR structure.\n \"\"\"\n return self._props\n\n\ndef get_keywords_bitmask(guid, keywords):\n \"\"\"\n Queries available keywords of the provider and returns a bitmask of the associated values\n\n :param guid: The GUID of the ETW provider.\n :param keywords: List of keywords to resolve.\n :return Bitmask of the keyword flags ORed together\n \"\"\"\n\n bitmask = 0\n if keywords is None or len(keywords) == 0:\n return bitmask\n\n # enumerate the keywords for the provider as well as the bitmask values\n provider_info = None\n providers_size = wt.ULONG(0)\n status = tdh.TdhEnumerateProviderFieldInformation(\n ct.byref(guid),\n tdh.EventKeywordInformation,\n provider_info,\n ct.byref(providers_size))\n\n if status == tdh.ERROR_INSUFFICIENT_BUFFER:\n\n provider_info = ct.cast((ct.c_char * providers_size.value)(), ct.POINTER(tdh.PROVIDER_FIELD_INFOARRAY))\n status = tdh.TdhEnumerateProviderFieldInformation(\n ct.byref(guid),\n tdh.EventKeywordInformation,\n provider_info,\n ct.byref(providers_size))\n\n if tdh.ERROR_SUCCESS != status and tdh.ERROR_NOT_FOUND != status:\n raise ct.WinError(status)\n\n if provider_info:\n field_info_array = ct.cast(provider_info.contents.FieldInfoArray, ct.POINTER(tdh.PROVIDER_FIELD_INFO))\n provider_keywords = {}\n for i in range(provider_info.contents.NumberOfElements):\n provider_keyword = rel_ptr_to_str(provider_info, field_info_array[i].NameOffset)\n provider_keywords[provider_keyword] = field_info_array[i].Value\n\n for keyword in keywords:\n if keyword in provider_keywords:\n bitmask |= provider_keywords[keyword]\n\n return bitmask\n\n", "id": "3231673", "language": "Python", "matching_score": 8.771580696105957, "max_stars_count": 247, "path": "etw/etw.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport ctypes as ct\nimport ctypes.wintypes as wt\n\nfrom etw.GUID import GUID\nfrom etw import evntprov as ep\n\n# see https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/etw/tracelog/event_header.htm\nEVENT_HEADER_FLAG_EXTENDED_INFO = 0x01\nEVENT_HEADER_FLAG_PRIVATE_SESSION = 0x02\nEVENT_HEADER_FLAG_STRING_ONLY = 0x04\nEVENT_HEADER_FLAG_TRACE_MESSAGE = 0x08\nEVENT_HEADER_FLAG_NO_CPUTIME = 0x10\nEVENT_HEADER_FLAG_32_BIT_HEADER = 0x20\nEVENT_HEADER_FLAG_64_BIT_HEADER = 0x40\nEVENT_HEADER_FLAG_CLASSIC_HEADER = 0x100\nEVENT_HEADER_FLAG_PROCESSOR_INDEX = 0x200\n\nEVENT_HEADER_EXT_TYPE_RELATED_ACTIVITYID = 0x0001\nEVENT_HEADER_EXT_TYPE_SID = 0x0002\nEVENT_HEADER_EXT_TYPE_TS_ID = 0x0003\nEVENT_HEADER_EXT_TYPE_INSTANCE_INFO = 0x0004\nEVENT_HEADER_EXT_TYPE_STACK_TRACE32 = 0x0005\nEVENT_HEADER_EXT_TYPE_STACK_TRACE64 = 0x0006\nEVENT_HEADER_EXT_TYPE_PEBS_INDEX = 
0x0007\nEVENT_HEADER_EXT_TYPE_PMC_COUNTERS = 0x0008\nEVENT_HEADER_EXT_TYPE_PSM_KEY = 0x0009\nEVENT_HEADER_EXT_TYPE_EVENT_KEY = 0x000A\nEVENT_HEADER_EXT_TYPE_EVENT_SCHEMA_TL = 0x000B\nEVENT_HEADER_EXT_TYPE_PROV_TRAITS = 0x000C\nEVENT_HEADER_EXT_TYPE_PROCESS_START_KEY = 0x000D\nEVENT_HEADER_EXT_TYPE_MAX = 0x000E\n\nEVENT_ENABLE_PROPERTY_SID = 0x00000001\nEVENT_ENABLE_PROPERTY_TS_ID = 0x00000002\nEVENT_ENABLE_PROPERTY_STACK_TRACE = 0x00000004\nEVENT_ENABLE_PROPERTY_PSM_KEY = 0x00000008\nEVENT_ENABLE_PROPERTY_IGNORE_KEYWORD_0 = 0x00000010\nEVENT_ENABLE_PROPERTY_PROVIDER_GROUP = 0x00000020\nEVENT_ENABLE_PROPERTY_ENABLE_KEYWORD_0 = 0x00000040\nEVENT_ENABLE_PROPERTY_PROCESS_START_KEY = 0x00000080\n\n# Definitions from evntcons.h file\nPROCESS_TRACE_MODE_REAL_TIME = 0x00000100\nPROCESS_TRACE_MODE_RAW_TIMESTAMP = 0x00001000\nPROCESS_TRACE_MODE_EVENT_RECORD = 0x10000000\n\n\nclass EVENT_EXTENDED_ITEM_PROCESS_START_KEY(ct.Structure):\n _fields_ = [('ProcessStartKey', ct.c_ulonglong)]\n\n\nclass EVENT_EXTENDED_ITEM_RELATED_ACTIVITYID(ct.Structure):\n _fields_ = [('RelatedActivityId', GUID)]\n\n\nclass EVENT_EXTENDED_ITEM_TS_ID(ct.Structure):\n _fields_ = [('SessionId', ct.c_ulong)]\n\n\nclass EVENT_EXTENDED_ITEM_INSTANCE(ct.Structure):\n _fields_ = [('InstanceId', ct.c_ulong),\n ('ParentInstanceId', ct.c_ulong),\n ('ParentGuid', GUID),\n ]\n\n\nclass EVENT_EXTENDED_ITEM_EVENT_KEY(ct.Structure):\n _fields_ = [('Key', ct.c_ulonglong)]\n\n\nclass EVENT_EXTENDED_ITEM_STACK_TRACE32(ct.Structure):\n _fields_ = [('MatchId', ct.c_ulonglong),\n ('Address', ct.c_ulong * 1),\n ]\n\n\nclass EVENT_EXTENDED_ITEM_STACK_TRACE64(ct.Structure):\n _fields_ = [('MatchId', ct.c_ulonglong),\n ('Address', ct.c_ulonglong * 1),\n ]\n\n\nclass EVENT_EXTENDED_ITEM_PEBS_INDEX(ct.Structure):\n _fields_ = [('PebsIndex', ct.c_ulonglong)]\n\n\nclass EVENT_HEADER(ct.Structure):\n _fields_ = [('Size', ct.c_ushort),\n ('HeaderType', ct.c_ushort),\n ('Flags', ct.c_ushort),\n ('EventProperty', ct.c_ushort),\n ('ThreadId', ct.c_ulong),\n ('ProcessId', ct.c_ulong),\n ('TimeStamp', wt.LARGE_INTEGER),\n ('ProviderId', GUID),\n ('EventDescriptor', ep.EVENT_DESCRIPTOR),\n ('KernelTime', ct.c_ulong),\n ('UserTime', ct.c_ulong),\n ('ActivityId', GUID)]\n\n\nclass ETW_BUFFER_CONTEXT(ct.Structure):\n _fields_ = [('ProcessorNumber', ct.c_ubyte),\n ('Alignment', ct.c_ubyte),\n ('LoggerId', ct.c_ushort)]\n\n\nclass EVENT_HEADER_EXTENDED_DATA_ITEM(ct.Structure):\n _fields_ = [('Reserved1', ct.c_ushort),\n ('ExtType', ct.c_ushort),\n ('Linkage', ct.c_ushort), # struct{USHORT :1, USHORT :15}\n ('DataSize', ct.c_ushort),\n ('DataPtr', ct.c_ulonglong)]\n\n\nclass EVENT_RECORD(ct.Structure):\n _fields_ = [('EventHeader', EVENT_HEADER),\n ('BufferContext', ETW_BUFFER_CONTEXT),\n ('ExtendedDataCount', ct.c_ushort),\n ('UserDataLength', ct.c_ushort),\n ('ExtendedData', ct.POINTER(EVENT_HEADER_EXTENDED_DATA_ITEM)),\n ('UserData', ct.c_void_p),\n ('UserContext', ct.c_void_p)]\n", "id": "10087857", "language": "Python", "matching_score": 3.914290428161621, "max_stars_count": 247, "path": "etw/evntcons.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is 
distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport ctypes as ct\nimport ctypes.wintypes as wt\n\nfrom etw.GUID import GUID\n\n\n# WNODE_HEADER flags\nWNODE_FLAG_TRACED_GUID = 0x00020000\n\n\nclass WNODE_HEADER(ct.Structure):\n _fields_ = [('BufferSize', ct.c_ulong),\n ('ProviderId', ct.c_ulong),\n ('HistoricalContext', ct.c_uint64),\n ('TimeStamp', wt.LARGE_INTEGER),\n ('Guid', GUID),\n ('ClientContext', ct.c_ulong),\n ('Flags', ct.c_ulong)]\n", "id": "11440795", "language": "Python", "matching_score": 0.586976945400238, "max_stars_count": 247, "path": "etw/wmistr.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport ctypes as ct\nimport ctypes.wintypes as wt\n\n\nEVENT_FILTER_TYPE_NONE = 0x00000000\nEVENT_FILTER_TYPE_SCHEMATIZED = 0x80000000\nEVENT_FILTER_TYPE_SYSTEM_FLAGS = 0x80000001\nVENT_FILTER_TYPE_TRACEHANDLE = 0x80000002\nEVENT_FILTER_TYPE_PID = 0x80000004\nEVENT_FILTER_TYPE_EXECUTABLE_NAME = 0x80000008\nEVENT_FILTER_TYPE_PACKAGE_ID = 0x80000010\nEVENT_FILTER_TYPE_PACKAGE_APP_ID = 0x80000020\nEVENT_FILTER_TYPE_PAYLOAD = 0x80000100\nEVENT_FILTER_TYPE_EVENT_ID = 0x80000200\nEVENT_FILTER_TYPE_STACKWALK = 0x80001000\n\n\nMAX_EVENT_FILTER_EVENT_ID_COUNT = 64\nMAX_EVENT_FILTER_DATA_SIZE = 1024\n\n\nclass EVENT_FILTER_DESCRIPTOR(ct.Structure):\n _fields_ = [('Ptr', ct.c_ulonglong),\n ('Size', ct.c_ulong),\n ('Type', ct.c_ulong)]\n\n\nclass EVENT_FILTER_HEADER(ct.Structure):\n _fields_ = [('Id', wt.USHORT),\n ('Version', wt.CHAR),\n ('Reserved', wt.CHAR * 5),\n ('InstanceId', ct.c_ulonglong),\n ('Size', wt.ULONG),\n ('NextOffset', wt.ULONG)]\n\n\nclass EVENT_FILTER_EVENT_ID(ct.Structure):\n _fields_ = [('FilterIn', wt.BOOLEAN),\n ('Reserved', wt.CHAR),\n ('Count', wt.USHORT),\n ('Events', wt.USHORT * 0)]\n\n def __init__(self, filter_in, events):\n struct_size = len(events) * ct.sizeof(wt.USHORT) + ct.sizeof(EVENT_FILTER_EVENT_ID)\n self._buf = (ct.c_char * struct_size)()\n self._props = ct.cast(ct.pointer(self._buf), ct.POINTER(EVENT_FILTER_EVENT_ID))\n self._props.contents.FilterIn = filter_in\n self._props.contents.Reserved = 0\n self._props.contents.Count = len(events)\n\n for i in range(len(events)):\n ct.memmove(ct.cast(ct.addressof(self._buf) + ct.sizeof(EVENT_FILTER_EVENT_ID) + (ct.sizeof(wt.WCHAR) * i),\n ct.c_void_p),\n ct.byref(wt.USHORT(events[i])),\n ct.sizeof(wt.WCHAR))\n\n def get(self):\n return self._props\n\n\nclass EVENT_FILTER_LEVEL_KW(ct.Structure):\n _fields_ = [('MatchAnyKeyword', ct.c_ulonglong),\n ('MatchAllKeyword', ct.c_ulonglong),\n ('Level', wt.CHAR),\n ('FilterIn', wt.BOOLEAN)]\n\n\nclass 
EVENT_FILTER_EVENT_NAME(ct.Structure):\n _fields_ = [('MatchAnyKeyword', ct.c_ulonglong),\n ('MatchAllKeyword', ct.c_ulonglong),\n ('Level', wt.CHAR),\n ('FilterIn', wt.BOOLEAN),\n ('NameCount', wt.USHORT),\n ('Names', wt.CHAR * 0)]\n\n def __init__(self, match_any, match_all, level, filter_in, names):\n struct_size = ((sum([len(name) for name in names]) * ct.sizeof(wt.CHAR)) + (ct.sizeof(wt.CHAR) * len(names))) +\\\n ct.sizeof(EVENT_FILTER_EVENT_NAME)\n self._buf = (ct.c_char * struct_size)()\n self._props = ct.cast(ct.pointer(self._buf), ct.POINTER(EVENT_FILTER_EVENT_NAME))\n self._props.contents.MatchAnyKeyword = match_any\n self._props.contents.MatchAllKeyword = match_all\n self._props.contents.Level = level\n self._props.contents.FilterIn = filter_in\n self._props.contents.NameCount = len(names)\n\n str_off = 0\n for i in range(len(names)):\n ct.memmove(ct.cast(ct.addressof(self._buf) + ct.sizeof(EVENT_FILTER_EVENT_NAME) + str_off,\n ct.c_void_p),\n names[i],\n len(names[i]))\n str_off += len(names[i]) + ct.sizeof(wt.CHAR)\n\n def get(self):\n return self._props\n\n\nclass EVENT_DESCRIPTOR(ct.Structure):\n _fields_ = [('Id', ct.c_ushort),\n ('Version', ct.c_ubyte),\n ('Channel', ct.c_ubyte),\n ('Level', ct.c_ubyte),\n ('Opcode', ct.c_ubyte),\n ('Task', ct.c_ushort),\n ('Keyword', ct.c_ulonglong)]\n", "id": "51307", "language": "Python", "matching_score": 1.166673183441162, "max_stars_count": 247, "path": "etw/evntprov.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\n# This simply parses a CS file containing definitions for all types of an MOF-based ETW provider.\n# Ultimately, it is looking for all of the valid targets as well as their associated fields.\n\nimport sys\n\n\ndef main():\n if len(sys.argv) != 2:\n print('usage: parse_cs.py [PATH_TO_CS]')\n return\n\n with open(sys.argv[1], 'r') as cs:\n buf = cs.read()\n\n artifact = 'public sealed class '\n field_artifact = 'payloadNames = new string[] {'\n offset = 0\n i = -1\n\n while True:\n i += 1\n\n # Get the start of the target\n offset = buf.find(artifact, offset)\n if offset == -1:\n break\n\n # Increment past the artifact\n offset += len(artifact)\n\n # Get the end of the target\n end_offset = buf.find(' ', offset)\n\n # Skip the first match -- it is erroneous\n if i == 0:\n continue\n\n # Print the target name\n print(buf[offset:end_offset].rstrip('Args').upper())\n\n # Get the offset to the list of fields for this target\n offset = buf.find(field_artifact, offset)\n offset += len(field_artifact)\n\n # Get the offset to the end of the list of fields for this target\n end_offset = buf.find('}', offset)\n\n # Print each field for the current target\n for field in buf[offset:end_offset].split(','):\n print('\\t- %s' % field.strip(' \"'))\n\n print()\n\n\nif __name__ == '__main__':\n main()\n", "id": "5816918", "language": "Python", 
"matching_score": 0.19895558059215546, "max_stars_count": 247, "path": "utils/parse_cs.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport time\nimport etw\n\n\nclass MyETW(etw.ETW):\n\n def __init__(self, event_callback):\n # define capture provider info\n providers = [etw.ProviderInfo('Some Provider', etw.GUID(\"{11111111-1111-1111-1111-111111111111}\"))]\n super().__init__(providers=providers, event_callback=event_callback)\n\n def start(self):\n # do pre-capture setup\n self.do_capture_setup()\n super().start()\n\n def stop(self):\n super().stop()\n # do post-capture teardown\n self.do_capture_teardown()\n\n def do_capture_setup(self):\n # do whatever setup for capture here\n pass\n\n def do_capture_teardown(self):\n # do whatever for capture teardown here\n pass\n\n\ndef my_capture():\n # instantiate class\n capture = MyETW(lambda x: print(x))\n # start capture\n capture.start()\n # wait some time to capture data\n time.sleep(5)\n # stop capture\n capture.stop()\n\n\nif __name__ == '__main__':\n my_capture()\n", "id": "7964645", "language": "Python", "matching_score": 3.1344215869903564, "max_stars_count": 247, "path": "examples/simple_class.py" }, { "content": "########################################################################\n# Copyright 2017 FireEye Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n########################################################################\n\nimport time\nimport sys\nimport etw\n\n# append the path of pywintrace-master\nsys.path.append('''C:\\\\Users\\\\tyh\\\\desktop\\\\pywintrace-master''')\n\n\n# python -u \"C:\\Users\\tyh_2\\desktop\\pywintrace-master\\examples\\simple2.py\"\n\ndef some_func():\n # define capture provider info\n\n providers0 = [etw.ProviderInfo('PowerShell', etw.GUID(\"{A0C1853B-5C40-4B15-8766-3CF1C58F985A}\"))]\n providers1 = [etw.ProviderInfo('Kernel-Registry', etw.GUID(\"{70EB4F03-C1DE-4F73-A051-33D13D5413BD}\"))]\n providers2 = [etw.ProviderInfo('Kernel-Network', etw.GUID(\"{7DD42A49-5329-4832-8DFD-43D979153A88}\"))]\n providers3 = [etw.ProviderInfo('Kernel-File', etw.GUID(\"{EDD08927-9CC4-4E65-B970-C2560FB5C289}\"))]\n providers4 = [etw.ProviderInfo('Kernel-Audit-API-Calls', etw.GUID(\"{E02A841C-75A3-4FA7-AFC8-AE09CF9B7F23}\"))]\n providers5 = [etw.ProviderInfo('Kernel-Disk', 
etw.GUID(\"{C7BDE69A-E1E0-4177-B6EF-283AD1525271}\"))]\n providers6 = [etw.ProviderInfo('HttpEvent', etw.GUID(\"{7B6BC78C-898B-4170-BBF8-1A469EA43FC5}\"))]\n providers7 = [etw.ProviderInfo('HttpLog', etw.GUID(\"{C42A2738-2333-40A5-A32F-6ACC36449DCC}\"))]\n providers8 = [etw.ProviderInfo('HttpService', etw.GUID(\"{DD5EF90A-6398-47A4-AD34-4DCECDEF795F}\"))]\n\n # change providers0 to providers1,2,..8 to get other logs\n job0 = etw.ETW(providers=providers0, event_callback=lambda x: print(x))\n job1 = etw.ETW(providers=providers1, event_callback=lambda x: print(x))\n job2 = etw.ETW(providers=providers2, event_callback=lambda x: print(x))\n job3 = etw.ETW(providers=providers3, event_callback=lambda x: print(x))\n job4 = etw.ETW(providers=providers4, event_callback=lambda x: print(x))\n job5 = etw.ETW(providers=providers5, event_callback=lambda x: print(x))\n job6 = etw.ETW(providers=providers6, event_callback=lambda x: print(x))\n job7 = etw.ETW(providers=providers7, event_callback=lambda x: print(x))\n job8 = etw.ETW(providers=providers8, event_callback=lambda x: print(x))\n job0.start()\n job1.start()\n job2.start()\n job3.start()\n job4.start()\n job5.start()\n job6.start()\n job7.start()\n job8.start()\n time.sleep(600)\n job0.stop()\n job0.stop()\n job1.stop()\n job2.stop()\n job3.stop()\n job4.stop()\n job5.stop()\n job6.stop()\n job7.stop()\n job8.stop()\n\n\nif __name__ == '__main__':\n some_func()\n\n", "id": "7383990", "language": "Python", "matching_score": 1.7421073913574219, "max_stars_count": 0, "path": "examples/simple2.py" } ]
2.438264
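Each file entry embedded in the rows above carries its full source text under a content key alongside metadata such as language, path, matching_score, and max_stars_count. The following is a minimal sketch of one way to materialize those embedded files back onto disk, assuming the entries are available as a plain JSON list using exactly those key names; the files_dump.json input name and the extract_files helper are hypothetical and not part of the dump itself.

import json
import os


def extract_files(entries, out_dir="extracted"):
    """Write each entry's embedded 'content' string to its recorded 'path' under out_dir."""
    for entry in entries:
        dest = os.path.join(out_dir, entry["path"])
        # Create intermediate directories (e.g. etw/ or examples/) as needed.
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        with open(dest, "w", encoding="utf-8") as fh:
            fh.write(entry["content"])


if __name__ == "__main__":
    # Hypothetical input: a JSON file holding one list of file entries.
    with open("files_dump.json", encoding="utf-8") as fh:
        extract_files(json.load(fh))

Writing through entry["path"] keeps the original repository layout (for example etw/evntcons.py or examples/simple2.py), so the extracted tree mirrors the source repo recorded in the row.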
many7695
[ { "content": "from django.shortcuts import reverse\nfrom django.test import override_settings\nfrom selenium.webdriver.support.ui import Select\n\nfrom .base import HerokuFunctionalTest\n\n\nclass PostMenuTest(HerokuFunctionalTest):\n\n def assertChangeEnvVarPost(self, indx, new_value, ):\n self.assertPost(\"id_new_value\", new_value, indx)\n\n def assertDropdownSelectPost(self, id, new_value, indx=0):\n select_input = Select(self.wait_for_finding(lambda: self.browser.find_elements_by_id(id))[indx])\n select_input.select_by_value(new_value)\n self.browser.find_element_by_name(\"language_button\").click()\n self.check_alert()\n\n @override_settings(SECRET_KEY=\"mycoolsecretkey\")\n def test_posting_forms(self):\n self.browser.get(self.live_server_url + reverse(\"menu\") + \"?key=mycoolsecretkey\")\n self.assertDropdownSelectPost(\"id_language\", \"en\")\n self.assertChangeEnvVarPost(0, \"https://benc.com\")\n self.assertChangeEnvVarPost(1, 73)\n self.assertChangeEnvVarPost(2, 144)\n self.assertChangeEnvVarPost(3, \"akey\")\n self.assertChangeEnvVarPost(4, \"asecret\")\n self.assertChangeEnvVarPost(5, \"tsid\")\n self.assertChangeEnvVarPost(6, \"tsecret\")\n self.assertPost(\"id_time\", \"16:01\", clear=True)\n\n @override_settings(SECRET_KEY=\"mycoolsecretkey\", TOKEN=\"\")\n def test_posting_forms_without_token(self):\n self.test_posting_forms()\n", "id": "7079945", "language": "Python", "matching_score": 2.5351650714874268, "max_stars_count": 0, "path": "remider/tests/test_functional_post_forms_menu.py" }, { "content": "from .base import FunctionalTest\nfrom django.shortcuts import reverse\nfrom django.test import override_settings\n\n\nclass NotificationCentrumTest(FunctionalTest):\n @override_settings(SECRET_KEY=\"mycoolsecretkey\")\n def test_button(self):\n self.browser.get(self.live_server_url + reverse(\"menu\") + \"?key=mycoolsecretkey\")\n self.wait_for_finding(lambda: self.browser.find_element_by_css_selector(\".btn-primary\")).click()\n self.assertUrlNow(\"notif-center\", add_secret_key=True)\n\n @override_settings(SECRET_KEY=\"mycoolsecretkey\", TRIGGER_IFTTT=False, SEND_SMS=True, TOKEN=\"\")\n def test_posting_forms(self):\n self.browser.get(self.live_server_url + reverse(\"notif-center\") + \"?key=mycoolsecretkey\")\n ifttt_checkbox = self.wait_for_finding(lambda: self.browser.find_element_by_id(\"id_ifttt_notifications\"))\n sms_checkbox = self.wait_for_finding(lambda: self.browser.find_element_by_id(\"id_sms_notifications\"))\n self.assertFalse(ifttt_checkbox.is_selected())\n self.assertTrue(sms_checkbox.is_selected())\n ifttt_checkbox.click()\n self.browser.find_elements_by_css_selector(\".btn-primary\")[0].click()\n self.wait_for_finding(lambda: self.check_alert())\n ifttt_checkbox = self.wait_for_finding(lambda: self.browser.find_element_by_id(\"id_ifttt_notifications\"))\n sms_checkbox = self.wait_for_finding(lambda: self.browser.find_element_by_id(\"id_sms_notifications\"))\n self.assertTrue(ifttt_checkbox.is_selected())\n self.assertTrue(sms_checkbox.is_selected())\n sms_checkbox.click()\n self.browser.find_elements_by_css_selector(\".btn-primary\")[0].click()\n self.check_alert()\n ifttt_checkbox = self.wait_for_finding(lambda: self.browser.find_element_by_id(\"id_ifttt_notifications\"))\n sms_checkbox = self.wait_for_finding(lambda: self.browser.find_element_by_id(\"id_sms_notifications\"))\n self.assertTrue(ifttt_checkbox.is_selected())\n self.assertFalse(sms_checkbox.is_selected())\n", "id": "12756691", "language": "Python", "matching_score": 
0.18369844555854797, "max_stars_count": 0, "path": "remider/tests/test_functional_notifcations_center.py" }, { "content": "import requests\nimport responses\nimport datetime\n\nfrom django.test import TestCase, override_settings\n\nfrom ..data_processing import *\nfrom ..models import InfusionChanged, SensorChanged\n\n\nclass DataProcessingTests(TestCase):\n\n @responses.activate\n def test_process_nighscout_response_empty(self):\n responses.add(responses.GET, 'https://benc.com/api/v1/treatments',\n json={}, status=200)\n response = requests.get(\"https://benc.com/api/v1/treatments\")\n processed = process_nightscouts_api_response(response)\n self.assertIsNone(processed[0])\n self.assertIsNone(processed[1])\n\n @responses.activate\n def test_process_nighscout_response(self):\n responses.add(responses.GET, 'https://benc.com/api/v1/treatments',\n json=[{\"_id\": \"5d361635a7775b341af642eb\", \"created_at\": \"2019-07-22T21:37:28+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 118, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642cd\", \"absolute\": 1.3,\n \"created_at\": \"2019-07-22T20:14:57+02:00\", \"duration\": 30, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL816AEC83\", \"notes\": \"Temp Basal: 1.3U, duration 30 minutes\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d361634a7775b341af642b1\", \"carbs\": 12, \"created_at\": \"2019-07-22T20:03:13+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 1, \"key600\": \"BOLUS816AE9C3\",\n \"notes\": \"carb 12g 1.0U, {75~100} 0.0U, iob 2.0 0.0U (12g/u, isf 125/u)\"},\n {\"_id\": \"5d361635a7775b341af642b3\", \"carbs\": 24, \"created_at\": \"2019-07-22T19:18:33+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2, \"key600\": \"BOLUS816ADF4B\",\n \"notes\": \"carb 24g 2.0U, {75~100} 0.0U, iob 1.3 0.0U (12g/u, isf 125/u)\"},\n {\"_id\": \"5d361635a7775b341af642ed\", \"created_at\": \"2019-07-22T18:30:12+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 144, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642b5\", \"created_at\": \"2019-07-22T18:15:25+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.5, \"key600\": \"BOLUS816AD07F\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.5U, iob 3.3 -0.5U (12g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d361635a7775b341af642ef\", \"created_at\": \"2019-07-22T18:14:47+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 158, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642b7\", \"carbs\": 30, \"created_at\": \"2019-07-22T17:20:58+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.5, \"key600\": \"BOLUS816AC3BC\",\n \"notes\": \"carb 30g 2.5U, {75~100} 0.0U, iob 2.8 0.0U (12g/u, isf 125/u)\"},\n {\"_id\": \"5d361635a7775b341af642cf\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-22T17:20:46+02:00\", \"duration\": 15, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL816AC3B0\",\n \"notes\": \"Temp Basal: 3.3U, duration 30 minutes * canceled, duration 15 minutes\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642b9\", \"carbs\": 21, \"created_at\": \"2019-07-22T16:58:19+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.125, \"key600\": \"BOLUS816ABE6D\",\n \"notes\": \"carb 21g 2.1U, {75~100} 0.0U, iob 1.0 0.0U (10g/u, isf 125/u)\"},\n 
{\"_id\": \"5d361635a7775b341af642bb\", \"carbs\": 8, \"created_at\": \"2019-07-22T16:43:52+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.8, \"key600\": \"BOLUS816ABB0A\",\n \"notes\": \"carb 8g 0.8U, {75~100} 0.0U, iob 0.3 0.0U (10g/u, isf 125/u)\"},\n {\"_id\": \"5d361635a7775b341af642d1\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-22T16:30:29+02:00\", \"duration\": 30, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL816AB7E7\", \"notes\": \"Temp Basal: 3.3U, duration 30 minutes\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642d3\", \"created_at\": \"2019-07-22T16:30:14+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME816AB7D8\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642d5\", \"absolute\": 0,\n \"created_at\": \"2019-07-22T15:36:43+02:00\", \"duration\": 53, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND816AAB4C\", \"notes\": \"Pump suspended insulin delivery: User suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642bd\", \"carbs\": 7, \"created_at\": \"2019-07-22T15:15:02+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.7, \"key600\": \"BOLUS816AA637\",\n \"notes\": \"carb 7g 0.7U, {75~100} 0.0U, iob 1.5 0.0U (10g/u, isf 125/u)\"},\n {\"_id\": \"5d361635a7775b341af642d7\", \"created_at\": \"2019-07-22T15:03:14+02:00\",\n \"duration\": 22, \"eventType\": \"Temp Basal\", \"key600\": \"BASAL816AA373\",\n \"notes\": \"Temp Basal: 200%, duration 30 minutes * canceled, duration 22 minutes\",\n \"percent\": 100, \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642d9\", \"created_at\": \"2019-07-22T14:45:49+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME816A9F5E\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642db\", \"absolute\": 0,\n \"created_at\": \"2019-07-22T14:36:59+02:00\", \"duration\": 8, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND816A9D4C\", \"notes\": \"Pump suspended insulin delivery: User suspend\",\n \"carbs\": None, \"insulin\": None}, {\"_id\": \"5d361635a7775b341af642dd\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-22T13:31:35+02:00\",\n \"duration\": 30, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL816A8DF8\",\n \"notes\": \"Temp Basal: 3.3U, duration 30 minutes\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642bf\", \"carbs\": 10, \"created_at\": \"2019-07-22T13:24:27+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 1, \"key600\": \"BOLUS816A8C4C\",\n \"notes\": \"carb 10g 1.0U, {75~100} 0.0U, iob 6.0 0.0U (10g/u, isf 125/u)\"},\n {\"_id\": \"5d361635a7775b341af642c1\", \"carbs\": 45, \"created_at\": \"2019-07-22T13:22:12+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 4.575, \"key600\": \"BOLUS816A8BC5\",\n \"notes\": \"carb 45g 4.6U, {75~100} 0.0U, iob 1.5 0.0U (10g/u, isf 125/u)\"},\n {\"_id\": \"5d361635a7775b341af642f1\", \"created_at\": \"2019-07-22T12:56:15+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 130, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642c3\", \"carbs\": 20, \"created_at\": \"2019-07-22T11:48:30+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.175, \"key600\": \"BOLUS816A75CF\",\n \"notes\": \"carb 20g 2.2U, {75~100} 
0.0U, iob 2.8 0.0U (9g/u, isf 125/u)\"},\n {\"_id\": \"5d361635a7775b341af642c5\", \"created_at\": \"2019-07-22T11:27:48+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 1, \"key600\": \"BOLUS816A70F5\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.9U, iob 2.2 -0.9U (9g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d361635a7775b341af642f3\", \"created_at\": \"2019-07-22T11:27:47+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 212, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642c7\", \"carbs\": 22, \"created_at\": \"2019-07-22T10:58:44+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.4, \"key600\": \"BOLUS816A6A25\",\n \"notes\": \"carb 22g 2.4U, {75~100} 0.0U, iob 0.5 0.0U (9g/u, isf 125/u)\"},\n {\"_id\": \"5d361635a7775b341af642df\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-22T10:45:33+02:00\", \"duration\": 30, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL816A670E\", \"notes\": \"Temp Basal: 3.3U, duration 30 minutes\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642e1\", \"created_at\": \"2019-07-22T10:43:37+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME816A669A\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642e3\", \"absolute\": 0,\n \"created_at\": \"2019-07-22T09:16:34+02:00\", \"duration\": 87, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND816A5232\", \"notes\": \"Pump suspended insulin delivery: User suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642e5\", \"created_at\": \"2019-07-22T08:48:53+02:00\",\n \"duration\": 4, \"eventType\": \"Temp Basal\", \"key600\": \"BASAL816A4BB5\",\n \"notes\": \"Temp Basal: 200%, duration 30 minutes * canceled, duration 4 minutes\",\n \"percent\": 100, \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642c9\", \"carbs\": 12, \"created_at\": \"2019-07-22T08:33:35+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 1.3, \"key600\": \"BOLUS816A481F\",\n \"notes\": \"carb 12g 1.3U, {75~100} 0.0U, iob 3.7 0.0U (9g/u, isf 125/u)\"},\n {\"_id\": \"5d361635a7775b341af642cb\", \"carbs\": 35, \"created_at\": \"2019-07-22T08:15:27+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 3.825, \"key600\": \"BOLUS816A43DF\",\n \"notes\": \"carb 35g 3.8U, {75~100} 0.0U, iob 0.0 0.0U (9g/u, isf 125/u)\"},\n {\"_id\": \"5d361635a7775b341af642e7\", \"created_at\": \"2019-07-22T07:22:42+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME816A3782\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642e9\", \"absolute\": 0,\n \"created_at\": \"2019-07-22T06:40:57+02:00\", \"duration\": 41, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND816A2DB9\", \"notes\": \"Pump suspended insulin delivery: User suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d361635a7775b341af642f5\", \"created_at\": \"2019-07-22T06:40:29+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 134, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d350baba7775b341af0fcf1\", \"created_at\": \"2019-07-22T03:01:49+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.2, \"key600\": \"BOLUS8169FA60\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.5U, iob 
0.0 0.0U (10g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d361635a7775b341af642f7\", \"created_at\": \"2019-07-22T02:58:35+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 160, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34eb69a7775b341af0907c\", \"created_at\": \"2019-07-22T00:43:09+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME8169D9E0\",\n \"notes\": \"Pump resumed insulin delivery: Low glucose auto resume - max suspend period\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34eb69a7775b341af09080\", \"created_at\": \"2019-07-21T23:31:59+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 84, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34d4f9a7775b341af01584\", \"created_at\": \"2019-07-21T23:08:56+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 88, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34d4f9a7775b341af01586\", \"created_at\": \"2019-07-21T22:50:08+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 91, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34eb69a7775b341af0907e\", \"absolute\": 0,\n \"created_at\": \"2019-07-21T22:43:03+02:00\", \"duration\": 120, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND8169BDBB\",\n \"notes\": \"Pump suspended insulin delivery: Predicted low glucose suspend\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d34cf3fa7775b341aefee51\", \"created_at\": \"2019-07-21T22:26:06+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 89, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34ca9fa7775b341aefcfdf\", \"created_at\": \"2019-07-21T22:09:04+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME8169B5C3\",\n \"notes\": \"Pump resumed insulin delivery: Low glucose manual resume\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b9c\", \"created_at\": \"2019-07-21T21:39:02+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 81, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34ca9fa7775b341aefcfe1\", \"absolute\": 0,\n \"created_at\": \"2019-07-21T21:27:58+02:00\", \"duration\": 41, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND8169AC21\",\n \"notes\": \"Pump suspended insulin delivery: Predicted low glucose suspend\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b9e\", \"created_at\": \"2019-07-21T20:59:49+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 95, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8ba0\", \"created_at\": \"2019-07-21T20:39:24+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 133, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b7e\", \"created_at\": \"2019-07-21T20:32:24+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME81699F1B\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": 
\"5d34c068a7775b341aef8bb8\", \"created_at\": \"2019-07-21T20:30:40+02:00\",\n \"eventType\": \"Site Change\", \"key600\": \"MISC81699EB3\", \"notes\": \"Reservoir changed\",\n \"carbs\": None, \"insulin\": None}, {\"_id\": \"5d34c068a7775b341aef8b80\", \"absolute\": 0,\n \"created_at\": \"2019-07-21T20:30:01+02:00\", \"duration\": 2,\n \"eventType\": \"Temp Basal\", \"key600\": \"SUSPEND81699E8C\",\n \"notes\": \"Pump suspended insulin delivery: Set change suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b6a\", \"carbs\": 24, \"created_at\": \"2019-07-21T19:26:14+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2, \"key600\": \"BOLUS81698F99\",\n \"notes\": \"carb 24g 2.0U, {75~100} 0.9U, iob 5.2 -0.9U (12g/u, isf 125/u)\"},\n {\"_id\": \"5d34c068a7775b341aef8ba2\", \"created_at\": \"2019-07-21T19:25:39+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 207, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b6c\", \"carbs\": 50, \"created_at\": \"2019-07-21T19:11:18+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 4.15, \"key600\": \"BOLUS81698C19\",\n \"notes\": \"carb 50g 4.2U, {75~100} 1.0U, iob 1.5 -1.0U (12g/u, isf 125/u)\"},\n {\"_id\": \"5d34c068a7775b341aef8ba4\", \"created_at\": \"2019-07-21T19:05:29+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 226, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8bba\", \"created_at\": \"2019-07-21T18:58:52+02:00\",\n \"eventType\": \"Sensor Start\", \"key600\": \"MISC8169892F\", \"notes\": \"Sensor changed\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b6e\", \"created_at\": \"2019-07-21T18:16:30+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2, \"key600\": \"BOLUS81697F41\",\n \"notes\": \"carb 0g 0.0U, {75~100} 1.7U, iob 0.0 0.0U (12g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d34c068a7775b341aef8b82\", \"created_at\": \"2019-07-21T18:13:31+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME81697E8E\",\n \"notes\": \"Pump resumed insulin delivery: Low glucose manual resume\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8ba6\", \"created_at\": \"2019-07-21T18:12:38+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 309, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b84\", \"absolute\": 0,\n \"created_at\": \"2019-07-21T17:46:41+02:00\", \"duration\": 26, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND81697844\",\n \"notes\": \"Pump suspended insulin delivery: Predicted low glucose suspend\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b86\", \"created_at\": \"2019-07-21T17:23:32+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME816972D7\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b88\", \"absolute\": 0,\n \"created_at\": \"2019-07-21T16:24:56+02:00\", \"duration\": 58, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND8169651B\", \"notes\": \"Pump suspended insulin delivery: User suspend\",\n \"carbs\": None, \"insulin\": None}, {\"_id\": \"5d34c068a7775b341aef8b8a\", \"absolute\": 3.3,\n \"created_at\": 
\"2019-07-21T15:51:07+02:00\",\n \"duration\": 30, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL81695D2E\",\n \"notes\": \"Temp Basal: 3.3U, duration 30 minutes\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b70\", \"created_at\": \"2019-07-21T15:21:05+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.875, \"key600\": \"BOLUS81695623\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.9U, iob 0.9 -0.9U (10g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d34c068a7775b341aef8b8c\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-21T15:20:35+02:00\", \"duration\": 10, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL81695605\",\n \"notes\": \"Temp Basal: 3.3U, duration 30 minutes * canceled, duration 10 minutes\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8ba8\", \"created_at\": \"2019-07-21T15:15:00+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 210, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b8e\", \"created_at\": \"2019-07-21T14:56:51+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME81695075\",\n \"notes\": \"Pump resumed insulin delivery: Low glucose auto resume - preset glucose reached\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8baa\", \"created_at\": \"2019-07-21T14:22:43+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 125, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b90\", \"absolute\": 0,\n \"created_at\": \"2019-07-21T14:21:50+02:00\", \"duration\": 35, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND81694840\",\n \"notes\": \"Pump suspended insulin delivery: Predicted low glucose suspend\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b72\", \"carbs\": 24, \"created_at\": \"2019-07-21T13:35:06+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.425, \"key600\": \"BOLUS81693D4C\",\n \"notes\": \"carb 24g 2.4U, {75~100} 0.3U, iob 2.3 -0.3U (10g/u, isf 125/u)\"},\n {\"_id\": \"5d34c068a7775b341aef8bac\", \"created_at\": \"2019-07-21T13:34:43+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 141, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8bae\", \"created_at\": \"2019-07-21T13:22:37+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 148, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8bb0\", \"created_at\": \"2019-07-21T12:46:23+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 251, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8bb2\", \"created_at\": \"2019-07-21T12:20:13+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 297, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b74\", \"carbs\": 20, \"created_at\": \"2019-07-21T12:10:02+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.175, \"key600\": \"BOLUS8169295C\",\n \"notes\": \"carb 20g 2.2U, {75~100} 0.0U, iob 4.5 0.0U (9g/u, isf 125/u)\"},\n {\"_id\": \"5d34c068a7775b341aef8b76\", \"carbs\": 25, \"created_at\": 
\"2019-07-21T11:53:32+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.725, \"key600\": \"BOLUS8169257E\",\n \"notes\": \"carb 25g 2.7U, {75~100} 0.0U, iob 2.2 0.0U (9g/u, isf 125/u)\"},\n {\"_id\": \"5d34c068a7775b341aef8b92\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-21T11:53:13+02:00\", \"duration\": 30, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL8169256B\", \"notes\": \"Temp Basal: 3.3U, duration 30 minutes\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b78\", \"created_at\": \"2019-07-21T11:20:15+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.5, \"key600\": \"BOLUS81691DB1\",\n \"notes\": \"carb 0g 0.0U, {75~100} 1.7U, iob 0.0 0.0U (9g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d34c068a7775b341aef8bb4\", \"created_at\": \"2019-07-21T11:19:19+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 309, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b94\", \"created_at\": \"2019-07-21T10:46:00+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME816915AA\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b96\", \"absolute\": 0,\n \"created_at\": \"2019-07-21T09:06:11+02:00\", \"duration\": 99, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND8168FE44\", \"notes\": \"Pump suspended insulin delivery: User suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b98\", \"created_at\": \"2019-07-21T08:27:11+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME8168F520\",\n \"notes\": \"Pump resumed insulin delivery: Low glucose manual resume\", \"carbs\": None,\n \"insulin\": None}, {\"_id\": \"5d34c068a7775b341aef8b9a\", \"absolute\": 0,\n \"created_at\": \"2019-07-21T08:06:50+02:00\", \"duration\": 20,\n \"eventType\": \"Temp Basal\", \"key600\": \"SUSPEND8168F05B\",\n \"notes\": \"Pump suspended insulin delivery: Predicted low glucose suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d34c068a7775b341aef8b7a\", \"carbs\": 25, \"created_at\": \"2019-07-21T07:59:27+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 4.075, \"key600\": \"BOLUS8168EEA0\",\n \"notes\": \"carb 25g 4.1U, {75~100} 0.0U, iob 0.2 0.0U (6g/u, isf 125/u)\"},\n {\"_id\": \"5d33e78aa7775b341ae92c56\", \"created_at\": \"2019-07-21T06:18:04+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.4, \"key600\": \"BOLUS8168D6DD\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.4U, iob 0.0 0.0U (6g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d34c068a7775b341aef8bb6\", \"created_at\": \"2019-07-21T06:17:32+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 151, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d33853fa7775b341ae7572a\", \"created_at\": \"2019-07-20T23:17:31+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME8168744E\",\n \"notes\": \"Pump resumed insulin delivery: Low glucose manual resume\", \"carbs\": None,\n \"insulin\": None}, {\"_id\": \"5d33853fa7775b341ae7572c\", \"absolute\": 0,\n \"created_at\": \"2019-07-20T23:06:48+02:00\", \"duration\": 10,\n \"eventType\": \"Temp Basal\", \"key600\": \"SUSPEND816871CB\",\n \"notes\": \"Pump suspended insulin delivery: Predicted low glucose suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": 
\"5d3379e6a7775b341ae6e566\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-20T22:09:13+02:00\", \"duration\": 20, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL8168644C\",\n \"notes\": \"Temp Basal: 3.3U, duration 30 minutes * canceled, duration 20 minutes\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d3374f0a7775b341ae6b696\", \"created_at\": \"2019-07-20T22:08:37+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.9, \"key600\": \"BOLUS81686428\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.8U, iob 0.0 0.0U (12g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d3374f0a7775b341ae6b698\", \"created_at\": \"2019-07-20T22:07:40+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 197, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d33725ca7775b341ae6a125\", \"created_at\": \"2019-07-20T21:32:08+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME81685B9B\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d33725ca7775b341ae6a127\", \"absolute\": 0,\n \"created_at\": \"2019-07-20T20:28:31+02:00\", \"duration\": 63, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND81684CB2\", \"notes\": \"Pump suspended insulin delivery: User suspend\",\n \"carbs\": None, \"insulin\": None}, {\"_id\": \"5d33725ca7775b341ae6a129\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-20T19:27:10+02:00\",\n \"duration\": 30, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL81683E51\",\n \"notes\": \"Temp Basal: 3.3U, duration 30 minutes\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d334c66a7775b341ae5cb3e\", \"carbs\": 55, \"created_at\": \"2019-07-20T18:48:31+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 4.575, \"key600\": \"BOLUS81683542\",\n \"notes\": \"carb 55g 4.6U, {75~100} 0.0U, iob 0.7 0.0U (12g/u, isf 125/u)\"},\n {\"_id\": \"5d333948a7775b341ae583e1\", \"created_at\": \"2019-07-20T17:51:48+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.825, \"key600\": \"BOLUS816827F7\",\n \"notes\": \"carb 0g 0.0U, {75~100} 1.2U, iob 0.4 -0.4U (12g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d334c66a7775b341ae5cb40\", \"created_at\": \"2019-07-20T17:49:49+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 250, \"glucoseType\": \"Finger\", \"key600\": \"BG81682780\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d333948a7775b341ae583e3\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-20T17:15:30+02:00\", \"duration\": 30, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL81681F75\", \"notes\": \"Temp Basal: 3.3U, duration 30 minutes\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d333257a7775b341ae56c68\", \"created_at\": \"2019-07-20T16:43:39+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.525, \"key600\": \"BOLUS816817FE\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.8U, iob 0.3 -0.3U (10g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d333257a7775b341ae56c8c\", \"created_at\": \"2019-07-20T16:43:38+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 204, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d333257a7775b341ae56c7a\", \"created_at\": \"2019-07-20T16:43:05+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME816817DC\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n 
{\"_id\": \"5d34c068a7775b341aef8bbc\", \"created_at\": \"2019-07-20T16:41:30+02:00\",\n \"eventType\": \"Site Change\", \"key600\": \"MISC8168177D\", \"notes\": \"Reservoir changed\",\n \"carbs\": None, \"insulin\": None}, {\"_id\": \"5d333257a7775b341ae56c7c\", \"absolute\": 0,\n \"created_at\": \"2019-07-20T16:40:57+02:00\", \"duration\": 2,\n \"eventType\": \"Temp Basal\", \"key600\": \"SUSPEND8168175C\",\n \"notes\": \"Pump suspended insulin delivery: Set change suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d333257a7775b341ae56c6a\", \"created_at\": \"2019-07-20T15:27:51+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.5, \"key600\": \"BOLUS8168063A\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.4U, iob 1.8 -0.4U (10g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d333257a7775b341ae56c8e\", \"created_at\": \"2019-07-20T15:27:49+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 155, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d333257a7775b341ae56c6c\", \"carbs\": 20, \"created_at\": \"2019-07-20T13:25:20+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.025, \"key600\": \"BOLUS8167E983\",\n \"notes\": \"carb 20g 2.0U, {75~100} 0.0U, iob 7.2 0.0U (10g/u, isf 125/u)\"},\n {\"_id\": \"5d333257a7775b341ae56c6e\", \"carbs\": 65, \"created_at\": \"2019-07-20T13:16:10+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 6.625, \"key600\": \"BOLUS8167E75D\",\n \"notes\": \"carb 65g 6.6U, {75~100} 0.0U, iob 0.7 0.0U (10g/u, isf 125/u)\"},\n {\"_id\": \"5d333257a7775b341ae56c7e\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-20T12:57:54+02:00\", \"duration\": 17, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL8167E315\",\n \"notes\": \"Temp Basal: 3.3U, duration 30 minutes * canceled, duration 17 minutes\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d333257a7775b341ae56c70\", \"created_at\": \"2019-07-20T12:52:12+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.7, \"key600\": \"BOLUS8167E1BF\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.6U, iob 0.0 0.0U (9g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d333257a7775b341ae56c90\", \"created_at\": \"2019-07-20T12:52:11+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 176, \"glucoseType\": \"Finger\", \"key600\": \"BG8167E1BE\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d333257a7775b341ae56c80\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-20T12:12:19+02:00\", \"duration\": 30, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL8167D866\", \"notes\": \"Temp Basal: 3.3U, duration 30 minutes\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d333257a7775b341ae56c82\", \"created_at\": \"2019-07-20T12:02:34+02:00\",\n \"duration\": 9, \"eventType\": \"Temp Basal\", \"key600\": \"BASAL8167D61D\",\n \"notes\": \"Temp Basal: 200%, duration 30 minutes * canceled, duration 9 minutes\",\n \"percent\": 100, \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d333257a7775b341ae56c84\", \"created_at\": \"2019-07-20T10:46:46+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME8167C458\",\n \"notes\": \"Pump resumed insulin delivery: Low glucose auto resume - preset glucose reached\",\n \"carbs\": None, \"insulin\": None}, {\"_id\": \"5d333257a7775b341ae56c86\", \"absolute\": 0,\n \"created_at\": \"2019-07-20T10:11:55+02:00\",\n \"duration\": 34, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND8167BC2D\",\n \"notes\": \"Pump suspended 
insulin delivery: Predicted low glucose suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d333257a7775b341ae56c72\", \"carbs\": 20, \"created_at\": \"2019-07-20T08:33:11+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.175, \"key600\": \"BOLUS8167A509\",\n \"notes\": \"carb 20g 2.2U, {75~100} 0.0U, iob 1.7 0.0U (9g/u, isf 125/u)\"},\n {\"_id\": \"5d333257a7775b341ae56c74\", \"carbs\": 15, \"created_at\": \"2019-07-20T08:06:45+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 1.625, \"key600\": \"BOLUS81679ED7\",\n \"notes\": \"carb 15g 1.6U, {75~100} 0.0U, iob 0.2 0.0U (9g/u, isf 125/u)\"},\n {\"_id\": \"5d333257a7775b341ae56c76\", \"created_at\": \"2019-07-20T07:51:56+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.2, \"key600\": \"BOLUS81679B5E\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.0U, iob 0.0 0.0U (6g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d333257a7775b341ae56c88\", \"created_at\": \"2019-07-20T07:45:16+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME816799CE\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d333257a7775b341ae56c8a\", \"absolute\": 0,\n \"created_at\": \"2019-07-20T07:16:39+02:00\", \"duration\": 28, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND81679319\", \"notes\": \"Pump suspended insulin delivery: User suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d333257a7775b341ae56c92\", \"created_at\": \"2019-07-20T06:54:23+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 132, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d323257a7775b341ae1ef02\", \"created_at\": \"2019-07-19T22:02:40+02:00\",\n \"duration\": 30, \"eventType\": \"Temp Basal\", \"key600\": \"BASAL81671141\",\n \"notes\": \"Temp Basal: 155%, duration 30 minutes\", \"percent\": 55, \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d323257a7775b341ae1ef00\", \"carbs\": 10, \"created_at\": \"2019-07-19T21:52:50+02:00\",\n \"duration\": 78, \"enteredinsulin\": \"0.3\", \"eventType\": \"Combo Bolus\",\n \"key600\": \"BOLUS81670EF4\",\n \"notes\": \"Square Bolus: 0.825U, duration 210 minutes * ended before expected duration, square delivered 0.3U in 78 minutes : carb 10g 0.8U, {75~100} 0.0U, iob 0.2 0.0U (12g/u, isf 125/u)\",\n \"relative\": 2, \"splitExt\": \"100\", \"splitNow\": \"0\", \"insulin\": None},\n {\"_id\": \"5d3219dfa7775b341ae1a20d\", \"absolute\": 2,\n \"created_at\": \"2019-07-19T21:02:20+02:00\", \"duration\": 21, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL8167031D\",\n \"notes\": \"Temp Basal: 2.0U, duration 30 minutes * canceled, duration 21 minutes\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d3219dfa7775b341ae1a20b\", \"carbs\": 22, \"created_at\": \"2019-07-19T20:58:32+02:00\",\n \"duration\": 25, \"enteredinsulin\": \"0.175\", \"eventType\": \"Combo Bolus\",\n \"key600\": \"BOLUS81670239\",\n \"notes\": \"Square Bolus: 1.825U, duration 255 minutes * ended before expected duration, square delivered 0.175U in 25 minutes : carb 22g 1.8U, {75~100} 0.0U, iob 0.8 0.0U (12g/u, isf 125/u)\",\n \"relative\": 2, \"splitExt\": \"100\", \"splitNow\": \"0\", \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192cb\", \"carbs\": 25, \"created_at\": \"2019-07-19T20:58:04+02:00\",\n \"duration\": 0, \"enteredinsulin\": \"0.0\", \"eventType\": \"Combo Bolus\",\n \"key600\": \"BOLUS8167021D\",\n \"notes\": \"Square Bolus: 
2.075U, duration 30 minutes * ended before expected duration, square delivered 0.0U in 0 minutes : carb 25g 2.1U, {75~100} 0.0U, iob 0.9 0.0U (12g/u, isf 125/u)\",\n \"relative\": 2, \"splitExt\": \"100\", \"splitNow\": \"0\", \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192df\", \"created_at\": \"2019-07-19T20:53:02+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME816700EF\",\n \"notes\": \"Pump resumed insulin delivery: Low glucose manual resume\", \"carbs\": None,\n \"insulin\": None}, {\"_id\": \"5d321569a7775b341ae192e1\", \"absolute\": 0,\n \"created_at\": \"2019-07-19T20:36:43+02:00\", \"duration\": 16,\n \"eventType\": \"Temp Basal\", \"key600\": \"SUSPEND8166FD1C\",\n \"notes\": \"Pump suspended insulin delivery: Predicted low glucose suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192ff\", \"created_at\": \"2019-07-19T20:12:56+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 116, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae19301\", \"created_at\": \"2019-07-19T19:16:01+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 98, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192cd\", \"carbs\": 45, \"created_at\": \"2019-07-19T18:45:30+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 3.75, \"key600\": \"BOLUS8166E30B\",\n \"notes\": \"carb 45g 3.8U, {75~100} 0.0U, iob 4.2 0.0U (12g/u, isf 125/u)\"},\n {\"_id\": \"5d321569a7775b341ae192cf\", \"carbs\": 20, \"created_at\": \"2019-07-19T18:16:11+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 1.65, \"key600\": \"BOLUS8166DC2C\",\n \"notes\": \"carb 20g 1.7U, {75~100} 0.0U, iob 3.4 0.0U (12g/u, isf 125/u)\"},\n {\"_id\": \"5d321569a7775b341ae192e3\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-19T18:15:57+02:00\", \"duration\": 30, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL8166DC1E\", \"notes\": \"Temp Basal: 3.3U, duration 30 minutes\", \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192d1\", \"carbs\": 25, \"created_at\": \"2019-07-19T17:57:41+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.075, \"key600\": \"BOLUS8166D7D6\",\n \"notes\": \"carb 25g 2.1U, {75~100} 0.0U, iob 1.6 0.0U (12g/u, isf 125/u)\"},\n {\"_id\": \"5d321569a7775b341ae192d3\", \"carbs\": 20, \"created_at\": \"2019-07-19T17:39:24+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 1.65, \"key600\": \"BOLUS8166D38D\",\n \"notes\": \"carb 20g 1.7U, {75~100} 0.0U, iob 0.0 0.0U (12g/u, isf 125/u)\"},\n {\"_id\": \"5d321569a7775b341ae192e5\", \"created_at\": \"2019-07-19T17:25:32+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME8166D04D\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192e7\", \"absolute\": 0,\n \"created_at\": \"2019-07-19T15:38:21+02:00\", \"duration\": 107, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND8166B72E\", \"notes\": \"Pump suspended insulin delivery: User suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192e9\", \"created_at\": \"2019-07-19T15:06:50+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME8166AFCA\",\n \"notes\": \"Pump resumed insulin delivery: Low glucose auto resume - preset glucose reached\",\n \"carbs\": None, 
\"insulin\": None}, {\"_id\": \"5d321569a7775b341ae192eb\", \"absolute\": 0,\n \"created_at\": \"2019-07-19T14:11:54+02:00\",\n \"duration\": 54, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND8166A2EA\",\n \"notes\": \"Pump suspended insulin delivery: Predicted low glucose suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192ed\", \"created_at\": \"2019-07-19T13:38:35+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME81669B1B\",\n \"notes\": \"Pump resumed insulin delivery: Low glucose manual resume\", \"carbs\": None,\n \"insulin\": None}, {\"_id\": \"5d321569a7775b341ae192ef\", \"absolute\": 0,\n \"created_at\": \"2019-07-19T13:16:51+02:00\", \"duration\": 21,\n \"eventType\": \"Temp Basal\", \"key600\": \"SUSPEND81669603\",\n \"notes\": \"Pump suspended insulin delivery: Predicted low glucose suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192d5\", \"carbs\": 50, \"created_at\": \"2019-07-19T12:13:54+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 5.475, \"key600\": \"BOLUS81668742\",\n \"notes\": \"carb 50g 5.5U, {75~100} 0.0U, iob 2.6 0.0U (9g/u, isf 125/u)\"},\n {\"_id\": \"5d321569a7775b341ae192f1\", \"created_at\": \"2019-07-19T11:34:09+02:00\",\n \"duration\": 30, \"eventType\": \"Temp Basal\", \"key600\": \"BASAL81667DF1\",\n \"notes\": \"Temp Basal: 200%, duration 30 minutes\", \"percent\": 100, \"carbs\": None,\n \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192d7\", \"carbs\": 25, \"created_at\": \"2019-07-19T11:33:53+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.725, \"key600\": \"BOLUS81667DE1\",\n \"notes\": \"carb 25g 2.7U, {75~100} 0.0U, iob 0.5 0.0U (9g/u, isf 125/u)\"},\n {\"_id\": \"5d321569a7775b341ae192d9\", \"created_at\": \"2019-07-19T11:08:33+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.55, \"key600\": \"BOLUS816677F1\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.6U, iob 0.2 -0.2U (9g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d321569a7775b341ae19303\", \"created_at\": \"2019-07-19T11:08:31+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 171, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192f3\", \"absolute\": 3.3,\n \"created_at\": \"2019-07-19T11:01:28+02:00\", \"duration\": 6, \"eventType\": \"Temp Basal\",\n \"key600\": \"BASAL81667648\",\n \"notes\": \"Temp Basal: 3.3U, duration 30 minutes * canceled, duration 6 minutes\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192f5\", \"created_at\": \"2019-07-19T10:52:31+02:00\",\n \"duration\": 8, \"eventType\": \"Temp Basal\", \"key600\": \"BASAL8166742F\",\n \"notes\": \"Temp Basal: 200%, duration 30 minutes * canceled, duration 8 minutes\",\n \"percent\": 100, \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192f7\", \"created_at\": \"2019-07-19T10:37:38+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME816670B2\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192f9\", \"absolute\": 0,\n \"created_at\": \"2019-07-19T08:38:45+02:00\", \"duration\": 118, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND816654D4\", \"notes\": \"Pump suspended insulin delivery: User suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192db\", \"carbs\": 27, \"created_at\": 
\"2019-07-19T08:20:16+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 2.95, \"key600\": \"BOLUS8166507F\",\n \"notes\": \"carb 27g 3.0U, {75~100} 0.0U, iob 0.0 0.0U (9g/u, isf 125/u)\"},\n {\"_id\": \"5d321569a7775b341ae19305\", \"created_at\": \"2019-07-19T07:55:46+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 106, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192fb\", \"created_at\": \"2019-07-19T07:24:54+02:00\",\n \"duration\": 0, \"eventType\": \"Temp Basal\", \"key600\": \"RESUME81664385\",\n \"notes\": \"Pump resumed insulin delivery: User resumed\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d321569a7775b341ae192fd\", \"absolute\": 0,\n \"created_at\": \"2019-07-19T06:35:27+02:00\", \"duration\": 49, \"eventType\": \"Temp Basal\",\n \"key600\": \"SUSPEND816637F2\", \"notes\": \"Pump suspended insulin delivery: User suspend\",\n \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d3109faa7775b341ade1a0b\", \"created_at\": \"2019-07-19T02:06:51+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.4, \"key600\": \"BOLUS8165F8FE\",\n \"notes\": \"carb 0g 0.0U, {75~100} 0.7U, iob 0.0 0.0U (10g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d3109faa7775b341ade1a0e\", \"created_at\": \"2019-07-19T02:06:50+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 190, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d30daf5a7775b341add8351\", \"created_at\": \"2019-07-18T22:44:51+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 0.8, \"key600\": \"BOLUS8165C9A6\",\n \"notes\": \"carb 0g 0.0U, {75~100} 1.9U, iob 0.8 -0.8U (12g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d30da18a7775b341add8023\", \"created_at\": \"2019-07-18T22:39:40+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 335, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None},\n {\"_id\": \"5d30d44ca7775b341add6d0a\", \"created_at\": \"2019-07-18T21:30:36+02:00\",\n \"eventType\": \"Meal Bolus\", \"insulin\": 1.4, \"key600\": \"BOLUS8165B83F\",\n \"notes\": \"carb 0g 0.0U, {75~100} 1.3U, iob 1.0 -1.0U (12g/u, isf 125/u)\", \"carbs\": None},\n {\"_id\": \"5d30d44ca7775b341add6d31\", \"created_at\": \"2019-07-18T21:30:03+02:00\",\n \"eventType\": \"BG Check\", \"glucose\": 267, \"glucoseType\": \"Finger\", \"key600\": \"<KEY>\",\n \"units\": \"mg/dl\", \"carbs\": None, \"insulin\": None}], status=200)\n response = requests.get(\"https://benc.com/api/v1/treatments\")\n process_nightscouts_api_response(response)\n self.assertEqual(InfusionChanged.objects.get(id=1), InfusionChanged(date=\"2019-07-21T20:30:40+02:00\", id=1))\n self.assertEqual(SensorChanged.objects.get(id=1), SensorChanged(date=\"2019-07-21T18:58:52+02:00\", id=1))\n\n @override_settings(SENSOR_ALERT_FREQUENCY=24)\n def test_calculate_sensor(self):\n date = datetime.now(timezone.utc) - timedelta(hours=10)\n sensor_remains = calculate_sensor(date)\n self.assertEqual(sensor_remains, timedelta(hours=14))\n\n @override_settings(INFUSION_SET_ALERT_FREQUENCY=48)\n def test_calculate_infusion(self):\n date = datetime.now(timezone.utc) - timedelta(hours=10)\n sensor_remains = calculate_infusion(date)\n self.assertEqual(sensor_remains, timedelta(hours=38))\n\n def test_get_trigger_model(self):\n model = get_trigger_model()\n self.assertEqual(model.time, time(16))\n TriggerTime.objects.update(id=1, 
time=time(18))\n model = get_trigger_model()\n self.assertEqual(model.time, time(18))\n\n def test_not_today(self):\n self.assertTrue(not_today())\n LastTriggerSet.objects.update_or_create(id=1, defaults={\"date\": datetime.now().date()})\n self.assertFalse(not_today())\n\n def test_update_last_trigger_set(self):\n update_last_triggerset()\n self.assertFalse(not_today()) # tested already above\n LastTriggerSet.objects.update_or_create(id=1, defaults={\"date\": datetime.now().date() - timedelta(days=7)})\n self.assertTrue(not_today())\n update_last_triggerset()\n self.assertFalse(not_today())\n\n @override_settings(LANGUAGE_CODE=\"en\")\n def test_get_sms_txt_sensor(self):\n text = get_sms_txt_sensor(timedelta(days=1, hours=2))\n self.assertEqual(text, \"\\n\\n Your CGM sensor should be changed in 1 days and 2 hours.\")\n text = get_sms_txt_sensor(timedelta(days=-1, hours=2))\n self.assertEqual(text, \"\\n\\n Your CGM sensor change has already passed\")\n\n @override_settings(LANGUAGE_CODE=\"en\")\n def test_get_sms_txt_infusion_set(self):\n text = get_sms_txt_infusion_set(timedelta(days=1, hours=2))\n self.assertEqual(text, \".\\n\\n Your infusion set should be changed in 1 days and 2 hours.\")\n text = get_sms_txt_infusion_set(timedelta(days=-1, hours=2))\n self.assertEqual(text, \".\\n\\n Your infusion set change has already passed\")\n\n\n", "id": "9025308", "language": "Python", "matching_score": 3.624058723449707, "max_stars_count": 0, "path": "remider/tests/test_data_processing.py" }, { "content": "from django.contrib import admin\n\nfrom .models import InfusionChanged, SensorChanged, LastTriggerSet\n\nadmin.site.register(InfusionChanged)\nadmin.site.register(SensorChanged)\nadmin.site.register(LastTriggerSet)\n", "id": "7880385", "language": "Python", "matching_score": 1.5792425870895386, "max_stars_count": 2, "path": "remider/admin.py" }, { "content": "from django.db import models\n\n\nclass InfusionChanged(models.Model):\n \"\"\" model for saving last change of insufion set in database \"\"\"\n date = models.DateTimeField()\n\n\nclass SensorChanged(models.Model):\n \"\"\" model for saving last change of CGM sensor in database \"\"\"\n date = models.DateTimeField()\n\n\nclass LastTriggerSet(models.Model):\n \"\"\" model for avoiding triggers duplicates \"\"\"\n\n date = models.DateField()\n\n\nclass TriggerTime(models.Model):\n \"\"\" model for saving waking up app time \"\"\"\n time = models.TimeField()\n", "id": "11435008", "language": "Python", "matching_score": 1.9052674770355225, "max_stars_count": 2, "path": "remider/models.py" }, { "content": "# Generated by Django 2.1.7 on 2019-05-29 16:02\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('remider', '0002_lasttriggerset'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='TriggerTime',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('time', models.TimeField()),\n ],\n ),\n ]\n", "id": "8085449", "language": "Python", "matching_score": 0.34754815697669983, "max_stars_count": 2, "path": "remider/migrations/0003_triggertime.py" }, { "content": "from django.contrib.auth.management.commands import createsuperuser\nfrom django.utils.translation import ugettext as _\n\nfrom django.conf import settings\n\n\nclass Command(createsuperuser.Command):\n \"\"\"\n command for creating custom superuser\n \"\"\"\n\n def handle(self, *args, **options):\n self.stdout.write(self.style.HTTP_INFO(_(\"creating 
admin ...\")))\n\n options.setdefault('interactive', False)\n database = options.get('database')\n user_data = {\n 'username': settings.APP_NAME,\n 'password': <PASSWORD>,\n 'email': \"\",\n }\n self.UserModel._default_manager.db_manager(database).create_superuser(**user_data)\n\n if options.get('verbosity', 0) >= 1:\n self.stdout.write(_(\"Superuser created successfully.\"))\n", "id": "9579398", "language": "Python", "matching_score": 3.67997670173645, "max_stars_count": 2, "path": "remider/management/commands/create_default_admin.py" }, { "content": "import requests\nfrom django.core.management.base import BaseCommand\nfrom django.utils.translation import ugettext as _\nfrom django.conf import settings\n\n\nclass Command(BaseCommand):\n \"\"\"\n command for triggering our site\n \"\"\"\n\n def handle(self, *args, **options):\n self.stdout.write(self.style.HTTP_INFO(_(\"waking up ...\")))\n requests.get(\"https://{1}.herokuapp.com/reminder/?key={0}\".format(settings.SECRET_KEY, settings.APP_NAME))\n self.stdout.write(self.style.SUCCESS(_(\"website successfully woke up\")))\n", "id": "5950558", "language": "Python", "matching_score": 1.4136441946029663, "max_stars_count": 2, "path": "remider/management/commands/wake_up.py" }, { "content": "from functools import wraps\n\nfrom django.conf import settings\nfrom django.http import HttpResponseForbidden\nfrom django.utils.translation import LANGUAGE_SESSION_KEY\n\n\ndef secret_key_required(view_func):\n \"\"\" authorization decorator \"\"\"\n\n @wraps(view_func)\n def _required(request, *args, **kwargs):\n their_key = request.GET.get(\"key\", \"\")\n if their_key == settings.SECRET_KEY:\n return view_func(request, *args, **kwargs)\n else:\n return HttpResponseForbidden()\n\n return _required\n\n\ndef set_language_to_LANGUAGE_CODE(view_func):\n \"\"\" setting language to LANGUAGE_CODE decorator \"\"\"\n\n @wraps(view_func)\n def _set(request, *args, **kwargs):\n request.session[LANGUAGE_SESSION_KEY] = settings.LANGUAGE_CODE\n return view_func(request, *args, **kwargs)\n\n return _set\n", "id": "8129173", "language": "Python", "matching_score": 1.3629945516586304, "max_stars_count": 2, "path": "remider/decorators.py" }, { "content": "from django.urls import re_path\nfrom django.views.generic import TemplateView\n\nfrom .decorators import secret_key_required, set_language_to_LANGUAGE_CODE\nfrom .views import reminder_and_notifier_view, file_view, auth_view, upload_view, ManagePhoneNumbersView, \\\n number_delete_view, MenuView, quiet_checkup_view, NotificationsCenterView, ManageIFTTTMakersView, ifttt_delete_view\n\nurlpatterns = [\n re_path(r\"^$\", set_language_to_LANGUAGE_CODE(TemplateView.as_view(template_name=\"remider/home.html\")), name=\"home\"),\n re_path(r\"^reminder/$\", reminder_and_notifier_view, name=\"reminder\"),\n re_path(r\"^ATriggerVerify.txt$\", file_view, name=\"atriggerfile\"),\n re_path(r\"^auth/$\", auth_view, name='get_secret'),\n re_path(r\"^menu/$\", secret_key_required(set_language_to_LANGUAGE_CODE(MenuView.as_view())), name='menu'),\n re_path(r\"^upload/$\", upload_view, name=\"upload\"),\n re_path(r\"^phonenumbers/$\", secret_key_required(set_language_to_LANGUAGE_CODE(ManagePhoneNumbersView.as_view())),\n name=\"manage_ph_numbers\"),\n re_path(r\"^deletephonenumber/(?P<number_id>[0-9]+)/$\", number_delete_view, name=\"del-ph\"),\n re_path(r\"^reminder/quiet/$\", quiet_checkup_view, name=\"quiet\"),\n re_path(r\"^notifications-center/$\",\n 
secret_key_required(set_language_to_LANGUAGE_CODE(NotificationsCenterView.as_view())), name=\"notif-center\"),\n re_path(r\"^iftttmakers/$\", secret_key_required(set_language_to_LANGUAGE_CODE(ManageIFTTTMakersView.as_view())),\n name='manage_ifttt_makers'),\n re_path(r\"^deletemaker/(?P<maker_id>[0-9]+)/$\", ifttt_delete_view, name=\"del-ifttt\"),\n\n]\n", "id": "2432032", "language": "Python", "matching_score": 0.26131436228752136, "max_stars_count": 2, "path": "remider/urls.py" }, { "content": "import os\n\nfrom django.core.files.storage import FileSystemStorage\nfrom django.conf import settings\n\n\nclass OverwriteStorage(FileSystemStorage):\n '''\n File system storage which overwrite files if names are duplicated\n '''\n\n def get_available_name(self, name, max_length=None):\n if self.exists(name):\n os.remove(os.path.join(settings.STATIC_ROOT, \"uplouded\", name))\n return super().get_available_name(name, max_length)\n", "id": "1205312", "language": "Python", "matching_score": 1.210996150970459, "max_stars_count": 2, "path": "remider/storage.py" }, { "content": "from .base import FunctionalTest\nfrom django.shortcuts import reverse\nfrom django.conf import settings\nfrom django.test import override_settings\n\nimport os.path\n\n\nclass UploadingTest(FunctionalTest):\n def setUp(self):\n super().setUp()\n try:\n with open(os.path.join(settings.STATIC_ROOT, \"uplouded\", \"ATriggerVerify.txt\")) as file:\n self.file = file.read()\n except FileNotFoundError:\n self.file = False\n\n def tearDown(self):\n super().tearDown()\n if self.file:\n with open(os.path.join(settings.STATIC_ROOT, \"uplouded\", \"ATriggerVerify.txt\"), \"w+\") as file:\n file.write(str(self.file))\n\n @override_settings(DEBUG=True, SECRET_KEY=\"mycoolsecretkey\")\n def test_templates(self):\n response = self.client.get(reverse(\"upload\") + \"?key=mycoolsecretkey\")\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \"remider/upload.html\")\n\n def uplouding_test(self, file_path):\n self.browser.get(self.live_server_url + reverse(\"menu\") + \"?key=mycoolsecretkey\")\n self.browser.find_element_by_id(\"upload_button\").click()\n self.wait_and_assertUrlNow(\"upload\")\n input = self.wait_for_finding(lambda: self.browser.find_element_by_id(\"id_file\"))\n input.send_keys(file_path)\n self.browser.find_element_by_id(\"upload_button\").click()\n self.wait_and_assertUrlNow(\"menu\", extras=\"&info=1\")\n\n with open(file_path, \"rb\") as file:\n file = file.read()\n response = self.client.get(reverse(\"atriggerfile\"))\n self.assertContains(response, file)\n\n @override_settings(SECRET_KEY=\"mycoolsecretkey\", LANGUAGE_CODE='en', app_name=\"benc-test\", DEBUG=True)\n def test_uploading(self):\n self.uplouding_test(file_path=os.path.join(settings.BASE_DIR, \"remider\", \"tests\", \"ATriggerVerify.txt\"))\n self.uplouding_test(file_path=os.path.join(settings.BASE_DIR, \"remider\", \"tests\", \"ATriggerVerify2.txt\"))\n", "id": "834959", "language": "Python", "matching_score": 5.537879467010498, "max_stars_count": 2, "path": "remider/tests/test_functional_uploading.py" }, { "content": "from .base import FunctionalTest, check_internet_connection\nfrom django.shortcuts import reverse\nfrom django.test import override_settings\nfrom selenium.webdriver.common.keys import Keys\n\nfrom unittest import skipIf\n\n\nclass LoggingTest(FunctionalTest):\n\n @skipIf(not check_internet_connection(), \"internet disconnect\")\n @override_settings(SECRET_KEY=\"mycoolsecretkey\", LANGUAGE_CODE='en', 
app_name=\"benc-test\", DEBUG=True)\n def test_google_redirect(self):\n self.browser.get(self.live_server_url)\n self.wait_for_finding(lambda: self.browser.find_element_by_id(\"OK-btn\")).click()\n self.wait_and_assertUrlNow(\"https://www.google.com/\")\n\n @override_settings(SECRET_KEY=\"mycoolsecretkey\", LANGUAGE_CODE='en', app_name=\"benc-test\", DEBUG=False)\n def test_logging(self):\n self.browser.get(self.live_server_url)\n self.wait_for_finding(lambda: self.browser.find_element_by_id(\"continue_buton\")).click()\n self.assertUrlNow(url=\"get_secret\")\n input = self.wait_for_finding(lambda: self.browser.find_element_by_id(\"id_apisecret\"))\n\n input.send_keys(\"mycoolsecretkey\")\n input.send_keys(Keys.ENTER)\n self.wait_and_assertUrlNow(url=\"menu\")\n\n self.browser.get(self.live_server_url + reverse(\"get_secret\"))\n input = self.wait_for_finding(lambda: self.browser.find_element_by_id(\"id_apisecret\"))\n\n input.send_keys(\"mynotcoolsecretkey\")\n input.send_keys(Keys.ENTER)\n self.wait_and_assertUrlNow(url=\"menu\", status=403)\n", "id": "3152631", "language": "Python", "matching_score": 3.739417791366577, "max_stars_count": 2, "path": "remider/tests/test_functional_logging.py" }, { "content": "from .base import FunctionalTest, check_internet_connection\nfrom django.shortcuts import reverse\nfrom django.test import override_settings\nfrom unittest import skipIf\n\n\nclass CSSTests(FunctionalTest):\n\n @skipIf(not check_internet_connection(), \"no internet connection\")\n def test_home_view(self):\n self.browser.get(self.live_server_url)\n self.browser.set_window_size(1024, 768)\n hello_world = self.browser.find_element_by_css_selector(\".display-4\")\n self.assertAlmostEqual(\n hello_world.location['x'],\n 73,\n delta=20\n )\n\n @skipIf(not check_internet_connection(), \"no internet connection\")\n def test_auth_view(self):\n self.browser.get(self.live_server_url + reverse(\"get_secret\"))\n self.browser.set_window_size(1024, 768)\n form = self.browser.find_element_by_css_selector(\"form\")\n self.assertAlmostEqual(\n form.location['x'],\n 41,\n delta=20\n )\n\n @skipIf(not check_internet_connection(), \"no internet connection\")\n @override_settings(SECRET_KEY=\"mycoolsecretkey\")\n def test_menu_view(self):\n self.browser.get(self.live_server_url + reverse(\"menu\")+\"?key=mycoolsecretkey\")\n self.browser.set_window_size(1024, 768)\n settings = self.browser.find_element_by_css_selector(\".display-4\")\n self.assertAlmostEqual(\n settings.location['x'],\n 32,\n delta=20\n )\n\n @skipIf(not check_internet_connection(), \"no internet connection\")\n @override_settings(SECRET_KEY=\"mycoolsecretkey\")\n def test_notif_center_view(self):\n self.browser.get(self.live_server_url + reverse(\"notif-center\") + \"?key=mycoolsecretkey\")\n self.browser.set_window_size(1024, 768)\n settings = self.browser.find_element_by_css_selector(\".btn-info\")\n self.assertAlmostEqual(\n settings.location['x'],\n 43,\n delta=20\n )\n", "id": "8847809", "language": "Python", "matching_score": 2.660398006439209, "max_stars_count": 2, "path": "remider/tests/test_layout_and_styling.py" }, { "content": "from django.test import TestCase, override_settings\nfrom django.shortcuts import reverse\nfrom django.conf import settings\n\nfrom ..forms import GetSecretForm, TriggerTimeForm, ChangeEnvVariableForm, ChooseLanguageForm, \\\n ChooseNotificationsWayForm\n\n\nclass HomeViewTests(TestCase):\n def test_template_loading(self):\n response = self.client.get(reverse(\"home\"))\n 
self.assertTemplateUsed(response, \"remider/home.html\")\n self.assertEqual(response.status_code, 200)\n\n\nclass AuthViewTests(TestCase):\n def test_template_loading(self):\n response = self.client.get(reverse(\"get_secret\"))\n self.assertTemplateUsed(response, \"remider/auth.html\")\n self.assertEqual(response.status_code, 200)\n\n def test_displays_proper_form(self):\n response = self.client.get(reverse(\"get_secret\"))\n self.assertIsInstance(response.context['form'], GetSecretForm)\n self.assertContains(response, 'type=\"password\"')\n self.assertContains(response, 'type=\"submit\"')\n\n\nclass MenuViewTests(TestCase):\n @override_settings(SECRET_KEY=\"mycoolsecretkey\")\n def test_template_loading(self):\n response = self.client.get(reverse(\"menu\") + \"?key=mycoolsecretkey\")\n self.assertTemplateUsed(response, \"remider/menu.html\")\n self.assertEqual(response.status_code, 200)\n\n @override_settings(SECRET_KEY=\"mycoolsecretkey\")\n def test_template_contex(self):\n response = self.client.get(reverse(\"menu\") + \"?key=mycoolsecretkey\")\n self.assertEqual(response.context[\"SECRET_KEY\"], \"mycoolsecretkey\")\n self.assertEqual(response.context[\"info\"], False)\n self.assertEqual(response.context[\"info2\"], False)\n response = self.client.get(reverse(\"menu\") + \"?key=mycoolsecretkey&info=1\")\n self.assertEqual(response.context[\"info\"], True)\n\n @override_settings(SECRET_KEY=\"mycoolsecretkey\")\n def test_proper_forms(self):\n response = self.client.get(reverse(\"menu\") + \"?key=mycoolsecretkey\")\n self.assertIsInstance(response.context[\"language_form\"], ChooseLanguageForm)\n self.assertIsInstance(response.context[\"time_form\"], TriggerTimeForm)\n forms = (\n (\"NIGHTSCOUT_LINK\", \"ns_link_button\", settings.NIGTSCOUT_LINK),\n (\"INFUSION_SET_ALERT_FREQUENCY\", \"infusion_freq_button\", settings.INFUSION_SET_ALERT_FREQUENCY),\n (\"SENSOR_ALERT_FREQUENCY\", \"sensor_freq_button\", settings.SENSOR_ALERT_FREQUENCY),\n (\"ATRIGGER_KEY\", \"atrigger_key_button\", settings.ATRIGGER_KEY),\n (\"ATRIGGER_SECRET\", \"atrigger_secret_button\", settings.ATRIGGER_SECRET),\n (\"TWILIO_ACCOUNT_SID\", \"twilio_sid_button\", settings.TWILIO_ACCOUNT_SID),\n (\"TWILIO_AUTH_TOKEN\", \"twilio_token_button\", settings.TWILIO_AUTH_TOKEN),\n )\n for indx, form in enumerate(response.context[\"forms_list\"]):\n self.assertIsInstance(form, ChangeEnvVariableForm)\n self.assertEqual(form.button_name, forms[indx][1])\n self.assertEqual(form.fields['new_value'].label, forms[indx][0])\n self.assertEqual(form.fields['new_value'].initial, forms[indx][2])\n\n\nclass NotificationCenterViewTests(TestCase):\n @override_settings(SECRET_KEY=\"mycoolsecretkey\")\n def test_template_loading(self):\n response = self.client.get(reverse(\"notif-center\") + \"?key=mycoolsecretkey\")\n self.assertTemplateUsed(response, \"remider/notifications.html\")\n self.assertEqual(response.status_code, 200)\n\n @override_settings(SECRET_KEY=\"mycoolsecretkey\")\n def test_template_context(self):\n response = self.client.get(reverse(\"notif-center\") + \"?key=mycoolsecretkey\")\n self.assertEqual(response.context[\"SECRET_KEY\"], \"mycoolsecretkey\")\n self.assertEqual(response.context[\"trig_info\"], True)\n self.assertEqual(response.context[\"sms_info\"], True)\n\n @override_settings(SECRET_KEY=\"mycoolsecretkey\")\n def test_displays_proper_form(self):\n response = self.client.get(reverse(\"notif-center\") + \"?key=mycoolsecretkey\")\n self.assertIsInstance(response.context['form'], ChooseNotificationsWayForm)\n 
self.assertContains(response, 'type=\"checkbox\"')\n self.assertContains(response, 'type=\"submit\"')\n", "id": "4337377", "language": "Python", "matching_score": 3.2169835567474365, "max_stars_count": 2, "path": "remider/tests/test_views.py" }, { "content": "from bootstrap_datepicker_plus import TimePickerInput\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import TriggerTime\n\n\nclass GetSecretForm(forms.Form):\n \"\"\" authorization form \"\"\"\n apisecret = forms.CharField(required=True, widget=forms.PasswordInput, label='SECRET_KEY')\n\n\nclass FileUploudForm(forms.Form):\n \"\"\" form for uplouding files \"\"\"\n file = forms.FileField()\n\n\nclass ChangeEnvVariableForm(forms.Form):\n \"\"\" form for changing envinronment variables \"\"\"\n button_name = \"\"\n deletable = False\n new_value = forms.CharField(required=True)\n\n\nclass ChooseNotificationsWayForm(forms.Form):\n \"\"\" form for choosing notifications way \"\"\"\n ifttt_notifications = forms.BooleanField(required=False, label=_(\"TRIGGER IFTTT (SEND WEBHOOKS)\"))\n sms_notifications = forms.BooleanField(required=False, label=_(\"SEND SMS\"))\n\n\nclass ChooseLanguageForm(forms.Form):\n \"\"\" form for choosing your language \"\"\"\n language = forms.ChoiceField(required=True, choices=settings.LANGUAGES,\n label=_(\"LANGUAGE\"))\n\n\nclass TriggerTimeForm(forms.ModelForm):\n \"\"\" form for changing waking up time \"\"\"\n\n class Meta:\n model = TriggerTime\n fields = [\"time\"]\n labels = {\"time\": _(\"NOTIFICATION TIME. Please give UTC TIME\"), }\n widgets = {\"time\": TimePickerInput(), }\n", "id": "1884045", "language": "Python", "matching_score": 2.1701393127441406, "max_stars_count": 2, "path": "remider/forms.py" }, { "content": "import sys\nimport os.path\n\nimport requests\nfrom django.conf import settings\nfrom django.http import FileResponse\nfrom django.shortcuts import render, redirect, reverse\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView, FormView\n\nfrom .api_interactions import change_config_var, create_trigger, notify\nfrom .data_processing import process_nightscouts_api_response, calculate_infusion, calculate_sensor, \\\n get_sms_txt_infusion_set, get_sms_txt_sensor, get_trigger_model\nfrom .decorators import secret_key_required, set_language_to_LANGUAGE_CODE\nfrom .forms import ChangeEnvVariableForm, ChooseNotificationsWayForm, GetSecretForm, FileUploudForm, ChooseLanguageForm, \\\n TriggerTimeForm\nfrom .storage import OverwriteStorage\n\n\n@secret_key_required\n@set_language_to_LANGUAGE_CODE\ndef quiet_checkup_view(request):\n \"\"\"\n shows remaining time to next change (infusion set or CGM sensor)\n without sending notification\n \"\"\"\n return reminder_and_notifier_view(request, False)\n\n\n@secret_key_required\n@set_language_to_LANGUAGE_CODE\ndef reminder_and_notifier_view(request, send_notif=True):\n \"\"\"\n get latest infusion set or CGM sensor change date from Nightscout`s API\n saves it in database\n calculates next change date\n sends notification via sms\n \"\"\"\n\n response = requests.get(settings.NIGTSCOUT_LINK + \"/api/v1/treatments\")\n date, sensor_date = process_nightscouts_api_response(response)\n\n sms_text = \"\"\n\n try:\n infusion_time_remains = calculate_infusion(date)\n inf_text = get_sms_txt_infusion_set(infusion_time_remains)\n sms_text += inf_text\n\n except TypeError: # date is None\n inf_text = _(\".\\n\\nInfusion set: unsuccessful 
data reading\")\n sms_text += inf_text\n\n except Exception as error:\n print(error)\n sys.stdout.flush()\n inf_text = _(\".\\n\\n Infusion set: unsuccessful data processing\")\n sms_text += inf_text\n try:\n sensor_time_remains = calculate_sensor(sensor_date)\n sensor_text = get_sms_txt_sensor(sensor_time_remains)\n sms_text += sensor_text\n\n except TypeError: # sensor_date is None\n sensor_text = _('\\n\\nCGM sensor: unsuccessful data reading')\n sms_text += sensor_text\n\n except Exception as error:\n print(error)\n sys.stdout.flush()\n sensor_text = _(\"\\n\\nCGM sensor: unsuccessful data processing\")\n sms_text += sensor_text\n\n if send_notif:\n notify(sms_text)\n create_trigger()\n\n return render(request, \"remider/debug.html\",\n {\n \"inf_text\": inf_text[1:],\n \"sensor_text\": sensor_text,\n \"SECRET_KEY\": settings.SECRET_KEY,\n })\n\n\n@set_language_to_LANGUAGE_CODE\ndef file_view(request):\n \"\"\"\n view returns verification file for atrigger.com\n \"\"\"\n file = open(os.path.join(settings.STATIC_ROOT, \"uplouded\", \"ATriggerVerify.txt\"), \"rb\")\n return FileResponse(file)\n\n\n@set_language_to_LANGUAGE_CODE\ndef auth_view(request):\n \"\"\" authorization view via SECRET_KEY \"\"\"\n if request.method == \"POST\":\n form = GetSecretForm(request.POST)\n if form.is_valid():\n return redirect(\"/menu/?key={}\".format(form.cleaned_data['apisecret']))\n else:\n form = GetSecretForm()\n\n return render(request, \"remider/auth.html\", {\"form\": form})\n\n\nclass MenuView(TemplateView):\n \"\"\"\n menu view\n redirecting buttons and config variables control\n \"\"\"\n template_name = \"remider/menu.html\"\n\n forms_list = []\n forms = (\n (\"NIGHTSCOUT_LINK\", \"ns_link_button\", settings.NIGTSCOUT_LINK),\n (\"INFUSION_SET_ALERT_FREQUENCY\", \"infusion_freq_button\", settings.INFUSION_SET_ALERT_FREQUENCY),\n (\"SENSOR_ALERT_FREQUENCY\", \"sensor_freq_button\", settings.SENSOR_ALERT_FREQUENCY),\n (\"ATRIGGER_KEY\", \"atrigger_key_button\", settings.ATRIGGER_KEY),\n (\"ATRIGGER_SECRET\", \"atrigger_secret_button\", settings.ATRIGGER_SECRET),\n (\"TWILIO_ACCOUNT_SID\", \"twilio_sid_button\", settings.TWILIO_ACCOUNT_SID),\n (\"TWILIO_AUTH_TOKEN\", \"twilio_token_button\", settings.TWILIO_AUTH_TOKEN),\n )\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n POST method\n handles http`s POST request\n loads forms\n checks if they are submitted\n changes config variables\n \"\"\"\n self.info = False\n self.info2 = False\n self.forms_list = []\n self.forms_link_dict = {}\n post_data = request.POST or None\n\n if \"language_button\" in post_data:\n language_form = ChooseLanguageForm(post_data)\n else:\n language_form = ChooseLanguageForm()\n language_form.fields[\"language\"].initial = settings.LANGUAGE_CODE\n\n time_model = get_trigger_model()\n if \"time_button\" in post_data:\n time_form = TriggerTimeForm(post_data, instance=time_model)\n else:\n time_form = TriggerTimeForm(instance=time_model)\n\n for form_tuple in self.forms:\n form = self.create_changeenvvarform(form_tuple[1], form_tuple[0], form_tuple[2], post_data)\n self.forms_link_dict[form_tuple[0]] = form\n\n for form_tuple in self.forms:\n form = self.forms_link_dict[form_tuple[0]]\n if form.is_valid() and form_tuple[1] in post_data:\n form, self.info2 = self.save_changeenvvarform(form, form_tuple[0])\n\n if language_form.is_valid() and \"language_button\" in post_data:\n language_form, self.info2 = self.save_changeenvvarform(language_form, \"LANGUAGE_CODE\", \"language\")\n if time_form.is_valid() and 
\"time_button\" in post_data:\n time_form.save()\n contex = self.get_context_data(forms_list=self.forms_list, SECRET_KEY=settings.SECRET_KEY, info=self.info,\n info2=self.info2,\n language_form=language_form, time_form=time_form, )\n return self.render_to_response(contex)\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n GET method\n handles http`s GET request\n loads forms\n shows info about successful change\n \"\"\"\n language_form = ChooseLanguageForm()\n language_form.fields[\"language\"].initial = settings.LANGUAGE_CODE\n\n time_model = get_trigger_model()\n time_form = TriggerTimeForm(instance=time_model)\n\n self.forms_list = []\n self.info = bool(int(request.GET.get(\"info\", \"0\")))\n self.info2 = False\n\n for form_tuple in self.forms:\n self.create_changeenvvarform(form_tuple[1], form_tuple[0], form_tuple[2])\n\n contex = self.get_context_data(forms_list=self.forms_list, SECRET_KEY=settings.SECRET_KEY, info=self.info,\n info2=self.info2,\n language_form=language_form, time_form=time_form, )\n return self.render_to_response(contex)\n\n def create_changeenvvarform(self, button_name, label, default, post_data=()):\n \"\"\"\n creates form and adds it to forms_list\n :param button_name: string, unique button name\n :param label: string, field`s display name\n :param default: string, default value of form`s field\n :param post_data: request.POST or empty tuple\n :return: ready to use form\n \"\"\"\n if button_name in post_data:\n form = ChangeEnvVariableForm(post_data)\n else:\n form = ChangeEnvVariableForm()\n form.button_name = button_name\n form.fields['new_value'].label = label\n form.fields['new_value'].initial = default\n self.forms_list.append(form)\n return form\n\n def save_changeenvvarform(self, form, label, field_name=\"new_value\"):\n \"\"\"\n reads data from submitted form and changes config variables\n :param form: submitted form\n :param label: name of config variable\n :return: already used form and info about successful change\n \"\"\"\n var = form.cleaned_data[field_name]\n if change_config_var(label, var):\n info2 = (True, label)\n else:\n info2 = (False, \"unsuccess\")\n\n return form, info2\n\n\n@secret_key_required\n@set_language_to_LANGUAGE_CODE\ndef upload_view(request, location=os.path.join(settings.STATIC_ROOT, \"uplouded\")):\n \"\"\" allows user to upload verification file for atrigger.com \"\"\"\n if request.method == 'POST':\n form = FileUploudForm(request.POST, request.FILES)\n if form.is_valid():\n file = request.FILES['file']\n fs = OverwriteStorage(location=location)\n fs.save(\"ATriggerVerify.txt\", file)\n return redirect(\"/menu/?key={}&info={}\".format(settings.SECRET_KEY, \"1\"))\n else:\n form = FileUploudForm()\n return render(request, 'remider/upload.html', {'form': form, \"SECRET_KEY\": settings.SECRET_KEY, })\n\n\nclass ManagePhoneNumbersView(TemplateView):\n \"\"\"\n allows user to change, add or delete his phone numbers\n \"\"\"\n template_name = \"remider/manage_ph.html\"\n forms_list = []\n to_numbers_forms_list = {}\n info = (False, \"\")\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n GET method\n handles http`s GET request\n loads forms\n shows info about successful change\n \"\"\"\n self.to_numbers_forms_list = {}\n self.forms_list = []\n post_data = request.POST\n from_number_form = self.create_changeenvvarform('from_number_button', _(\"NUMBER OF SENDER\"),\n settings.FROM_NUMBER,\n post_data)\n\n for i, number in enumerate(settings.TO_NUMBERS):\n label = \"to_number_\" + str(i + 1)\n button_name = label + 
\"_button\"\n label_tag = _(\"DESTINATION NUMBER \") + str(i + 1) + \".\"\n form = self.create_changeenvvarform(button_name, label_tag, number, post_data)\n self.to_numbers_forms_list[label] = form\n next_number_id = len(settings.TO_NUMBERS) + 1\n new_number_form = self.create_changeenvvarform('new_number_button',\n _(\"DESTINATION NUMBER \") + str(next_number_id) + \".\", \"\",\n post_data)\n\n if from_number_form.is_valid() and 'from_number_button' in post_data:\n from_number_form, self.info = self.save_changeenvvarform(from_number_form, \"from_number\", )\n\n for i, number in enumerate(settings.TO_NUMBERS):\n label = \"to_number_\" + str(i + 1)\n button_name = label + \"_button\"\n form = self.to_numbers_forms_list[label]\n if form.is_valid() and button_name in post_data:\n form, self.info = self.save_changeenvvarform(form, label)\n break\n\n if new_number_form.is_valid() and 'new_number_button' in post_data:\n new_number_form, self.info = self.save_changeenvvarform(new_number_form, \"to_number_\" + str(next_number_id))\n contex = self.get_context_data(forms_list=self.forms_list, info=self.info, delinfo=(False, \"normal\"),\n SECRET_KEY=settings.SECRET_KEY, last_id=self.get_del_id())\n\n return self.render_to_response(contex)\n\n def get_del_id(self):\n if bool(int(self.request.GET.get(\"delinfo\", \"0\"))):\n return str(int(self.request.GET.get(\"delid\", \"normal\")) - 1)\n else:\n return len(self.to_numbers_forms_list)\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n GET method\n handles http`s GET request\n loads forms\n shows info about successful change\n \"\"\"\n delinfo = (bool(int(request.GET.get(\"delinfo\", \"0\"))), request.GET.get(\"delid\", \"normal\"))\n\n self.forms_list = []\n self.to_numbers_forms_list = {}\n self.create_changeenvvarform('from_number_button', _(\"NUMBER OF SENDER\"), settings.FROM_NUMBER)\n\n for i, number in enumerate(settings.TO_NUMBERS):\n label = \"to_number_\" + str(i + 1)\n button_name = label + \"_button\"\n label_tag = _(\"DESTINATION NUMBER \") + str(i + 1) + \".\"\n form = self.create_changeenvvarform(button_name, label_tag, number)\n self.to_numbers_forms_list[label] = form\n\n next_number_id = len(settings.TO_NUMBERS) + 1\n self.create_changeenvvarform('new_number_button', _(\"DESTINATION NUMBER \") + str(next_number_id) + \".\", \"\")\n\n if delinfo[0]:\n id = delinfo[1]\n label = \"to_number_\" + str(id)\n form = self.to_numbers_forms_list.pop(label)\n self.forms_list.remove(form)\n self.forms_list[-2].deletable = True\n self.forms_list[-1].fields[\"new_value\"].label = _(\"DESTINATION NUMBER \") + str(\n len(self.to_numbers_forms_list) + 1) + \".\"\n contex = self.get_context_data(forms_list=self.forms_list, info=self.info, delinfo=delinfo,\n SECRET_KEY=settings.SECRET_KEY, last_id=self.get_del_id())\n\n return self.render_to_response(contex)\n\n def create_changeenvvarform(self, button_name, label, default, post_data=()):\n \"\"\"\n creates form and adds it to forms_list\n :param button_name: string, unique button name\n :param label: string, field`s display name\n :param default: string, default value of form`s field\n :param post_data: request.POST or empty tuple\n :return: ready to use form\n \"\"\"\n if button_name in post_data:\n form = ChangeEnvVariableForm(post_data)\n else:\n form = ChangeEnvVariableForm()\n\n form.button_name = button_name\n form.fields['new_value'].label = label\n form.fields['new_value'].initial = default\n form.fields[\"new_value\"].required = False\n if form.button_name == 'new_number_button': # 
special treatment for adding new number form\n form.action = _(\"ADD\")\n if len(self.forms_list) > 0:\n self.forms_list[-1].deletable = True\n else:\n form.action = _(\"CHANGE\")\n\n self.forms_list.append(form)\n return form\n\n def save_changeenvvarform(self, form, label):\n \"\"\"\n reads data from submitted form and changes config variables\n :param form: submitted form\n :param label: name of config variable\n :return: already used form and info about successful change\n \"\"\"\n var = form.cleaned_data[\"new_value\"]\n if change_config_var(label, var):\n if form.button_name == 'new_number_button': # special treatment for adding new number form\n action = _(\"ADDED\")\n if len(self.forms_list) > 1:\n self.forms_list[-2].deletable = False\n form.action = _(\"CHANGE\")\n form.button_name = label + \"_button\"\n self.to_numbers_forms_list[label] = form\n next_number_id = len(self.to_numbers_forms_list) + 1\n self.create_changeenvvarform('new_number_button',\n _(\"DESTINATION NUMBER \") + str(next_number_id) + \".\",\n \"\")\n\n\n else:\n action = _(\"CHANGED\")\n info2 = [True, form.fields['new_value'].label, action]\n else:\n info2 = [False, form.fields['new_value'].label, \"unsuccess\"]\n\n return form, info2\n\n\n@secret_key_required\n@set_language_to_LANGUAGE_CODE\ndef number_delete_view(request, number_id):\n \"\"\"\n view handles requests for phone number deleting\n :param request: http request\n :param number_id: assigned number of phone number (requested to deleting)\n :return: redirects to phone numbers management view\n \"\"\"\n label = \"to_number_\" + str(number_id)\n\n if change_config_var(label, None):\n deleted = 1\n else:\n deleted = 0\n\n return redirect(\"/phonenumbers/?key={}&delinfo={}&delid={}\".format(settings.SECRET_KEY, deleted, number_id))\n\n\n@secret_key_required\n@set_language_to_LANGUAGE_CODE\ndef ifttt_delete_view(request, maker_id):\n \"\"\"\n view handles requests for IFTTT makers deleting\n :param request: http request\n :param maker_id: assigned number of IFTTT maker (requested to deleting)\n :return: redirects to IFTTT makers management view\n \"\"\"\n label = \"IFTTT_MAKER_\" + str(maker_id)\n if change_config_var(label, None):\n deleted = 1\n else:\n deleted = 0\n return redirect(\"/iftttmakers/?key={}&delinfo={}&delid={}\".format(settings.SECRET_KEY, deleted, maker_id))\n\n\nclass NotificationsCenterView(FormView):\n \"\"\"\n view for notifications management\n \"\"\"\n form_class = ChooseNotificationsWayForm\n template_name = \"remider/notifications.html\"\n\n trig_info = True\n sms_info = True\n\n def get_initial(self):\n \"\"\"\n :return: initial values for form\n \"\"\"\n initial = super(NotificationsCenterView, self).get_initial()\n initial[\"ifttt_notifications\"] = settings.TRIGGER_IFTTT\n initial[\"sms_notifications\"] = settings.SEND_SMS\n\n return initial\n\n def get_context_data(self, **kwargs):\n \"\"\"\n :return: contex data\n \"\"\"\n return super().get_context_data(**kwargs, SECRET_KEY=settings.SECRET_KEY, trig_info=self.trig_info,\n sms_info=self.sms_info)\n\n def form_valid(self, form):\n \"\"\"\n method for handling validly submitted forms\n \"\"\"\n ifttt = form.cleaned_data[\"ifttt_notifications\"]\n sms = form.cleaned_data[\"sms_notifications\"]\n if change_config_var(\"trigger_ifttt\", ifttt):\n self.trig_info = True\n else:\n self.trig_info = False\n\n if change_config_var(\"send_sms\", sms):\n self.sms_info = True\n else:\n self.sms_info = False\n\n form.fields[\"ifttt_notifications\"].initial = ifttt\n 
form.fields[\"sms_notifications\"].initial = sms\n\n return self.render_to_response(self.get_context_data())\n\n\nclass ManageIFTTTMakersView(TemplateView):\n \"\"\"\n view for adding, changing and deleting IFTTT makers\n \"\"\"\n template_name = \"remider/manage_ifttt.html\"\n forms_list = []\n makers_dict = {}\n info = (False, \"\")\n ignore_delinfo_in_url = False\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n GET method\n handles http`s GET request\n loads forms\n shows info about successful change\n \"\"\"\n self.makers_dict = {}\n self.forms_list = []\n post_data = request.POST\n\n for i, maker in enumerate(settings.IFTTT_MAKERS):\n label = \"IFTTT_MAKER_\" + str(i + 1)\n button_name = label + \"_button\"\n label_tag = \"IFTTT MAKER \" + str(i + 1) + \".\"\n form = self.create_changeenvvarform(button_name, label_tag, maker, post_data)\n self.makers_dict[label] = form\n next_maker_id = len(settings.IFTTT_MAKERS) + 1\n new_maker_form = self.create_changeenvvarform('new_maker_button',\n \"IFTTT MAKER \" + str(next_maker_id) + \".\", \"\", post_data)\n\n for i, maker in enumerate(settings.IFTTT_MAKERS):\n label = \"IFTTT_MAKER_\" + str(i + 1)\n button_name = label + \"_button\"\n form = self.makers_dict[label]\n if form.is_valid() and button_name in post_data:\n form, self.info = self.save_changeenvvarform(form, label)\n break\n if new_maker_form.is_valid() and 'new_maker_button' in post_data:\n new_maker_form, self.info = self.save_changeenvvarform(new_maker_form, \"IFTTT_MAKER_\" + str(next_maker_id))\n if self.request.GET.get(\"delinfo\", False) and self.info[0]:\n self.ignore_delinfo_in_url = True\n contex = self.get_context_data(forms_list=self.forms_list, info=self.info, delinfo=(False, \"normal\"),\n SECRET_KEY=settings.SECRET_KEY, last_id=self.get_del_id())\n\n return self.render_to_response(contex)\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n GET method\n handles http`s GET request\n loads forms\n shows info about successful change\n \"\"\"\n delinfo = (bool(int(request.GET.get(\"delinfo\", \"0\"))), request.GET.get(\"delid\", \"normal\"))\n\n self.forms_list = []\n self.makers_dict = {}\n\n for i, maker in enumerate(settings.IFTTT_MAKERS):\n label = \"IFTTT_MAKER_\" + str(i + 1)\n button_name = label + \"_button\"\n label_tag = \"IFTTT MAKER \" + str(i + 1) + \".\"\n form = self.create_changeenvvarform(button_name, label_tag, maker)\n self.makers_dict[label] = form\n\n next_maker_id = len(settings.IFTTT_MAKERS) + 1\n self.create_changeenvvarform('new_maker_button', \"IFTTT MAKER \" + str(next_maker_id) + \".\", \"\")\n\n if delinfo[0] and not self.ignore_delinfo_in_url:\n id = delinfo[1]\n label = \"IFTTT_MAKER_\" + str(id)\n try:\n form = self.makers_dict.pop(label)\n self.forms_list.remove(form)\n self.forms_list[-2].deletable = True\n self.forms_list[-1].fields[\"new_value\"].label = \"IFTTT MAKER \" + str(\n len(self.makers_dict) + 1) + \".\"\n except KeyError:\n pass\n contex = self.get_context_data(forms_list=self.forms_list, info=self.info, delinfo=delinfo,\n SECRET_KEY=settings.SECRET_KEY, last_id=self.get_del_id())\n\n return self.render_to_response(contex)\n\n def get_del_id(self):\n if self.ignore_delinfo_in_url:\n return len(settings.IFTTT_MAKERS)\n\n delinfo = (bool(int(self.request.GET.get(\"delinfo\", \"0\"))), self.request.GET.get(\"delid\", \"normal\"))\n\n return str(int(delinfo[1]) - 1) if delinfo[0] else len(self.makers_dict)\n\n def create_changeenvvarform(self, button_name, label, default, post_data=()):\n \"\"\"\n creates form and adds 
it to forms_list\n :param button_name: string, unique button name\n :param label: string, field`s display name\n :param default: string, default value of form`s field\n :param post_data: request.POST or empty tuple\n :return: ready to use form\n \"\"\"\n if button_name in post_data:\n form = ChangeEnvVariableForm(post_data)\n else:\n form = ChangeEnvVariableForm()\n\n form.button_name = button_name\n form.fields['new_value'].label = label\n form.fields['new_value'].initial = default\n form.fields[\"new_value\"].required = False\n if form.button_name == 'new_maker_button': # special treatment for adding new maker form\n form.action = _(\"ADD\")\n if len(self.forms_list) > 0:\n self.forms_list[-1].deletable = True\n else:\n form.action = _(\"CHANGE\")\n\n self.forms_list.append(form)\n return form\n\n def save_changeenvvarform(self, form, label):\n \"\"\"\n reads data from submitted form and changes config variables\n :param form: submitted form\n :param label: name of config variable\n :return: already used form and info about successful change\n \"\"\"\n var = form.cleaned_data[\"new_value\"]\n if change_config_var(label, var):\n if form.button_name == 'new_maker_button': # special treatment for adding new maker form\n action = _(\"ADDED\")\n if len(self.forms_list) > 1:\n self.forms_list[-2].deletable = False\n form.action = _(\"CHANGE\")\n form.button_name = label + \"_button\"\n self.makers_dict[label] = form\n next_maker_id = len(self.makers_dict) + 1\n self.create_changeenvvarform('new_maker_button', \"IFTTT MAKER \" + str(next_maker_id) + \".\", \"\")\n\n\n else:\n action = _(\"CHANGED\")\n info2 = [True, form.fields['new_value'].label, action]\n else:\n info2 = [False, form.fields['new_value'].label, \"unsuccess\"]\n\n return form, info2\n", "id": "5708187", "language": "Python", "matching_score": 4.731353282928467, "max_stars_count": 2, "path": "remider/views.py" }, { "content": "\"\"\"\nDjango settings for infusionset_reminder project.\n\nGenerated by 'django-admin startproject' using Django 2.1.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.1/ref/settings/\n\"\"\"\n\nimport os\n\nimport django_heroku\nfrom decouple import config\nfrom django.utils.translation import ugettext_lazy as _\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config(\"DEBUG\", default=False, cast=bool)\n\nALLOWED_HOSTS = []\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'remider.apps.RemiderConfig',\n \"bootstrap4\",\n 'bootstrap_datepicker_plus'\n]\nCRISPY_TEMPLATE_PACK = 'bootstrap4'\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 
'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'infusionset_reminder.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')]\n ,\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'infusionset_reminder.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.1/topics/i18n/\nLANGUAGES = (\n (\"pl\", _(\"Polish\")),\n (\"en\", _(\"English\"))\n)\n\nLANGUAGE_CODE = config(\"LANGUAGE_CODE\", default=\"en\")\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, \"locale\"),\n)\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.1/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nTOKEN = config(\"heroku_token\", default=\"\")\nAPP_NAME = config(\"app_name\")\n\nINFUSION_SET_ALERT_FREQUENCY = config(\"INFUSION_SET_ALERT_FREQUENCY\", default=72, cast=int)\nSENSOR_ALERT_FREQUENCY = config(\"SENSOR_ALERT_FREQUENCY\", default=144, cast=int)\n\nTWILIO_ACCOUNT_SID = config(\"TWILIO_ACCOUNT_SID\", default=\"\")\nTWILIO_AUTH_TOKEN = config(\"TWILIO_AUTH_TOKEN\", default=\"\")\n\nATRIGGER_KEY = config(\"ATRIGGER_KEY\", default=\"\")\nATRIGGER_SECRET = config(\"ATRIGGER_SECRET\", default=\"\")\n\nFROM_NUMBER = config(\"from_number\", default=\"\")\nNIGTSCOUT_LINK = config(\"NIGHTSCOUT_LINK\", default=\"\")\n\nTO_NUMBERS = []\ni = 0\nwhile True:\n i += 1\n try:\n number = config(\"to_number_\" + str(i))\n TO_NUMBERS.append(number)\n except:\n break\n\nIFTTT_MAKERS = []\ni = 0\n\nwhile True:\n i += 1\n try:\n marker = config(\"IFTTT_MAKER_\" + str(i))\n IFTTT_MAKERS.append(marker)\n except:\n break\n\nTRIGGER_IFTTT = config(\"trigger_ifttt\", default=False, cast=bool)\nSEND_SMS = config(\"send_sms\", default=False, cast=bool)\ndjango_heroku.settings(locals())\n", "id": "2338772", "language": "Python", "matching_score": 2.592928647994995, "max_stars_count": 0, "path": "infusionset_reminder/settings.py" }, { "content": "from django.apps import AppConfig\n\n\nclass RemiderConfig(AppConfig):\n name = 'remider'\n", "id": "2441019", "language": "Python", "matching_score": 0.5542833209037781, "max_stars_count": 2, "path": "remider/apps.py" } ]
2.170139
csestelo
[ { "content": "import unittest\n\nimport sys\n\nfrom jsonbender import S, K\nfrom jsonbender.core import bend, BendingException, Context\nfrom jsonbender.test import BenderTestMixin\n\n\nclass TestBend(unittest.TestCase):\n def test_empty_mapping(self):\n self.assertDictEqual(bend({}, {'a': 1}), {})\n\n def test_flat_mapping(self):\n mapping = {\n 'a_field': S('a', 'b'),\n 'another_field': K('wow'),\n }\n source = {'a': {'b': 'ok'}}\n expected = {\n 'a_field': 'ok',\n 'another_field': 'wow',\n }\n self.assertDictEqual(bend(mapping, source), expected)\n\n def test_nested_mapping(self):\n mapping = {\n 'a_field': S('a', 'b'),\n 'a': {\n 'nested': {\n 'field': S('f1', 'f2'),\n },\n },\n }\n source = {\n 'a': {'b': 'ok'},\n 'f1': {'f2': 'hi'},\n }\n expected = {\n 'a_field': 'ok',\n 'a': {'nested': {'field': 'hi'}},\n }\n self.assertDictEqual(bend(mapping, source), expected)\n\n def test_nested_mapping_with_lists(self):\n mapping = {\n 'a_field': S('a', 'b'),\n 'a': [{\n 'nested': {\n 'field': S('f1', 'f2'),\n },\n }],\n }\n source = {\n 'a': {'b': 'ok'},\n 'f1': {'f2': 'hi'},\n }\n expected = {\n 'a_field': 'ok',\n 'a': [{'nested': {'field': 'hi'}}],\n }\n self.assertDictEqual(bend(mapping, source), expected)\n\n def test_list_with_non_dict_elements(self):\n mapping = {'k': ['foo1', S('bar1')]}\n source = {'bar1': 'val 1'}\n expected = {'k': ['foo1', 'val 1']}\n self.assertDictEqual(bend(mapping, source), expected)\n\n def test_bending_exception_is_raised_when_something_bad_happens(self):\n mapping = {'a': S('nonexistant')}\n source = {}\n self.assertRaises(BendingException, bend, mapping, source)\n\n def test_constants_without_K(self):\n mapping = {'a': 'a const value', 'b': 123}\n self.assertDictEqual(bend(mapping, {}),\n {'a': 'a const value', 'b': 123})\n\n def test_context_shallow(self):\n mapping = {'a': Context() >> S('b')}\n res = bend(mapping, {}, context={'b': 23})\n self.assertDictEqual(res, {'a': 23})\n\n def test_context_deep(self):\n mapping = {'a': [{'a': Context() >> S('b')}]}\n res = bend(mapping, {}, context={'b': 23})\n self.assertDictEqual(res, {'a': [{'a': 23}]})\n\n\nclass TestOperators(unittest.TestCase, BenderTestMixin):\n def test_add(self):\n self.assert_bender(K(5) + K(2), None, 7)\n\n def test_sub(self):\n self.assert_bender(K(5) - K(2), None, 3)\n\n def test_mul(self):\n self.assert_bender(K(5) * K(2), None, 10)\n\n def test_div(self):\n self.assert_bender(K(4) / K(2), None, 2)\n self.assertAlmostEqual((K(5) / K(2))(None), 2.5, 2)\n\n def test_neg(self):\n self.assert_bender(-K(1), None, -1)\n self.assert_bender(-K(-1), None, 1)\n\n def test_op_with_context(self):\n mapping = {'res': (Context() >> S('b')) - S('a')}\n in_ = {'a': 23}\n context = {'b': 27}\n res = bend(mapping, in_, context=context)\n self.assertEqual(res, {'res': 4})\n\n def test_eq(self):\n self.assert_bender(K(42) == K(42), None, True)\n self.assert_bender(K(42) == K(27), None, False)\n\n def test_and(self):\n self.assert_bender(K(True) & K(True), None, True)\n self.assert_bender(K(True) & K(False), None, False)\n self.assert_bender(K(False) & K(True), None, False)\n self.assert_bender(K(False) & K(False), None, False)\n\n def test_or(self):\n self.assert_bender(K(True) | K(True), None, True)\n self.assert_bender(K(True) | K(False), None, True)\n self.assert_bender(K(False) | K(True), None, True)\n self.assert_bender(K(False) | K(False), None, False)\n\n def test_invert(self):\n self.assert_bender(~K(True), None, False)\n self.assert_bender(~K(False), None, True)\n\n\nclass 
TestGetItem(unittest.TestCase, BenderTestMixin):\n def test_getitem(self):\n bender = S('val')[2:8:2]\n if sys.version_info.major == 2:\n val = range(10)\n else:\n val = list(range(10))\n self.assert_bender(bender, {'val': val}, [2, 4, 6])\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "2474909", "language": "Python", "matching_score": 2.547334671020508, "max_stars_count": 0, "path": "tests/test_core.py" }, { "content": "from jsonbender.core import Bender, Context, bend, BendingException\nfrom jsonbender.list_ops import FlatForall, Forall, Filter, Reduce\nfrom jsonbender.string_ops import Format\nfrom jsonbender.selectors import F, K, S, OptionalS\nfrom jsonbender.control_flow import Alternation, If, Switch\n\n\n__version__ = '0.9.2'\n\n", "id": "12840975", "language": "Python", "matching_score": 1.0812833309173584, "max_stars_count": 0, "path": "jsonbender/__init__.py" }, { "content": "from jsonbender.core import Bender\nfrom jsonbender.selectors import K\n\n\nclass If(Bender):\n \"\"\"\n Takes a condition bender, and two benders (both default to K(None)).\n If the condition bender evaluates to true, return the value of the first\n bender. If it evaluates to false, return the value of the second bender.\n\n Example:\n ```\n if_ = If(S('country') == K('China'), S('first_name'), S('last_name'))\n if_({'country': 'China',\n 'first_name': 'Li',\n 'last_name': 'Na'}) # -> 'Li'\n\n if_({'country': 'Brazil',\n 'first_name': 'Gustavo',\n 'last_name': 'Kuerten'}) # -> 'Kuerten'\n ```\n \"\"\"\n\n def __init__(self, condition, when_true=K(None), when_false=K(None)):\n self.condition = condition\n self.when_true = when_true\n self.when_false = when_false\n\n def execute(self, val):\n return (self.when_true(val)\n if self.condition(val)\n else self.when_false(val))\n\n\nclass Alternation(Bender):\n \"\"\"\n Take any number of benders, and return the value of the first one that\n doesn't raise a LookupError (KeyError, IndexError etc.).\n If all benders raise LookupError, re-raise the last raised exception.\n\n Example:\n ```\n b = Alternation(S(1), S(0), S('key1'))\n b(['a', 'b']) # -> 'b'\n b(['a']) # -> 'a'\n b([]) # -> KeyError\n b({}) # -> KeyError\n b({'key1': 23}) # -> 23\n ```\n \"\"\"\n\n def __init__(self, *benders):\n self.benders = benders\n\n def execute(self, source):\n exc = ValueError()\n for bender in self.benders:\n try:\n result = bender(source)\n except LookupError as e:\n exc = e\n else:\n return result\n else:\n raise exc\n\n\nclass Switch(Bender):\n \"\"\"\n Take a key bender, a 'case' container of benders and a default bender\n (optional).\n The value returned by the key bender is used to get a bender from the\n case container, which then returns the result.\n If the key is not in the case container, the default is used.\n If it's unavailable, raise the original LookupError.\n\n Example:\n ```\n b = Switch(S('service'),\n {'twitter': S('handle'),\n 'mastodon': S('handle') + K('@') + S('server')},\n default=S('email'))\n\n b({'service': 'twitter', 'handle': 'etandel'}) # -> 'etandel'\n b({'service': 'mastodon', 'handle': 'etandel',\n 'server': 'mastodon.social'}) # -> '<EMAIL>'\n b({'service': 'facebook',\n 'email': '<EMAIL>'}) # -> '<EMAIL>'\n ```\n \"\"\"\n\n def __init__(self, key_bender, cases, default=None):\n self.key_bender = key_bender\n self.cases = cases\n self.default = default\n\n def execute(self, source):\n key = self.key_bender(source)\n try:\n bender = self.cases[key]\n except LookupError:\n if self.default:\n bender = self.default\n else:\n raise\n\n 
return bender(source)\n\n", "id": "12511521", "language": "Python", "matching_score": 0.9011624455451965, "max_stars_count": 115, "path": "jsonbender/control_flow.py" } ]
1.081283
abalasu1
[ { "content": "import os\nfrom flask import Flask\n\nfrom flask import request\nfrom flask import Response\n\napp = Flask(__name__)\n\nfrom flask_cors import CORS\nfrom flask_cors import cross_origin\n\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\[email protected]('/basicop/subtract', methods=['GET'])\n@cross_origin()\ndef subtract():\n n1 = int(request.args.get('n1'))\n if (n1 is None): n1 = 0\n\n n2 = int(request.args['n2'])\n if (n2 is None): n2 = 0\n \n result = n1 - n2\n return Response(str(result), mimetype=\"text/plain\")\n\nport = os.getenv('PORT', '80')\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=int(port))", "id": "2467283", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Istio/Calculator/basicop/subtract/app.py" }, { "content": "import os\nimport requests\n\nimport json\n\nfrom flask import Flask\nfrom flask import request\n\nfrom flask import Response\nfrom flask import jsonify\n\napp = Flask(__name__)\n\nfrom flask_cors import CORS\nfrom flask_cors import cross_origin\n\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\nbasicopserviceoptions = { \n '+': os.getenv('ADDURI', 'http://9.121.242.203:31697/basicop/add'),\n '-': os.getenv('SUBURI', 'http://172.16.58.3:31697/basicop/subtract'),\n '*': os.getenv('MULURI', 'http://9.121.242.203:31697/basicop/multiply'),\n '/': os.getenv('DIVURI', 'http://9.121.242.203:31697/basicop/divide') \n}\n\ndef forwardTraceHeaders(request):\n headers = {}\n incomingheaders = [\n 'x-request-id',\n 'x-b3-traceid',\n 'x-b3-spanid',\n 'x-b3-parentspanid',\n 'x-b3-sampled',\n 'x-b3-flags',\n 'x-ot-span-context',\n 'x-dev-user'\n ]\n \n for h in incomingheaders:\n val = request.headers.get(h)\n\n if val is not None:\n headers[h] = val\n \n return headers\n\ndef executeop(op, params, headers, timeout):\n try:\n url = basicopserviceoptions[op]\n response = requests.get(url, headers = headers, params = params, timeout = timeout)\n except:\n response = None\n \n status = response.status_code\n if response and status == 200:\n return status, response.text\n else:\n return status, 'NA'\n\[email protected]('/compositeop/square', methods=['GET'])\n@cross_origin()\ndef square():\n n1 = request.args.get('n1')\n\n headers = forwardTraceHeaders(request)\n params = { 'n1': n1, 'n2': n1 }\n \n status, result = executeop('*', params, headers, 20.0)\n return Response(str(result), mimetype=\"text/plain\")\n\[email protected]('/compositeop/cube', methods=['GET'])\n@cross_origin()\ndef cube():\n n1 = request.args.get('n1')\n\n headers = forwardTraceHeaders(request)\n params = { 'n1': n1, 'n2': n1 }\n status, result = executeop('*', params, headers, 20.0)\n\n params = { 'n1': result, 'n2': n1 }\n status, result = executeop('*', params, headers, 20.0)\n\n return Response(str(result), mimetype=\"text/plain\")\n\[email protected]('/compositeop/mean', methods=['GET'])\n@cross_origin()\ndef mean():\n headers = forwardTraceHeaders(request)\n count = len(request.args)\n\n result = 0\n for arg in request.args:\n param = request.args.get(arg)\n\n params = { 'n1': result, 'n2': param }\n status, result = executeop('+', params, headers, 20.0)\n \n params = { 'n1': result, 'n2': count }\n status, result = executeop('/', params, headers, 20.0)\n\n return Response(str(result), mimetype=\"text/plain\") \n\ndef expr2rpn(expr, delimiter):\n OPERATORS = set(['+', '-', '*', '/', '(', ')'])\n PRIORITY = {'+':1, '-':1, '*':2, '/':2}\n \n stack = [] # only pop when the coming op has priority \n output = ''\n\n pos = 
0\n for ch in expr:\n if ch not in OPERATORS:\n output += ch\n if pos < len(expr) - 1 and expr[pos + 1] in OPERATORS: output += delimiter\n elif ch == '(':\n stack.append('(')\n output += delimiter\n elif ch == ')':\n while stack and stack[-1] != '(':\n output += stack.pop()\n output += delimiter\n\n stack.pop() # pop '('\n else:\n while stack and stack[-1] != '(' and PRIORITY[ch] <= PRIORITY[stack[-1]]:\n output += stack.pop()\n output += delimiter\n \n stack.append(ch)\n\n pos += 1\n \n # leftover\n while stack: \n output += delimiter\n output += stack.pop()\n\n return \" \".join(output.split()).strip()\n\ndef processrpnexpr(rpnexpr, headers):\n stack = []\n steps = []\n\n if rpnexpr:\n for val in rpnexpr.split(' '):\n if val in ['+', '-', '*', '/']:\n op1 = stack.pop()\n op2 = stack.pop()\n \n op = val\n params = { 'n1': op2, 'n2': op1 }\n\n if op == '/' and int(op1) == 0: return [ \"NaN\", steps ]\n \n istatus, iresult = executeop(op, params, headers, 20.0)\n stack.append(iresult)\n\n stepn = {}\n stepn['stepop'] = op\n stepn['stepop1'] = op1\n stepn['stepop2'] = op2\n stepn['stepresult'] = iresult\n stepn['stepstatus'] = istatus\n steps.append(stepn)\n \n if (istatus != 200): break\n else:\n stack.append(val)\n\n result = stack.pop()\n return [ result, steps ]\n else:\n return [ None, None ]\n\ndef formatresult(identifier, expr, rpnexpr, steps, result):\n jsonresult = {}\n\n jsonresult['version'] = os.getenv('VERSION')\n jsonresult['identifier'] = identifier\n\n jsonresult['expr'] = expr\n jsonresult['rpnexpr'] = rpnexpr\n\n jsonresult['result'] = result\n jsonresult['steps'] = steps\n\n return jsonresult\n\[email protected]('/compositeop/eval', methods=['GET'])\n@cross_origin()\ndef eval():\n headers = forwardTraceHeaders(request)\n\n identifier = request.args.get('identifier')\n expr = request.args.get('expr')\n \n rpnexpr = expr2rpn(expr, ' ') \n \n result, steps = processrpnexpr(rpnexpr, headers)\n jsonresult = formatresult(identifier, expr, rpnexpr, steps, result)\n\n return jsonify(jsonresult)\n\[email protected]('/compositeop/evalrpnexpr', methods=['GET'])\n@cross_origin()\ndef evalrpnexpr():\n headers = forwardTraceHeaders(request)\n\n identifier = request.args.get('identifier')\n rpnexpr = request.args.get('expr')\n \n result, steps = processrpnexpr(rpnexpr, headers) \n jsonresult = formatresult(identifier, rpnexpr, rpnexpr, steps, result)\n \n return jsonify(jsonresult)\n\nport = os.getenv('PORT', '80')\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=int(port))", "id": "11149424", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Istio/Calculator/compositeop/app.py" } ]
0
mnmldani
[ { "content": "\n# This file is part of ClusterScan.\n\n# ClusterScan is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# ClusterScan is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with ClusterScan. If not, see <http://www.gnu.org/licenses/>.\n\n\nimport string\n\nimport pandas as pd\nimport pybedtools\n\n\ndef window_maker(list_name, filled_list, window_size, slide_size):\n \"\"\"Make a bed file of sliding windows.\"\"\"\n for scaffold, start, end in filled_list:\n width = window_size\n step = slide_size\n\n if width <= end:\n list_name.append((scaffold, start, width))\n else:\n list_name.append((scaffold, start, end))\n\n while width <= end:\n start += step\n width += step\n if width >= end:\n list_name.append((scaffold, start, end))\n else:\n list_name.append((scaffold, start, width))\n return list_name\n\n\ndef cluster_composer(pre_cluster_object, pre_cluster_intersection):\n final_list = []\n \"\"\"Find real feature's positions.\"\"\"\n tmp = []\n for item in pre_cluster_object:\n scaffold = str(item[0])\n for line in pre_cluster_intersection:\n line = str(line)\n if line.split()[0] == scaffold and (int(item[1]) <= int(line.split()[1]) <= int(item[2]) and int(item[1]) <= int(line.split()[2]) <= int(item[2])):\n tmp.append(int(line.split()[1]))\n tmp.append(int(line.split()[2]))\n else:\n continue\n final_list.append((scaffold, min(tmp), max(tmp)))\n tmp = []\n return final_list\n\n\n'''\ndef seed_extender(new_list, indexes, intersection, limit):\n \"\"\"Extends culster's seeds.\"\"\"\n for index in indexes:\n cluster_pos = []\n scaffold = intersection[index][0]\n right_step = index\n left_step = index\n while int(intersection[left_step][3]) >= limit and intersection[left_step][0] == scaffold:\n if int(intersection[left_step][1]) not in cluster_pos:\n cluster_pos.append(int(intersection[left_step][1]))\n if (left_step - 1) >= 0:\n left_step -= 1\n else:\n break\n while int(intersection[right_step][3]) >= limit and intersection[right_step][0] == scaffold:\n if int(intersection[right_step][2]) not in cluster_pos:\n cluster_pos.append(int(intersection[right_step][2]))\n if (right_step + 1) <= max(indexes):\n right_step += 1\n else:\n break\n cluster = (str(scaffold), min(cluster_pos), max(cluster_pos))\n if cluster not in new_list:\n new_list.append(cluster)\n return new_list\n'''\n\n\ndef do_clusterdist(catList, pdTbl, tbl, sargs):\n for category in catList:\n df = pdTbl[pdTbl.category == category]\n BEDtools_object = pybedtools.BedTool().from_dataframe(df).sort()\n\n try:\n merge = BEDtools_object.merge(d=int(sargs['--dist']), c=4, o=\"count_distinct\")\n except Exception as e:\n continue\n\n df = pd.read_table(merge.fn, header=None)\n df[4] = category\n tbl = tbl.append(df)\n\n return tbl\n\n\ndef do_clustermean(catList, pdTbl, tbl, sargs):\n loc = list(pdTbl.chr.unique())\n chr_len = []\n\n for chr in loc:\n df = pdTbl[pdTbl.chr == chr]\n chr_len.append((chr, 0, max(df.end)))\n\n windows = []\n window_maker(windows, chr_len, int(sargs['--window']), int(sargs['--slide']))\n win_bed = pybedtools.BedTool(windows)\n\n # for each category compute 
clusters\n for category in catList:\n # print category\n df = pdTbl[pdTbl.category == category]\n BEDtools_object = pybedtools.BedTool().from_dataframe(df)\n\n # intersect features to windows\n try:\n intersect_bed = win_bed.intersect(BEDtools_object, c=True)\n except:\n continue\n\n df = pd.read_table(intersect_bed.fn, header=None, dtype={0: str})\n df[4] = category\n\n # compute mean and stdv feature density per-window\n mean = df[3].mean()\n stdv = df[3].std()\n\n multi1 = mean + (int(sargs['--seed'])*stdv)\n multi2 = mean + (int(sargs['--extension'])*stdv)\n\n # extract seeds and try to extend them\n seed_list = df[df[3] >= multi1].index.tolist()\n\n #NUOVO\n df_seed = df.loc[df[3] >= multi1]\n df_ext = df.loc[df[3] >= multi2]\n BEDtools_seed = pybedtools.BedTool().from_dataframe(df_seed)\n BEDtools_ext = pybedtools.BedTool().from_dataframe(df_ext)\n\n try:\n # merge = BEDtools_ext.merge(c=4, o=\"sum\")\n merge = BEDtools_ext.merge()\n except:\n continue\n\n try:\n intersect = merge.intersect(BEDtools_seed, u=True)\n except:\n continue\n\n try:\n pre_clusters = intersect.intersect(BEDtools_object, u=True)\n except:\n continue\n\n features_in_clusters = BEDtools_object.intersect(pre_clusters, wa=True)\n\n final_list = cluster_composer(pre_clusters, features_in_clusters)\n clusters = pybedtools.BedTool(final_list)\n final_clusters = clusters.intersect(BEDtools_object, c=True)\n\n tclusters = pd.read_table(final_clusters.fn, header=None)\n tclusters[4] = category\n tbl = tbl.append(tclusters)\n\n return tbl\n\n\ndef do_singletons(catList, pdTbl, clustersTbl, emptyTbl, sargs):\n for category in catList:\n try:\n df = pdTbl[pdTbl.category == category]\n df2 = clustersTbl[clustersTbl.category == category]\n\n ft = pybedtools.BedTool().from_dataframe(df).sort()\n cl = pybedtools.BedTool().from_dataframe(df2).sort()\n\n st = ft.intersect(cl, v=True)\n\n pdSt = pd.read_table(st.fn, header=None)\n emptyTbl = emptyTbl.append(pdSt)\n except Exception as e:\n continue\n\n return emptyTbl\n\n\n'''\n #extended_seed = []\n #seed_extender(extended_seed, seed_list, intersect_bed, multi2)\n\n #pre_clusters = pybedtools.BedTool(extended_seed)\n #features_in_clusters = BEDtools_object.intersect(pre_clusters, wa=True)\n\n #final_list = cluster_composer(pre_clusters, features_in_clusters)\n\n #try:\n #final_clusters = pybedtools.BedTool(final_list)\n #final_clusters = final_clusters.intersect(BEDtools_object, c=True) \n #final_clusters = pd.read_table(final_clusters.fn, header=None)\n #final_clusters[5] = category\n #tbl = tbl.append(final_clusters)\n #except Exception as e:\n #pass\n'''\n", "id": "5442612", "language": "Python", "matching_score": 2.9952309131622314, "max_stars_count": 0, "path": "bin/algos.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (C) 2017-2018 <NAME> and <NAME>\n\n# This file is part of ClusterScan.\n\n# ClusterScan is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# ClusterScan is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with ClusterScan. 
If not, see <http://www.gnu.org/licenses/>.\n\n\n\"\"\"Description:\n ClusterScan, search for clusters of features in a given annotation.\n\nUsage:\n clusterscan.py clusterdist FEATURES ANNOTATION [-o PATH] [-a NAME] [-c LIST] [--info FILE] [--singletons] [-n=<n>] [-d=<bp>]\n clusterscan.py clustermean FEATURES ANNOTATION [-o PATH] [-a NAME] [-c LIST] [--info FILE] [--singletons] [-n=<n>] [-w=<bp>] [-s=<bp>] [-k=<n>] [-e=<n>]\n clusterscan.py (-h | --help)\n clusterscan.py --version\n\nOptions:\n -h, --help Show this screen.\n -o, --output PATH Specify output path [default: ./].\n -a, --analysis NAME Specify optional analysis name for output files.\n -n, --nf=<n> Minimum number of features per cluster [default: 2].\n -d, --dist=<bp> Maximum distance between features in bp [default: 500000].\n -w, --window=<bp> Window size [default: 500000].\n -s, --slide=<bp> Sliding size [default: 250000].\n -k, --seed=<n> Number of standard deviations to identify a window which serves as the beginning of the cluster [default: 3].\n -e, --extension=<n> Number of standard deviations to identify the window(s) which serve to extend the cluster [default: 2].\n -c, --category LIST Comma separated list of one or more specific categories to be analyzed [e.g. PF00001,PF00002].\n Useful when you need to perform the analysis only for specific categories in the ANNOTATION file.\n --info FILE Specify optional file to describe categories.\n --singletons Identify singletons after clusters and bystanders annotation.\n --version Show program version.\n\"\"\"\n\nimport time\nimport os\nimport warnings\n\nimport pandas as pd\nimport pybedtools\nfrom docopt import docopt\nfrom rpy2 import robjects\nfrom rpy2.robjects import pandas2ri\nfrom rpy2.robjects.packages import importr\n\nfrom algos import *\n\nwith warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n import rpy2.robjects.lib.ggplot2 as ggplot2\n\nstart_time = time.time()\n\n\ndef input_tester(file_path):\n \"\"\"Check for the presence of input files.\"\"\"\n try:\n open(file_path)\n except IOError:\n raise SystemExit('Unable to open %s, file does not exist!' 
% (file_path.split('/')[-1]))\n else:\n pass\n\n\ndef options_tester(option, n, string):\n \"\"\"Check if rules for parameters are respected.\"\"\"\n if option < n:\n raise ValueError(string)\n else:\n pass\n\n\ndef rpy2_plotter(anno, clusters, name):\n \"\"\"Plot genes distribution in clusters using ggplot2 from R.\"\"\"\n pandas2ri.activate()\n grdevices = importr('grDevices')\n rprint = robjects.globalenv.get(\"print\")\n\n anno = anno.sort_values(by=\"n_ft\", ascending=False)\n anno = anno.head(n=10)\n category = anno[\"category\"].tolist()\n clusters = clusters[clusters[\"category\"].isin(category)]\n clusters = pandas2ri.py2ri(clusters)\n\n pp = ggplot2.ggplot(clusters) + ggplot2.aes_string(x=\"n_features\") + ggplot2.geom_histogram(binwidth=1) + ggplot2.facet_wrap(robjects.Formula(\"~category\"), ncol=5) + ggplot2.labs(x=\"Number of Features\", y=\"Number of Clusters\", title=\"Clusters distribution\")\n\n grdevices.pdf(file=name, width=11.692, height=8.267)\n rprint(pp)\n grdevices.dev_off()\n\n\ndef main():\n # test for input files availability\n input_tester(arguments['FEATURES'])\n input_tester(arguments['ANNOTATION'])\n\n if arguments['--info'] is None:\n pass\n else:\n input_tester(arguments['--info'])\n\n # clusters can't contain less than 2 features\n error1 = \"Minimum number of features per cluster must be a number higher than 1!\"\n options_tester(int(arguments['--nf']), 2, error1)\n\n # window size can't be lower than sliding size\n error2 = \"Sliding size can't be higher than window size!\"\n options_tester(int(arguments['--window']), int(arguments['--slide']), error2)\n\n # window size can't be lower than sliding size\n error3 = \"Seed or extension can't be a number lower than 1!\"\n options_tester(int(arguments['--seed']), 1, error3)\n options_tester(int(arguments['--extension']), 1, error3)\n\n # build database\n feat = pd.read_table(arguments['FEATURES'], header=None, usecols=range(6), dtype={0: str})\n anno = pd.read_table(arguments['ANNOTATION'], header=None)\n\n feat.columns = ['chr', 'start', 'end', 'name', 'score', 'strand']\n anno.columns = ['name', \"category\"]\n # anno[\"category\"] = anno[\"category\"].fillna(\"Unknown\")\n n = list(feat.name.unique())\n\n # pdtable stores genes annotation and corresponding categories\n pdtable = pd.merge(feat, anno, on='name', how='outer')\n pdtable[\"category\"] = pdtable[\"category\"].fillna(\"Unknown\")\n pdtable = pdtable[pd.notnull(pdtable[\"category\"])]\n pdtable = pdtable[pd.notnull(pdtable['chr'])]\n pdtable[['start', 'end']] = pdtable[['start', 'end']].astype(int)\n pdtable = pdtable.drop_duplicates(['name', \"category\"])\n # movq print str(pdtable)\n all_features = pdtable\n pdtable = pdtable[pdtable[\"category\"] != \"Unknown\"]\n\n # list unique categories\n if arguments['--category'] is None:\n l = list(pdtable.category.unique())\n else:\n l = arguments['--category'].split(',')\n # test the argument\n if set(l) <= set(list(pdtable.category.unique())):\n pass\n else:\n raise SystemExit('Some categories passed through the -c parameter are not present in the input files. 
Please, check your list and run the analysis again.')\n\n # inizialize empty table to be filled with clusters\n table = pd.DataFrame()\n\n # choose the algorithm\n if arguments['clusterdist'] is True:\n print(\"ClusterScan is running with clusterdist...\")\n\n # movq: arguments should be changed\n table = do_clusterdist(l,pdtable, table, arguments)\n else:\n print(\"ClusterScan is running with clustermean...\")\n\n table = do_clustermean(l, pdtable, table, arguments)\n\n if table.empty:\n print(\"ClusterScan didn't found any cluster!\")\n exit()\n else:\n pass\n\n # generate cluster table and filter it\n table.columns = [\"chr\", \"start\", \"end\", \"n_features\", \"category\"]\n table = table.sort_values([\"category\", \"chr\"], ascending=[True, True])\n table = table[table[\"n_features\"] >= int(arguments['--nf'])]\n table = table.sort_values(by=[\"category\"], ascending=[True])\n # table[\"cluster_id\"] = range(1, len(table) + 1)\n table[\"cluster_id\"] = [\"C\"+str(i) for i in range(1, len(table) + 1)]\n # get the total number of clusters\n c = table.shape[0]\n\n # generate output of clusters in BED format\n bedTbl = table.copy()\n bedTbl[\"strand\"] = \"+\"\n bedTbl = bedTbl.iloc[:, [0, 1, 2, 5, 3, 6, 4]]\n bed = pybedtools.BedTool().from_dataframe(bedTbl).sort()\n\n # generate table of features by intersect feature with clusters\n all_features_bed = pybedtools.BedTool().from_dataframe(all_features)\n clusters = pybedtools.BedTool().from_dataframe(table)\n features = all_features_bed.intersect(clusters, wb=True)\n features = pd.read_table(features.fn, header=None, dtype={0: str, 7: str})\n cl_features = features[features[6] == features[11]]\n cl_features = cl_features.iloc[:, [0, 1, 2, 3, 4, 5, 12, 11]]\n cl_features.columns = [\"chr\", \"start\", \"end\", \"name\", \"score\",\n \"strand\", \"cluster_id\", \"category\"]\n cl_features.drop('score', axis=1, inplace=True)\n # generate table of bystanders\n bystanders = features[features[6] != features[11]]\n\n # comment if you want to search for bystanders using only 1 category\n #if len(l) == 1:\n # bystanders = pd.DataFrame()\n #else:\n # pass\n\n # control for bystander = 0 (when program run with 1 category)\n if bystanders.empty:\n table = table.iloc[:, [5, 4, 0, 1, 2, 3]]\n table[\"n_bystanders\"] = 0\n else:\n bystanders = bystanders.iloc[:, [0, 1, 2, 3, 4, 5, 12, 11]]\n bystanders.columns = [\"chr\", \"start\", \"end\", \"name\", \"score\",\n \"strand\", \"cluster_id\", \"category\"]\n # prevent bystanders with 2+ different categories to be counted twice\n bystanders = bystanders.drop_duplicates(['name', \"cluster_id\"])\n # prevent features with 2+ different categories to be bystanders in theyr clusters\n bs_merge = pd.merge(bystanders, cl_features, how='outer', indicator=True)\n bystanders = bs_merge.ix[bs_merge._merge == 'left_only']\n #bystanders = bystanders.drop(bystanders.columns[8], axis=1)\n bystanders = bystanders.drop(bystanders.columns[[4, 8]], axis=1)\n # count bystanders number\n bs_count = bystanders.groupby(\"cluster_id\").count().reset_index()\n bs_count = bs_count.iloc[:, [0, 1]]\n bs_count.columns = [\"cluster_id\", \"n_bystanders\"]\n table = table.merge(bs_count, on=\"cluster_id\", how='outer')\n table = table.iloc[:, [5, 4, 0, 1, 2, 3, 6]]\n table[\"n_bystanders\"].fillna(0, inplace=True)\n table[\"n_bystanders\"] = table[\"n_bystanders\"].astype(int)\n bystanders[\"start\"] += 1\n\n\n # generate summary table\n summary = table.drop(table.columns[[0, 2, 3, 4]], axis=1)\n # calculate total number of clusters 
per-category\n n_clusters = summary.groupby(\"category\").count().reset_index()\n n_clusters = n_clusters.drop(\"n_bystanders\", axis=1)\n n_clusters.columns = [\"category\", \"n_clusters\"]\n # calculate total number of features and bystanders per-category\n n_ft_bs = summary.groupby(\"category\").sum().reset_index()\n n_ft_bs.columns = [\"category\", \"n_ft\", \"n_bs\"]\n # calculate maximum number of features and bystander in cluster\n max_ft_bs = summary.groupby(\"category\").max().reset_index()\n max_ft_bs.columns = [\"category\", \"max_ft\", \"max_bs\"]\n # calculate minimum number of features and bystander in cluster\n min_ft_bs = summary.groupby(\"category\").min().reset_index()\n min_ft_bs.columns = [\"category\", \"min_ft\", \"min_bs\"]\n # add category description if an info file is provided\n if arguments['--info'] is None:\n summary = n_clusters.merge(n_ft_bs, on=\"category\").merge(max_ft_bs, on=\"category\").merge(min_ft_bs, on=\"category\")\n else:\n desc = pd.read_table(arguments['--info'], header=None)\n desc.columns = [\"category\", \"description\"]\n summary = n_clusters.merge(n_ft_bs, on=\"category\").merge(max_ft_bs, on=\"category\").merge(min_ft_bs, on=\"category\").merge(desc, on=\"category\")\n\n # assign file names and save tables as result\n if not os.path.exists(arguments['--output']):\n os.makedirs(arguments['--output'])\n\n if arguments['--analysis'] is None:\n feat_name = os.path.join(arguments['--output'], 'features.tsv')\n byst_name = os.path.join(arguments['--output'], 'bystanders.tsv')\n clus_name = os.path.join(arguments['--output'], 'clusters.tsv')\n summ_name = os.path.join(arguments['--output'], 'summary.tsv')\n bed_name = os.path.join(arguments['--output'], 'clusters.bed')\n plot_name = os.path.join(arguments['--output'], 'distribution.pdf')\n else:\n feat_name = os.path.join(arguments['--output'], arguments['--analysis']+'_features.tsv')\n byst_name = os.path.join(arguments['--output'], arguments['--analysis']+'_bystanders.tsv')\n clus_name = os.path.join(arguments['--output'], arguments['--analysis']+'_clusters.tsv')\n summ_name = os.path.join(arguments['--output'], arguments['--analysis']+'_summary.tsv')\n bed_name = os.path.join(arguments['--output'], arguments['--analysis']+'_clusters.bed')\n plot_name = os.path.join(arguments['--output'], arguments['--analysis']+'_distribution.pdf')\n\n cl_features[\"start\"] += 1\n table[\"start\"] += 1\n\n if bystanders.empty:\n table[\"n_bystanders\"] = 'NA'\n summary[\"n_bs\"] = 'NA'\n summary[\"max_bs\"] = 'NA'\n summary[\"min_bs\"] = 'NA'\n\n cl_features.to_csv(feat_name, sep='\\t', header=True, index=False)\n bystanders.to_csv(byst_name, sep='\\t', header=True, index=False)\n table.to_csv(clus_name, sep='\\t', header=True, index=False)\n summary.to_csv(summ_name, sep='\\t', header=True, index=False)\n\n if arguments['--analysis'] is None:\n bed.saveas(bed_name, trackline='track name=\"%s\" description=\"chr start end cluster_id n_features strand category\"' % (arguments['FEATURES']))\n else:\n bed.saveas(bed_name, trackline='track name=\"%s\" description=\"chr start end cluster_id n_features strand category\"' % (arguments['--analysis']))\n\n # plot a duistribution for top 10 clusters (per n of features)\n #rpy2_plotter(summary, table, plot_name)\n\n if arguments['--singletons'] is True:\n print(\"Singletons identification has been launched...\")\n singletons = pd.DataFrame()\n singletons = do_singletons(l, pdtable, bedTbl, singletons, arguments)\n if singletons.empty:\n print(\"ClusterScan didn't found 
any singleton!\")\n else:\n if arguments['--analysis'] is None:\n st_name = os.path.join(arguments['--output'], 'singletons.tsv')\n else:\n st_name = os.path.join(arguments['--output'], arguments['--analysis']+'_singletons.tsv')\n\n singletons.columns = [\"chr\", \"start\", \"end\", \"name\", \"score\",\n \"strand\", \"category\"]\n singletons.drop('score', axis=1, inplace=True)\n singletons[\"start\"] += 1\n singletons.to_csv(st_name, sep='\\t', header=True, index=False)\n else:\n pass\n\n print('\\n%s\\t%s' % (\"Total number of unique features scanned:\", len(n)))\n print('%s\\t%s' % (\"Total number of unique categories scanned:\", len(l)))\n print('%s\\t%s\\n' % (\"Total number of clusters found:\", c))\n\n\n# program execution\nif __name__ == '__main__':\n arguments = docopt(__doc__, version='ClusterScan 0.2.1')\n # print arguments\n main()\n print(\"--- %s seconds ---\" % (int(round(time.time() - start_time, 0))))\n", "id": "8981665", "language": "Python", "matching_score": 2.7058510780334473, "max_stars_count": 0, "path": "bin/clusterscan.py" }, { "content": "#!/usr/bin/env python\n\nimport pandas as pd\nimport argparse as ap\nimport subprocess as sp\nimport logging\nimport os\n\nlogging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)\n\nparser = ap.ArgumentParser()\nparser.add_argument('-p', '--peakfile', required = True,\n help = 'narrowPeak file containing initiation site coordinates')\nparser.add_argument('-n', '--sitespercluster', default = 2, type = int,\n help = 'minimum number of sites per identified cluster')\nparser.add_argument('--clusterScan', required = True,\n help = 'path to clusterscan.py script')\nparser.add_argument('-o', '--outputPrefix', required = True,\n help = 'prefix for the output file')\nargs = parser.parse_args()\n\n# reading in MACS output\nlogging.info('generating required annotation file')\nbed = pd.read_csv(args.peakfile, sep = '\\t', header = None, usecols = [0, 1, 2, 3],\n names = ['chr', 'start', 'end', 'name'])\nbed['featurename'] = 'ispeak'\n\n# generating required annotation file\nannotationFile = args.peakfile.split('.')[0] + '.csa'\nwith open(annotationFile, 'w') as csa:\n csa.write('\\t'.join(['peakid', 'featuretype']) + '\\n')\n bed[['name', 'featurename']].to_csv(csa, sep = '\\t', index = False, mode = 'a', header = False)\n\n# calculating pairwise distances between MACS peaks\nlogging.info('computing median distance')\ndistances = []\nfor group in bed.groupby('chr'):\n frame = group[1]\n for i in frame.index[:-1]:\n distances.append(frame.at[i + 1, 'start'] - frame.at[i, 'end'])\n\nmedian = int(pd.Series(distances).median())\nlogging.info('median distance between peaks is %d' % median)\n\nlogging.info('clustering with clusterscan.py clusterdist {0} {1} -a {2} -d {3} -n {4}'.format(\n args.peakfile,\n annotationFile,\n args.outputPrefix,\n median,\n args.sitespercluster))\n\nsubprocess = sp.Popen('{0} clusterdist {1} {2} -a {3} -d {4} -n {5}'.format(\n args.clusterScan,\n args.peakfile,\n annotationFile,\n args.outputPrefix,\n median,\n args.sitespercluster),\n shell = True)\nsubprocess.wait()\n\n# processing results\nresultbed = pd.read_csv(args.outputPrefix + '_clusters.bed', sep = '\\t', header = None, skiprows = 1)\nresultbed.columns = ['chr', 'start', 'end', 'name', 'nsites', 'strand', 'featurename']\nresultbed.loc[:, 'name'] = ['iniZone_{0}'.format(i) for i in range(1, len(resultbed) + 1)]\nresultbed.to_csv(args.outputPrefix + '_clusters.bed', header = None, index = None, sep = '\\t')\n", "id": "614523", "language": 
"Python", "matching_score": 1.0248340368270874, "max_stars_count": 0, "path": "bin/clusterinitsites.py" } ]
2.705851
mareep-raljodid
[ { "content": "'''\nMIT License\n\nCopyright (c) 2019 <NAME>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport numpy as np\nimport os\nimport cv2 as cv2\nimport matplotlib.pyplot as plt\nimport random\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\nfrom tensorflow.keras.optimizers import RMSprop\n\n# Download the ASL dateset from here: https://drive.google.com/open?id=1apmXyY8OQx68b4-2G9Mttgrml7bldWfq\n# Don't forget to update the path on the below Dir variable\n\nDir = \"asl\"\ncatg = [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\",\"13\",\n\"14\",\"15\",\"16\",\"17\",\"18\",\"19\",\"20\",\"21\",\"22\",\"23\",\"24\",\"25\",\"26\",\"27\",\n\"28\",\"29\",\"30\",\"31\",\"32\",\"33\",\"34\",\"35\"]\n\ntrain = []\nsize = 200\ndef create_tr():\n for c in catg:\n path=os.path.join(Dir, c)\n class_nnn= catg.index(c)\n for img in os.listdir(path):\n imgax = cv2.imread(os.path.join(path,img), cv2.IMREAD_GRAYSCALE) \n imgar = cv2.resize(imgax, (size, size))\n train.append([imgar, class_nnn])\n\n\ncreate_tr()\n\nprint(\"Number of elements in training set: \", len(train))\nrandom.shuffle(train)\n\ntX = []\ntY = []\n\nfor features, label in train:\n tX.append(features)\n tY.append(label)\n \ntX = np.array(tX).reshape(-1, size, size, 1) #1 not 3\ntX = tX / 255.0\n\nclass myCallback(tf.keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs={}):\n if(logs.get('acc')>0.99):\n print(\"\\nReached 99% accuracy so cancelling training!\")\n self.model.stop_training = True\n\n# Below are the callBacls for saving the model and 99% accuracy Epocs Callback\n\n\ncallbacks1 = myCallback()\ncheckpoint_path = \"training_data/cp.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\ncp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,\n save_weights_only=True,\n verbose=1)\n\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size = [3,3], padding = 'same', activation = 'relu', input_shape = tX.shape[1:]),\n tf.keras.layers.Conv2D(64, kernel_size = [3,3], padding = 'same', activation = 'relu'),\n tf.keras.layers.MaxPool2D(pool_size = [3,3]),\n tf.keras.layers.Conv2D(128, kernel_size = [3,3], padding = 'same', activation = 'relu'),\n 
tf.keras.layers.MaxPooling2D(3,3),\n tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2,2), \n tf.keras.layers.Conv2D(32, (3,3), activation='relu'), \n tf.keras.layers.MaxPooling2D(2,2),\n tf.keras.layers.Dense(1024, activation = 'relu'),\n tf.keras.layers.Dense(512, activation = 'relu'),\n tf.keras.layers.Flatten(), \n tf.keras.layers.Dense(1013, activation='relu'), \n tf.keras.layers.Dense(36, activation='softmax') \n ])\n\n\n\nmodel.summary()\nmodel.compile(optimizer=RMSprop(lr=0.001),\n loss='sparse_categorical_crossentropy',\n metrics = ['acc'])\n\nmodel.fit(tX, tY, batch_size=25, validation_split=0.10, epochs = 10, callbacks=[callbacks1, cp_callback])\n\n# This code comented out below is for restoration process of the Model trained by me. \n# You're welcome :) \n\n'''\nmodel.load_weights(\"Pretrained_Model/cp.ckpt\")\nloss,acc = model.evaluate(tX, tY)\nprint(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))\n'''\n\n# Now comes the fun part!! \n# Predict the hand sign by editing the path_to_pic valiable below, \n# don't forget that darker the backgroung, more accurate prediction!!\n\nzzx = \"\"\nwhile zzx != \"exit\":\n\n path_to_pic = input(\"Enter the path to the image you would like to predict: \")\n imgax4 = cv2.imread(os.path.join(path_to_pic), cv2.IMREAD_GRAYSCALE)\n imgar4 = cv2.resize(imgax4, (size, size))\n\n ttX = []\n for feature in imgar4:\n ttX.append(feature)\n \n ttX = np.array(ttX).reshape(-1, size, size, 1)\n eer = model.predict(ttX)\n z = np.where(eer == 1)\n if z[1]>9:\n prediction = chr((z[1]-10) + 65)\n print(prediction)\n else:\n prediction = int(z[1])\n print(prediction)\n zzx = input(\"Enter exit to stop or return to keep predicting.\")\n \n\n", "id": "5584514", "language": "Python", "matching_score": 7.244522571563721, "max_stars_count": 2, "path": "SL_independent.py" }, { "content": "'''\nMIT License\n\nCopyright (c) 2019 <NAME>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\nimport string\nimport numpy as np\nimport os\nimport cv2 as cv2\nimport matplotlib.pyplot as plt\nimport random\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\nfrom tensorflow.keras.optimizers import RMSprop\n\n\nsize = 200\npath_to_pic = \"photo.jpg\"\nimgax4 = cv2.imread(os.path.join(path_to_pic), cv2.IMREAD_GRAYSCALE)\nimgar4 = cv2.resize(imgax4, (size, size))\n\nttX = []\nfor feature in imgar4:\n ttX.append(feature)\n \nttX = np.array(ttX).reshape(-1, size, size, 1)\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, kernel_size = [3,3], padding = 'same', activation = 'relu', input_shape = ttX.shape[1:]),\n tf.keras.layers.Conv2D(64, kernel_size = [3,3], padding = 'same', activation = 'relu'),\n tf.keras.layers.MaxPool2D(pool_size = [3,3]),\n tf.keras.layers.Conv2D(128, kernel_size = [3,3], padding = 'same', activation = 'relu'),\n tf.keras.layers.MaxPooling2D(3,3),\n tf.keras.layers.Conv2D(64, (3,3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2,2), \n tf.keras.layers.Conv2D(32, (3,3), activation='relu'), \n tf.keras.layers.MaxPooling2D(2,2),\n tf.keras.layers.Dense(1024, activation = 'relu'),\n tf.keras.layers.Dense(512, activation = 'relu'),\n tf.keras.layers.Flatten(), \n tf.keras.layers.Dense(1013, activation='relu'), \n tf.keras.layers.Dense(36, activation='softmax') \n ])\n \nmodel.summary()\nmodel.compile(optimizer=RMSprop(lr=0.001),\n loss='sparse_categorical_crossentropy',\n metrics = ['acc'])\nmodel.load_weights(\"Pretrained_Model/cp.ckpt\")\n#loss,acc = model.evaluate(tX, tY)\n#print(\"Restored model, accuracy: {:5.2f}%\".format(100*acc))\n\nzzx = \"\"\nwhile zzx != \"exit\":\n path_to_pic = input(\"Enter the path to the image you would like to predict: \")\n imgax4 = cv2.imread(os.path.join(path_to_pic), cv2.IMREAD_GRAYSCALE)\n imgar4 = cv2.resize(imgax4, (size, size))\n\n ttX = []\n for feature in imgar4:\n ttX.append(feature)\n \n ttX = np.array(ttX).reshape(-1, size, size, 1) \n eer = model.predict(ttX)\n z = np.where(eer == 1)\n if z[1]>9:\n prediction = chr((z[1]-10) + 65)\n print(prediction)\n else:\n prediction = int(z[1])\n print(prediction)\n zzx = input(\"Enter exit to stop or return to keep predicting.\")\n\n", "id": "4421143", "language": "Python", "matching_score": 0.2801467180252075, "max_stars_count": 2, "path": "SL_quickPredict.py" }, { "content": "\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! 
Do not edit.\nOriginal C++ source file: beam_search_ops.cc\n\"\"\"\n\nimport collections as _collections\nimport six as _six\n\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import errors as _errors\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\nfrom tensorflow.python.util.deprecation import deprecated_endpoints\nfrom tensorflow.python.util import dispatch as _dispatch\nfrom tensorflow.python.util.tf_export import tf_export\nfrom tensorflow.python.util.tf_export import kwarg_only as _kwarg_only\nfrom tensorflow.tools.docs import doc_controls as _doc_controls\n\n\n@_dispatch.add_dispatch_list\n@tf_export('gather_tree')\ndef gather_tree(step_ids, parent_ids, max_sequence_lengths, end_token, name=None):\n r\"\"\"Calculates the full beams from the per-step ids and parent beam ids.\n\n On CPU, if an out of bound parent id is found, an error is returned.\n On GPU, if an out of bound parent id is found, a -1 is stored in the\n corresponding output value and the execution for that beam returns early.\n\n For a given beam, past the time step containing the first decoded `end_token`\n all values are filled in with `end_token`.\n\n TODO(ebrevdo): fill in the remainder of this docstring.\n\n Args:\n step_ids: A `Tensor`. Must be one of the following types: `int32`.\n `[max_time, batch_size, beam_width]`.\n parent_ids: A `Tensor`. Must have the same type as `step_ids`.\n `[max_time, batch_size, beam_width]`.\n max_sequence_lengths: A `Tensor` of type `int32`. `[batch_size]`.\n end_token: A `Tensor`. Must have the same type as `step_ids`. `[]`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `step_ids`.\n `[max_time, batch_size, beam_width]`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"GatherTree\", name, _ctx._post_execution_callbacks, step_ids,\n parent_ids, max_sequence_lengths, end_token)\n return _result\n except _core._FallbackException:\n try:\n return gather_tree_eager_fallback(\n step_ids, parent_ids, max_sequence_lengths, end_token, name=name,\n ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except (TypeError, ValueError):\n result = _dispatch.dispatch(\n gather_tree, step_ids=step_ids, parent_ids=parent_ids,\n max_sequence_lengths=max_sequence_lengths,\n end_token=end_token, name=name)\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\n return result\n raise\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n try:\n _, _, _op = _op_def_lib._apply_op_helper(\n \"GatherTree\", step_ids=step_ids, parent_ids=parent_ids,\n max_sequence_lengths=max_sequence_lengths,\n end_token=end_token, name=name)\n except (TypeError, ValueError):\n result = _dispatch.dispatch(\n gather_tree, step_ids=step_ids, parent_ids=parent_ids,\n max_sequence_lengths=max_sequence_lengths,\n end_token=end_token, name=name)\n if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:\n return result\n raise\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op._get_attr_type(\"T\"))\n _execute.record_gradient(\n \"GatherTree\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\ndef GatherTree(step_ids, parent_ids, max_sequence_lengths, end_token, name=None):\n return gather_tree(step_ids=step_ids, parent_ids=parent_ids, max_sequence_lengths=max_sequence_lengths, end_token=end_token, name=name)\nGatherTree.__doc__ = gather_tree.__doc__\nGatherTree = _doc_controls.do_not_generate_docs(_kwarg_only(GatherTree))\ntf_export(\"raw_ops.GatherTree\")(GatherTree)\n\n\ndef gather_tree_eager_fallback(step_ids, parent_ids, max_sequence_lengths, end_token, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function gather_tree\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n _attr_T, _inputs_T = _execute.args_to_matching_eager([step_ids, parent_ids, end_token], _ctx)\n (step_ids, parent_ids, end_token) = _inputs_T\n max_sequence_lengths = _ops.convert_to_tensor(max_sequence_lengths, _dtypes.int32)\n _inputs_flat = [step_ids, parent_ids, max_sequence_lengths, end_token]\n _attrs = (\"T\", _attr_T)\n _result = _execute.execute(b\"GatherTree\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"GatherTree\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n_ops.RegisterShape(\"GatherTree\")(None)\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"GatherTree\"\n# input_arg {\n# name: \"step_ids\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: 
\"parent_ids\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"max_sequence_lengths\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"end_token\"\n# type_attr: \"T\"\n# }\n# output_arg {\n# name: \"beams\"\n# type_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT32\n# }\n# }\n# }\n# }\n_op_def_lib = _InitOpDefLibrary(b\"\\nt\\n\\nGatherTree\\022\\r\\n\\010step_ids\\\"\\001T\\022\\017\\n\\nparent_ids\\\"\\001T\\022\\030\\n\\024max_sequence_lengths\\030\\003\\022\\016\\n\\tend_token\\\"\\001T\\032\\n\\n\\005beams\\\"\\001T\\\"\\020\\n\\001T\\022\\004type:\\005\\n\\0032\\001\\003\")\n", "id": "3989362", "language": "Python", "matching_score": 8.604758262634277, "max_stars_count": 2, "path": "env_name/lib/python3.6/site-packages/tensorflow_core/contrib/seq2seq/ops/gen_beam_search_ops.py" }, { "content": "\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\nOriginal C++ source file: ragged_math_ops.cc\n\"\"\"\n\nimport collections as _collections\nimport six as _six\n\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import errors as _errors\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\nfrom tensorflow.python.util.deprecation import deprecated_endpoints\nfrom tensorflow.python.util import dispatch as _dispatch\nfrom tensorflow.python.util.tf_export import tf_export\nfrom tensorflow.python.util.tf_export import kwarg_only as _kwarg_only\nfrom tensorflow.tools.docs import doc_controls as _doc_controls\n\n\n_ragged_range_outputs = [\"rt_nested_splits\", \"rt_dense_values\"]\n_RaggedRangeOutput = _collections.namedtuple(\n \"RaggedRange\", _ragged_range_outputs)\n\n\ndef ragged_range(starts, limits, deltas, Tsplits=_dtypes.int64, name=None):\n r\"\"\"Returns a `RaggedTensor` containing the specified sequences of numbers.\n\n \n Returns a `RaggedTensor` `result` composed from `rt_dense_values` and\n `rt_nested_splits`, such that\n `result[i] = range(starts[i], limits[i], deltas[i])`.\n\n ```python\n >>> (rt_nested_splits, rt_dense_values) = gen_ragged_ops.ragged_range(\n ... starts=[2, 5, 8], limits=[3, 5, 12], deltas=1)\n >>> result = ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)\n >>> print result.eval().tolist()\n [[2], # result[0] = range(2, 3)\n [], # result[1] = range(5, 5)\n [8, 9, 10, 11]] # result[2] = range(8, 12)\n ```\n\n The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.\n The vector inputs must all have the same size. Scalar inputs are broadcast\n to match the size of the vector inputs.\n\n Args:\n starts: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `float64`, `int32`, `int64`.\n The starts of each range.\n limits: A `Tensor`. Must have the same type as `starts`.\n The limits of each range.\n deltas: A `Tensor`. 
Must have the same type as `starts`.\n The deltas of each range.\n Tsplits: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n A tuple of `Tensor` objects (rt_nested_splits, rt_dense_values).\n\n rt_nested_splits: A `Tensor` of type `Tsplits`.\n rt_dense_values: A `Tensor`. Has the same type as `starts`.\n \"\"\"\n _ctx = _context._context or _context.context()\n if _ctx is not None and _ctx._thread_local_data.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._thread_local_data.device_name,\n \"RaggedRange\", name, _ctx._post_execution_callbacks, starts, limits,\n deltas, \"Tsplits\", Tsplits)\n _result = _RaggedRangeOutput._make(_result)\n return _result\n except _core._FallbackException:\n try:\n return ragged_range_eager_fallback(\n starts, limits, deltas, Tsplits=Tsplits, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if Tsplits is None:\n Tsplits = _dtypes.int64\n Tsplits = _execute.make_type(Tsplits, \"Tsplits\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"RaggedRange\", starts=starts, limits=limits, deltas=deltas,\n Tsplits=Tsplits, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op._get_attr_type(\"T\"), \"Tsplits\",\n _op._get_attr_type(\"Tsplits\"))\n _execute.record_gradient(\n \"RaggedRange\", _inputs_flat, _attrs, _result, name)\n _result = _RaggedRangeOutput._make(_result)\n return _result\n\ndef RaggedRange(starts, limits, deltas, Tsplits=_dtypes.int64, name=None):\n return ragged_range(starts=starts, limits=limits, deltas=deltas, Tsplits=Tsplits, name=name)\nRaggedRange.__doc__ = ragged_range.__doc__\nRaggedRange = _doc_controls.do_not_generate_docs(_kwarg_only(RaggedRange))\ntf_export(\"raw_ops.RaggedRange\")(RaggedRange)\n\n\ndef ragged_range_eager_fallback(starts, limits, deltas, Tsplits=_dtypes.int64, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function ragged_range\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if Tsplits is None:\n Tsplits = _dtypes.int64\n Tsplits = _execute.make_type(Tsplits, \"Tsplits\")\n _attr_T, _inputs_T = _execute.args_to_matching_eager([starts, limits, deltas], _ctx, _dtypes.int32)\n (starts, limits, deltas) = _inputs_T\n _inputs_flat = [starts, limits, deltas]\n _attrs = (\"T\", _attr_T, \"Tsplits\", Tsplits)\n _result = _execute.execute(b\"RaggedRange\", 2, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"RaggedRange\", _inputs_flat, _attrs, _result, name)\n _result = _RaggedRangeOutput._make(_result)\n return _result\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"RaggedRange\"\n# input_arg {\n# name: \"starts\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"limits\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"deltas\"\n# type_attr: \"T\"\n# }\n# output_arg {\n# name: \"rt_nested_splits\"\n# type_attr: \"Tsplits\"\n# }\n# 
output_arg {\n# name: \"rt_dense_values\"\n# type_attr: \"T\"\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# default_value {\n# type: DT_INT32\n# }\n# allowed_values {\n# list {\n# type: DT_BFLOAT16\n# type: DT_FLOAT\n# type: DT_DOUBLE\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# attr {\n# name: \"Tsplits\"\n# type: \"type\"\n# default_value {\n# type: DT_INT64\n# }\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# }\n_op_def_lib = _InitOpDefLibrary(b\"\\n\\236\\001\\n\\013RaggedRange\\022\\013\\n\\006starts\\\"\\001T\\022\\013\\n\\006limits\\\"\\001T\\022\\013\\n\\006deltas\\\"\\001T\\032\\033\\n\\020rt_nested_splits\\\"\\007Tsplits\\032\\024\\n\\017rt_dense_values\\\"\\001T\\\"\\030\\n\\001T\\022\\004type\\032\\0020\\003:\\t\\n\\0072\\005\\016\\001\\002\\003\\t\\\"\\033\\n\\007Tsplits\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\")\n", "id": "836512", "language": "Python", "matching_score": 7.057945251464844, "max_stars_count": 2, "path": "env_name/lib/python3.6/site-packages/tensorflow_core/python/ops/gen_ragged_math_ops.py" } ]
7.151234
arkalon76
[ { "content": "'''\nMIT License\n\nCopyright (c) 2017 <NAME>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport os\nimport sys\nimport argparse\nimport re\nimport configparser\n\n\nfrom guessit import guessit\n\n\nclass bcolors:\n \"\"\" Allows for us to give some colour to the output text\n \"\"\"\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\nACCEPTED_EXTENTIONS = ['.mkv', '.mp4', '.avi', '.mov']\n\n\ndef printInfoMessage(message):\n \"\"\" Prints out a information message for the user\n\n Parameters:\n -----------\n message: Message to be displayed to the user\n \"\"\"\n print(bcolors.HEADER + message + bcolors.ENDC)\n\n\ndef printFailureMessage(message):\n \"\"\" Prints out a error message for the user\n\n Parameters:\n -----------\n message: Message to be displayed to the user\n \"\"\"\n print(bcolors.FAIL + ' ' + 'Warning: ' + bcolors.ENDC + message)\n\n\ndef parseFiles(rootdir):\n \"\"\" Will walk all the files under rootdir and, if valid, rename them.\n If Dry run flag is set then we will only print out, not actually rename.\n\n Parameters:\n -----------\n rootdir: The directory from where we start the walk.\n \"\"\"\n # First pass. Rename files\n # We start from root and work ourself down the subdirectories.\n printInfoMessage('= Working my way through the files =')\n for dir_path, subpaths, files in os.walk(rootdir):\n for file in files:\n if isValidPath(dir_path + '/' + file):\n renameFile(dir_path, file)\n else:\n # Let's assume this isn't a folder we are intrested in\n printFailureMessage(file + ' <== What is this file? 
Is it really a movie?')\n\n printInfoMessage('\\n= Working my way through the folders =')\n for dir_path, subpaths, files in os.walk(rootdir):\n for path in subpaths:\n # Only match on directories that start with a Word.\n # This to avoid some system directories (Like .git, @EAB and so on)\n if isValidPath(path):\n renamePath(dir_path, path)\n else:\n # Let's assume this isn't a folder we are intrested in\n printFailureMessage(file + ' <== Is this really a movie folder?')\n\n\ndef isValidPath(path):\n \"\"\" Validates a path to make sure that it can be converted to a Title (year) format.\n\n Parameters:\n -----------\n path: The full path to the folder or file\n \"\"\"\n if os.path.isfile(path):\n # Extract the filename from the path\n filename = os.path.basename(path)\n # Extract the extention from the filename\n extension = os.path.splitext(filename)[1].lower()\n # Let's see if we can get the title and year from the filename\n fileguess = guessit(filename)\n if extension in ACCEPTED_EXTENTIONS and ('title' in fileguess) and ('year' in fileguess):\n return True\n else:\n return False\n else:\n foldername = os.path.basename(path)\n pathguess = guessit(foldername)\n if re.match('^\\W.*', foldername) is None and ('title' in pathguess) and ('year' in pathguess):\n return True\n else:\n return False\n\n\ndef buildPlexMovieName(guessDict):\n return guessDict['title'] + ' (' + str(guessDict['year']) + ')'\n\n# Stephen.Colbert.2017.04.21.Rosario.Dawson.720p.HDTV.x264-SORNY[rarbg].mkv\ndef buildPlexTVShowName(guessDict):\n if 'season' in guessDict:\n return guessDict['title'] + ' - ' + 'S' + str(guessDict['season']) + 'E' + str(guessDict['episode'])\n elif 'year' in guessDict:\n title = guessDict['title']\n year = ' (' + str(guessDict['year']) + ') - '\n season = 'S' + str(guessDict['season']) if 'season' in guessDict else ''\n episode = 'E' + str(guessDict['episode']) if 'episode' in guessDict else ''\n ep_title = ' - ' + guessDict['episode_title'] if 'episode_title' in guessDict else ''\n return title + year + season + episode + ep_title\n elif 'date' in guessDict:\n title = guessDict['title'] + ' - '\n date = str(guessDict['date'])\n season = ' - ' + 'S' + str(guessDict['season']) if 'season' in guessDict else ''\n episode = 'E' + str(guessDict['episode']) if 'episode' in guessDict else ''\n ep_title = ' - ' + guessDict['episode_title'] if 'episode_title' in guessDict else ''\n return title + date + season + episode + ep_title\n\n\nnamebuilder = {\n 'movie': buildPlexMovieName,\n 'episode': buildPlexTVShowName,\n}\n\n\ndef renameFile(dir_path, file):\n \"\"\" Renames a file to match a standard Plex format { Title (year) }.\n If the Dry run flag is set then we will just print the text but not make the move.\n\n Parameters:\n -----------\n dir_path: Full path to the file\n file: File name\n \"\"\"\n\n # Extract the extention of the file so we can pick the ones we want\n extension = os.path.splitext(file)[1].lower()\n myguess = guessit(file)\n print(' ' + file + bcolors.OKGREEN + ' ==> ' + bcolors.ENDC + namebuilder[myguess['type']](myguess) + extension)\n if not DRYRUN:\n new_name = namebuilder[myguess['type']](myguess)\n src = dir_path + '/' + file\n dest = dir_path + '/' + new_name + extension\n os.rename(src, dest)\n\n\ndef renamePath(dir_path, path):\n \"\"\" Renames a folder to match a standard Plex format { Title (year) }.\n If the Dry run flag is set then we will just print the text but not make the move.\n\n Parameters:\n -----------\n dir_path: Full path to the related folder\n path: 
Folder name\n \"\"\"\n new_name = guessit(path)['title'] + ' (' + str(guessit(path)['year']) + ')'\n src = dir_path + '/' + path\n dest = dir_path + '/' + new_name\n print(' ' + src + bcolors.OKGREEN + ' ==> ' + bcolors.ENDC + dest)\n\n if not DRYRUN:\n os.rename(src, dest)\n\n\ndef main():\n \"\"\" Here is where the magic happens\n \"\"\"\n\n # Setup the Argument Parser\n parser = argparse.ArgumentParser(description='Rename files and folders to fit Plex')\n parser.add_argument('media', help='Where your mediafiles are')\n parser.add_argument('-d', '--dryrun', action='store_true', help='Print out the changes without actually doing them')\n args = parser.parse_args()\n\n # Extract the rootpath\n rootdir = args.media\n\n # Set if we are doing a dry run our not\n global DRYRUN\n DRYRUN = args.dryrun\n\n # Warn the user if it's a dry run\n if DRYRUN:\n print('\\n')\n print(bcolors.UNDERLINE + 'NOTE: This is a dry run!' + bcolors.ENDC)\n print('\\n')\n\n # Walk through the root dir and look at all the files\n parseFiles(rootdir)\n # Walk through the root dir and look at all the folders\n # parseFolders(rootdir)\n", "id": "6640361", "language": "Python", "matching_score": 2.2192089557647705, "max_stars_count": 1, "path": "salmiak/__init__.py" }, { "content": "'''\nMIT License\n\nCopyright (c) 2017 <NAME>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nfrom flask import Flask, render_template, url_for\nfrom flask import request\nimport requests_cache\nimport requests\nimport json\nimport codecs\nimport datetime\nimport pymongo\n\nimport time\n\nclass TorrentFile(object):\n \"\"\"docstring for TorrentFile.\"\"\"\n\n CATEGORY_MOVIE_H264_1080P = 44\n CATEGORY_MOVIE_H264_720P = 45\n CATEGORY_MOVIE_H264_3D = 47\n CATEGORY_MOVIE_FULL_BD = 42\n CATEGORY_MOVIE_BD_REMUX = 46\n\n def __init__(self, torrent_json):\n self.title = torrent_json['title']\n self.category = torrent_json['category']\n self.magnet = torrent_json['download']\n self.seeder_count = torrent_json['seeders']\n self.info_page = torrent_json['info_page']\n\n\n\nclass VideoFile(object):\n\n def __init__(self, mediainfo_json):\n\n self.high_quality_codecs = ['AVC','HEVC','VC-1']\n self.high_quality_resolution = 1080\n self.high_quality_bitrate = 20000000\n\n if 'other_overall_bit_rate' in mediainfo_json['tracks'][0]:\n self.video_bitrate_txt = mediainfo_json['tracks'][0]['other_overall_bit_rate'][0]\n else:\n self.video_bitrate_txt = '0'\n\n if 'overall_bit_rate' in mediainfo_json['tracks'][0]:\n self.video_bitrate_int = int(mediainfo_json['tracks'][0]['overall_bit_rate'])\n else:\n self.video_bitrate_int = 11\n\n self.video_resolution = mediainfo_json['tracks'][1]['sampled_height']\n self.movie_title = mediainfo_json['tracks'][0]['file_name']\n self.video_codec = mediainfo_json['tracks'][1]['format']\n self.video_profile = mediainfo_json['tracks'][1]['format_profile']\n try:\n self.video_imdb_id = mediainfo_json['quick_facts']['imdb_id']\n except KeyError:\n self.video_imdb_id = None\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__,\n self.movie_title,\n self.video_bitrate_txt)\n\n def getVideoBitrate(self):\n return self.video_bitrate_txt\n\n def getVideoResolution(self):\n return self.video_resolution\n\n def getMovieTitle(self):\n return self.movie_title\n\n def getVideoCodec(self):\n return self.video_codec\n\n def getVideoProfile(self):\n return self.video_profile\n\n def getIMDB_ID(self):\n return self.video_imdb_id\n\n def isOfGoodQuality(self):\n if int(self.video_resolution) < self.high_quality_resolution:\n return False\n elif not self.video_codec in self.high_quality_codecs:\n return False\n elif self.video_bitrate_int < self.high_quality_bitrate:\n return False\n else:\n return True\n\ndef get_Torrent_List_By_IMDB_ID(imdb_id):\n token_response = requests.get('https://torrentapi.org/pubapi_v2.php?get_token=get_token')\n token_json = json.loads(token_response.text)\n\n search_uri = 'https://torrentapi.org/pubapi_v2.php?category=42;46&format=json_extended&mode=search&token=' + token_json['token'] + '&search_imdb=' + imdb_id\n print(search_uri)\n\n if token_response.from_cache == False: #Let's make sure we don't call to fast if we just got a new key\n print('We just got a new token, Lets wait for 3 secs before we move on')\n time.sleep(2)\n\n response = requests.get(search_uri)\n print(\"Was that API request cached? 
\",response.from_cache)\n json_resp = json.loads(response.text)\n if 'error' in json_resp:\n return json_resp\n torrent_list = []\n for torrent in json_resp['torrent_results']:\n torrent_list.append(TorrentFile(torrent))\n return torrent_list\n\ndef getVideoRate(VideoFile):\n return VideoFile.video_bitrate_int;\n\ndef getMovieListFromDB():\n result = db.Movies.find()\n formated_result = list(result)\n video_list = []\n for movie in formated_result:\n video_list.append(VideoFile(movie))\n sorted_list = sorted(video_list, key=getVideoRate)\n return sorted_list\n\napplication = Flask(__name__)\ncache_expire_time = datetime.timedelta(minutes=15)\nrequests_cache.install_cache(expire_after=cache_expire_time)\nrequests_cache.clear()\nclient = pymongo.MongoClient('ds137261.mlab.com',37261)\ndb = client['bacon_2017']\ndb.authenticate('bacon','F463Rlund')\n# app.config['MONGO_DBNAME'] = 'something'\n# app.config['MONGO_URI'] = 'URI'\n#\n# mongo = PyMongo(app)\n\[email protected]('/movies', methods=['GET'])\ndef list_movies():\n # https://torrentapi.org/apidocs_v2.txt\n url_for('static', filename='css/movies.css')\n video_list = getMovieListFromDB()\n return render_template('movies.html', movie_list=video_list)\n\[email protected]('/movies/update/<imdb_id>', methods=['GET'])\ndef list_torrents(imdb_id):\n url_for('static', filename='css/movies.css')\n torrent_list = get_Torrent_List_By_IMDB_ID(imdb_id)\n return render_template('torrents.html', torrent_list=torrent_list)\n\[email protected]('/pickup/wilson', methods=['GET'])\ndef announce_wilson():\n url_for('static', filename='style.css')\n return render_template('wilson.html')\n\n\[email protected]('/ferry', methods=['GET'])\ndef get_all_ferry():\n\n # TODO: Set a timout for the cache\n response = requests.get('http://www.nwff.com.hk/api/time_table_search.php?lang=eng&origin=CE&destination=MW&vessel=any')\n print(\"Was that API request cached? 
\",response.from_cache)\n\n schedule = json.loads(response.text.replace(u'\\ufeff', ''))\n index = find_last_two_departures_from_now(schedule)\n url_for('static', filename='css/ferry.css')\n url_for('static', filename='css/bootstrap.min.css')\n url_for('static', filename='js/bootstrap.min.js')\n return render_template('ferry.html', schedule=schedule)\n\[email protected]('/api', methods=['GET'])\ndef test_api():\n\n return json.text\n\ndef find_last_two_departures_from_now(json):\n current_time = datetime.datetime.now()\n for departure in json:\n departure_time_str = departure['schedule']['time'];\n departure_date_str = datetime.date.fromtimestamp(departure['date'])\n d = datetime.datetime.strptime(departure_date_str.isoformat() + \"-\" + departure_time_str, '%Y-%m-%d-%H:%M:%S')\n print(d)\n if current_time < d:\n return json.index(departure)-2\n\nif __name__ == '__main__':\n application.debug = True\n application.run()\n", "id": "2482364", "language": "Python", "matching_score": 4.180445671081543, "max_stars_count": 1, "path": "application.py" }, { "content": "from pymediainfo import MediaInfo\nimport sys, os, pymongo, hashlib, configparser, xxhash, locale, argparse, json, logging\nfrom guessit import guessit\nfrom imdbpie import Imdb\n\nimdb = Imdb(anonymize=True) # to proxy requests\nREBUILD_SIDECAR = False\n\n# Setting default hasher - can be changed with command line\n\n\n# Let's configure the locale\nlocale.setlocale(locale.LC_ALL, 'en_US') # We use this for number formating while we count blocks\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')\n\nclass FileManagement():\n\n def validate_sidecar_file(sidecar_file):\n try:\n fact_size = sidecar_file['quick_facts']['file_size']\n fact_name = sidecar_file['quick_facts']['file_name']\n fact_last_known = sidecar_file['quick_facts']['last_known_location']\n return True\n except KeyError: # We couldn't find the keys we need. Let's rebuild it\n print(\"--> There seems to be some issue with the sidecar file. Let me fix that for you.\")\n return False\n\n # Ok, so we got the key's, now let's make sure they are all valid values\n # attached to the key's\n\n\n\n def hashfile(fullpath_to_mediafile):\n \"\"\" Hashes any given file using xxhash (https://cyan4973.github.io/xxHash/)\n\n Args:\n fullpath: The full path, including file name to the file to be hashed\n\n Returns:\n A String hash value\n \"\"\"\n # Setting the block size\n hasher = xxhash.xxh64() #Set the hasher\n BLOCKSIZE = 65536\n\n size = os.path.getsize(fullpath_to_mediafile)\n blocks = int(size / BLOCKSIZE)\n\n with open(fullpath_to_mediafile, 'rb') as afile:\n buf = afile.read(BLOCKSIZE) #Read one block\n while len(buf) > 0: #\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n if (blocks % 1000) == 0: # Only print every 1000 blocks so not to spam the terminal\n print(\"Blocks to go:\", locale.format(\"%d\", blocks, grouping=True), end=\"\\r\", flush=True)\n blocks -= 1\n\n return hasher.hexdigest()\n\ndef find_imdb_ID_from_title(filename):\n # First, let's extract the name of the movie and it's year\n nameDict = guessit(filename)\n try:\n title = nameDict['title']\n year = nameDict['year']\n except KeyError:\n print('This file \"' + filename + '\" seems oddly named. 
Please follow [title] [year] format')\n return None\n imdbResult = imdb.search_for_title(title)\n for movie in imdbResult:\n if title == movie['title'] and str(year) == movie['year']:\n print('Match found')\n return movie['imdb_id']\n\n return None\n\ndef scanMovies(fd):\n \"\"\" Goes through the directory structure seaching for specific files\n matching the extention mentioned in the list\n \"\"\"\n\n for dir_path,subpaths,files in os.walk(fd):\n for file in files:\n extension = os.path.splitext(file)[1].lower()\n if extension in ['.mkv', '.mp4', '.avi', '.mov']:\n fullpath = os.path.abspath(dir_path) + \"/\" + file\n # Get the media info. This an take a while\n scanMediaInfo(dir_path, fullpath, file)\n elif extension in ['.ts', '.m2ts']:\n fullpath = os.path.abspath(dir_path) + \"/\" + file\n filesize = os.path.getsize(fullpath)\n if filesize > 20000000000:\n convert_to_mkv(dir_path, fullpath, file)\n\n\n\ndef convert_to_mkv(path, fullpath, filename):\n print('Video convertion is not yet done. In progress since 17 May 2017')\n # Let's establish what we are working with first. Is bluray structure intact or just a odd format.\n base_path = os.path.basename(path)\n if base_path == 'STREAM': # Bluray structure is intact it seems [Basefolder]/BDMV/STREAM/mediafile.m2ts\n print('Bluray rip convertion')\n else:\n print('Asuming we are in a ripped directory')\n\ndef scanMediaInfo(path, fullpath, filename):\n \"\"\" Parses the media info of the file. If, new, we will hash it and add it to the library.\n We use the MKV FileUID as our guide if it's new or now. Hashing is just to slow for a quick check.\n\n Args: path: The URI of the file\n fullpath: The URI + Filename\n filename: the file name of the media we try to scan\n\n \"\"\"\n filelen = len(filename)\n print('=======================' + \"=\" * filelen)\n print('Scanning Media info on', filename)\n print('=======================' + \"=\" * filelen)\n # Getting the media info\n\n # Let's just have a quick check if we seen this file before\n filesize = os.path.getsize(fullpath)\n result = is_this_file_known(filename=filename, path=path, filesize=filesize)\n\n if result == False or REBUILD_SIDECAR ==True: #We couldn't find the sidecar file. 
Doing a full update\n media_info = MediaInfo.parse(fullpath)\n\n # We need to add some metadata here so we can do some quick lookups\n media_json = json.loads(media_info.to_json(), parse_int=str)\n\n if 'unique_id' in media_json['tracks'][0]:\n media_xxhash = media_json['tracks'][0]['unique_id']\n else:\n media_xxhash = FileManagement.hashfile(fullpath)\n\n imdb_id = find_imdb_ID_from_title(filename)\n media_json['quick_facts'] = {'file_size':filesize,\n 'file_hash': media_xxhash,\n 'file_name': filename,\n 'last_known_location' : fullpath,\n 'imdb_id': imdb_id}\n # Save it to a file next to the media file for later use\n sidecar_file = open(path + '/' + filename + '_sidcar.json', 'w')\n sidecar_file.write(json.dumps(media_json))\n insertMediaFile(media_json)\n else: #Hey, we know this one, no need to do anything about this.\n # Save it to a file next to the media file for later use\n print('Seems like we have scanned this before.\\n--> If you want to scan it again, remove the _sidecar file next to the original file')\n print('You can find it here:\\n')\n print(path + filename + '_sidcar.json')\n print('\\n')\n print('--> Will still try to add it to the DB just in case we deleted it at some point.')\n sidecar_file = open(path + '/' + filename + '_sidcar.json', 'r')\n insertMediaFile(json.load(sidecar_file))\n\n\"\"\"\n\nSo sorry for the deep iffing here. Will fix it after lunch... :D\n\"\"\"\ndef is_this_file_known(filename, filesize, path):\n sidecar_uri = path + '/' + filename + '_sidcar.json' #Path to the sidecar file\n if os.path.isfile(sidecar_uri):\n sidecar_file = json.load(open(sidecar_uri, 'r')) # We found it, lets look inside\n try:\n fact_size = sidecar_file['quick_facts']['file_size']\n fact_name = sidecar_file['quick_facts']['file_name']\n fact_last_known = sidecar_file['quick_facts']['last_known_location']\n except KeyError: # We couldn't find the keys we need. Let's rebuild it\n print(\"--> There seems to be some issue with the sidecar file. Let me fix that for you.\")\n return False\n if fact_size != filesize: # We check filesize first since that would qualify for a full rescan no matter what the name is of the file\n print(\"--> The filesize doesn't match the sidecar file info. We should scan again.. \\n----\")\n return False #Sidecar file exist but the basic info is not matching\n elif fact_name != filename: #Ok, so the name doesn't match but the size does. Maybe we renamed both mediafile and the sidecar. Let's verify this.\n print(\"--> The filename doesn't match the sidecar file info. Let's check the hash. Please wait... \\n----\")\n file_hash = FileManagement.hashfile(path + \"/\" + filename)\n fact_hash = sidecar_file['quick_facts']['file_hash']\n print(file_hash + \" + \" + fact_hash)\n if fact_hash == file_hash:\n print(\"--> Seems like the file is the same but renamed. Let me update that for you!\")\n sidecar_file['quick_facts']['file_name'] = filename\n f = open(sidecar_uri, 'w')\n f.write(json.dumps(sidecar_file));\n return True\n else:\n print(\"--> The xxhash doesn't match. Something has changed so let's re-scan it all\")\n return False #Sidecar file exist but the basic info is not matching\n elif fact_last_known != os.path.abspath(path + '/' + filename):\n print('--> The location seem to have change. Rebuiding the file. I know it a pain, I will make this faster later')\n return False\n else: # Everything is good. Lets just skip this file.\n return True\n else:\n print(\"--> Can't find the sidecar file. 
Assuming this is a new file, or renamed\\n----\")\n return False #Can't even find the sidecar file\n\n\ndef insertMediaFile(file_data):\n \"\"\" Inserts a record in the MongoDB\n\n \"\"\"\n # client = pymongo.MongoClient('mongodb://arkalon:[email protected]:27017,cluster0-shard-00-01-if3vm.mongodb.net:27017,cluster0-shard-00-02-if3vm.mongodb.net:27017/icecream?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin')\n # db = client[db_name]\n client = pymongo.MongoClient(db_url,db_port)\n db = client[db_name]\n db.authenticate(db_username,db_password)\n # First, make sure there is no duplicate\n result = db.Movies.find({'quick_facts.file_hash' : file_data['quick_facts']['file_hash']})\n if result.count() != 0:\n print('--> Hey! We already have this bad boy in the database. Will not add it twice.')\n print('\\n\\n')\n else:\n db.Movies.insert_one(file_data)\n print('--> File has been added to the DB and a sidcar file to the filesystem.')\n print('\\n\\n')\n\ndef configure_application():\n # Let's configure stuff\n config = configparser.RawConfigParser()\n\n #First, let's make sure we have a config file. If not, create a template and quit\n is_configured = os.path.isfile('media_organiser.cfg')\n if is_configured:\n config.read('media_organiser.cfg')\n # Configure mLab Database\n global db_name\n global db_port\n global db_url\n global db_username\n global db_password\n\n db_name = config.get('mLab','db_name')\n db_port = config.getint('mLab','db_port')\n db_url = config.get('mLab','db_url')\n db_username = config.get('mLab','username')\n db_password = config.get('mLab','password')\n elif os.path.isfile('media_organiser_template.cfg'):\n sys.exit('--> Did you forget to rename the template file to \"media_organiser.cfg\"?')\n else:\n f = open('media_organiser_template.cfg', mode='w')\n f.write(\"[mLab]\\ndb_url = \\ndb_port = \\nusername = \\npassword = \\ndb_name = \")\n sys.exit(\"--> App has no config file. Creating a template and quitting\")\n\n\nif __name__ == \"__main__\":\n configure_application()\n # Setup the Argument Parser\n parser = argparse.ArgumentParser(description='Documentation of all media files as you have. Will get some media details and hash them.')\n parser.add_argument('media', help='Where your mediafiles are')\n parser.add_argument('-c', '--config', help='Location of the config file. Default: Same directory as main file [media_organiser.cfg]')\n parser.add_argument('-m', '--remux', help='[Not working yet!!] If selected, we will remux non-mkv to mkv format.')\n parser.add_argument('-r', '--rebuild', action=\"store_true\" ,help='Rebuild ALL sidecar files')\n args = parser.parse_args()\n REBUILD_SIDECAR = args.rebuild\n\n if REBUILD_SIDECAR:\n response = input('Are you sure you want to rebuild ALL sidecar files? (y/n) --> ')\n if response.lower() == 'y':\n scanMovies(args.media)\n else:\n print('Oh, did you forget to remove the \"-r\" flag?')\n else:\n scanMovies(args.media)\n print('================================')\n print(' Scan finished. 
')\n print('================================')\n", "id": "6630700", "language": "Python", "matching_score": 4.6536078453063965, "max_stars_count": 1, "path": "media_organiser.py" }, { "content": "import os\nimport guessit\nfrom imdbpie import Imdb\nfrom guessit import guessit\n\nSUPPORTED_CONTAINERS = ['.mkv', '.avi', '.mp4', '.m4v']\nimdb = Imdb(anonymize=True) # to proxy requests\n\n\ndef find_all_media_files(dir_to_search, extentions=SUPPORTED_CONTAINERS):\n file_list = []\n for dirpath, dirnames, filenames in os.walk(dir_to_search):\n for filename in filenames:\n if os.path.splitext(filename)[1] in extentions:\n file_list.append(filename)\n return file_list\n\n\ndef resolve_imdb_from_filename(test_file):\n # First, let's extract the name of the movie and it's year\n nameDict = guessit(test_file)\n try:\n title = nameDict['title']\n year = str(nameDict['year'])\n except KeyError:\n print('This file \"' + test_file + '\" seems oddly named.\\\n Please follow [title] [year] format')\n return None\n imdbResult = imdb.search_for_title(title)\n for movie in imdbResult:\n if year == movie['year']:\n print('Match found')\n return movie['imdb_id']\n", "id": "11488034", "language": "Python", "matching_score": 0.8105184435844421, "max_stars_count": 1, "path": "samosa/__init__.py" }, { "content": "import pytest\nimport salmiak\nimport os\n\n\[email protected](\"test_file,expected\", [\n ('Kimi.no.na.wa.aka.Your.Name.2016.JAPANESE.1080p.BluRay.REMUX.AVC.DTS-HD.MA.5.1-FGT.mkv', True),\n ('Looney.Tunes.Volume.2.1936-1959.1080p.BluRay.REMUX.AVC.DD1.0-RARBG.mkv', True),\n ('The.Princess.And.The.Frog.2009.1080p.BluRay.AVC.DTS-HD.MA.5.1-FGT.mkv', True),\n ('Vaid.name.but.no.mkv.extention.2017', False),\n ('Random.Bullshit.without.year.mkv', False),\n ('.git.filename', False)\n])\n\n\ndef test_valid_filename(test_file, expected, tmpdir):\n f1 = tmpdir.mkdir('download').join(test_file)\n f1.write('VideoContent')\n assert salmiak.isValidPath(str(f1)) == expected\n\n\[email protected](\"test_path, expected\", [\n ('Name.2001', True),\n ('Looney.Tunes.Volume.2.1936-1959.1080p.BluRay.REMUX.AVC.DD1.0-RARBG', True),\n ('@download', False),\n ('Movie.Title', False),\n ('.ssh.2001', False),\n ('Brother.Where.Art.Thou.2000.1080p.BluRay.X264-AMIABLE', True),\n ('.Brother.Where.Art.Thou.2000.1080p.BluRay.X264-AMIABLE', False)\n])\n\n\ndef test_valid_path(test_path, expected, tmpdir):\n f1 = tmpdir.mkdir(test_path)\n assert salmiak.isValidPath(str(f1)) == expected\n\n\[email protected](\"test_file, renamed_file\", [\n ('Kimi.no.na.wa.aka.Your.Name.2016.JAPANESE.1080p.BluRay.REMUX.AVC.DTS-HD.MA.5.1-FGT.mkv', 'Kimi no na wa aka Your Name (2016).mkv'),\n ('The.Princess.And.The.Frog.2009.1080p.BluRay.AVC.DTS-HD.MA.5.1-FGT.mkv', 'The Princess And The Frog (2009).mkv'),\n])\n\n\ndef test_rename_file(test_file, renamed_file, tmpdir):\n ''' Testing that renaming a valid file actually moves it from one to the other.\n '''\n salmiak.DRYRUN = False\n f1 = tmpdir.mkdir('download').join(test_file)\n f1.write('VideoContent')\n salmiak.renameFile(str(tmpdir) + '/download', str(test_file))\n assert os.path.isfile(str(tmpdir) + '/download/' + renamed_file) is True\n assert os.path.isfile(str(tmpdir) + '/download/' + test_file) is False\n\n\[email protected](\"test_file, renamed_file\", [\n ('Kimi.no.na.wa.aka.Your.Name.2016.JAPANESE.1080p.BluRay.REMUX.AVC.DTS-HD.MA.5.1-FGT.mkv', 'Kimi no na wa aka Your Name (2016).mkv'),\n ('The.Princess.And.The.Frog.2009.1080p.BluRay.AVC.DTS-HD.MA.5.1-FGT.mkv', 'The Princess And The Frog 
(2009).mkv'),\n])\n\n\ndef test_dryrun_rename_file(test_file, renamed_file, tmpdir):\n ''' Dry run should not move the files\n '''\n salmiak.DRYRUN = True\n f1 = tmpdir.mkdir('download').join(test_file)\n f1.write('VideoContent')\n salmiak.renameFile(str(tmpdir) + '/download', str(test_file))\n assert os.path.isfile(str(tmpdir) + '/download/' + renamed_file) is False\n assert os.path.isfile(str(tmpdir) + '/download/' + test_file) is True\n\n\[email protected](\"test_folder, renamed_folder\", [\n ('Kimi.no.na.wa.aka.Your.Name.2016.JAPANESE.1080p.BluRay.REMUX.AVC.DTS-HD.MA.5.1-FGT', 'Kimi no na wa aka Your Name (2016)'),\n ('The.Princess.And.The.Frog.2009.1080p.BluRay.AVC.DTS-HD.MA.5.1-FGT', 'The Princess And The Frog (2009)'),\n])\n\n\ndef test_rename_folder(test_folder, renamed_folder, tmpdir):\n ''' Testing that renaming a valid file actually moves it from one to the other.\n '''\n salmiak.DRYRUN = False\n f1 = tmpdir.mkdir(test_folder)\n salmiak.renamePath(str(tmpdir), str(test_folder))\n assert os.path.isdir(str(tmpdir) + '/' + renamed_folder) is True\n assert os.path.isdir(str(tmpdir) + '/' + test_folder) is False\n\n\[email protected](\"test_folder, renamed_folder\", [\n ('Kimi.no.na.wa.aka.Your.Name.2016.JAPANESE.1080p.BluRay.REMUX.AVC.DTS-HD.MA.5.1-FGT', 'Kimi no na wa aka Your Name (2016)'),\n ('The.Princess.And.The.Frog.2009.1080p.BluRay.AVC.DTS-HD.MA.5.1-FGT', 'The Princess And The Frog (2009)'),\n])\n\n\ndef test_dryrun_rename_folder(test_folder, renamed_folder, tmpdir):\n ''' Testing that renaming a valid file actually moves it from one to the other.\n '''\n salmiak.DRYRUN = True\n f1 = tmpdir.mkdir(test_folder)\n salmiak.renamePath(str(tmpdir), str(test_folder))\n assert os.path.isdir(str(tmpdir) + '/' + renamed_folder) is False\n assert os.path.isdir(str(tmpdir) + '/' + test_folder) is True\n\n\n################\n# Test TVShows #\n################\n\n\[email protected](\"test_tvshow_file, renamed_file\", [\n ('Last.Week.Tonight.with.John.Oliver.S04E13.720p.HBO.WEBRip.AAC2.0.H264-monkee[rarbg].mkv', 'Last Week Tonight with <NAME> - S4E13.mkv'),\n ('Married With Children - 0106 - Sixteen Years and What Do You Get.mkv', 'Married With Children - S1E6.mkv'),\n ('BBC.Life.2009.E02.Reptiles.and.Amphibians.1080p.BluRay.Remux.VC1.-HDME.mkv', 'BBC Life (2009) - E2 - Reptiles and Amphibians.mkv'),\n ('Stephen.Colbert.2017.04.21.Rosario.Dawson.720p.HDTV.x264-SORNY[rarbg].mkv', '<NAME> - 2017-04-21 - Rosario Dawson.mkv'),\n ('Westworld.S01E04.1080p.AMZN.WEBRip.DD5.1.x264-FGT.mkv', 'Westworld - S1E4.mkv')\n])\n\n\ndef test_rename_tvshows(test_tvshow_file, renamed_file, tmpdir):\n ''' Testing that renaming a valid file actually moves it from one to the other.\n '''\n salmiak.DRYRUN = False\n f1 = tmpdir.mkdir('download').join(test_tvshow_file)\n f1.write('VideoContent')\n salmiak.renameFile(str(tmpdir) + '/download', str(test_tvshow_file))\n assert os.path.isfile(str(tmpdir) + '/download/' + renamed_file) is True\n assert os.path.isfile(str(tmpdir) + '/download/' + test_tvshow_file) is False\n\n\[email protected](\"test_tvshow_file, renamed_file\", [\n ('Last.Week.Tonight.with.John.Oliver.S04E13.720p.HBO.WEBRip.AAC2.0.H264-monkee[rarbg].mkv', 'Last Week Tonight with <NAME> - S4E13.mkv'),\n ('Married With Children - 0106 - Sixteen Years and What Do You Get.mkv', 'Married With Children - S1E6.mkv'),\n ('BBC.Life.2009.E02.Reptiles.and.Amphibians.1080p.BluRay.Remux.VC1.-HDME.mkv', 'BBC Life (2009) - E2 - Reptiles and Amphibians.mkv'),\n 
('Stephen.Colbert.2017.04.21.Rosario.Dawson.720p.HDTV.x264-SORNY[rarbg].mkv', '<NAME> - 2017-04-21 - Rosario Dawson.mkv'),\n ('Westworld.S01E04.1080p.AMZN.WEBRip.DD5.1.x264-FGT.mkv', 'Westworld - S1E4.mkv'),\n])\n\n\ndef test_dryrun_rename_tvshows(test_tvshow_file, renamed_file, tmpdir):\n ''' Testing that renaming a valid file actually moves it from one to the other.\n '''\n salmiak.DRYRUN = True\n f1 = tmpdir.mkdir('download').join(test_tvshow_file)\n f1.write('VideoContent')\n salmiak.renameFile(str(tmpdir) + '/download', str(test_tvshow_file))\n assert os.path.isfile(str(tmpdir) + '/download/' + renamed_file) is False\n assert os.path.isfile(str(tmpdir) + '/download/' + test_tvshow_file) is True\n", "id": "6602627", "language": "Python", "matching_score": 3.562316656112671, "max_stars_count": 1, "path": "tests/test_salmiak.py" }, { "content": "import pytest\nimport samosa\n\n\[email protected](\"test_file,expected\", [\n ('Ghost.In.The.Shell.2017.1080p.3D.BluRay.AVC.TrueHD.7.1.Atmos-FGT.mkv', 'tt1219827'),\n ('The.Princess.And.The.Frog.2009.1080p.BluRay.AVC.DTS-HD.MA.5.1-FGT.mkv', 'tt0780521'),\n ('Shot.Caller.2017.1080p.BluRay.REMUX.AVC.TrueHD.5.1-FGT.mkv', 'tt4633690'),\n ('Random.Bullshit.without.year.mkv', None),\n ('.git.filename', None)\n])\n\n\ndef test_resolve_imdb_id_from_filename(test_file, expected):\n imdb_id = samosa.resolve_imdb_from_filename(test_file)\n assert imdb_id == expected\n\n\[email protected]\ndef movielist():\n return ['Kimi.no.na.wa.aka.Your.Name.2016.JAPANESE.1080p.BluRay.REMUX.AVC.DTS-HD.MA.5.1-FGT.mkv',\n 'Looney.Tunes.Volume.2.1936-1959.1080p.BluRay.REMUX.AVC.DD1.0-RARBG.mp4',\n 'The.Princess.And.The.Frog.2009.1080p.BluRay.AVC.DTS-HD.MA.5.1-FGT.avi',\n 'MyPresentation.ppt'\n ]\n\n\ndef test_find_only_mkv_files(movielist, tmpdir):\n for movie in movielist:\n f1 = tmpdir.join(movie)\n f1.write('MovieContent')\n\n file_list = samosa.find_all_media_files(extentions=['.mkv'],\n dir_to_search=str(tmpdir))\n assert len(file_list) is 1\n\n\ndef test_find_all_supported_files(movielist, tmpdir):\n for movie in movielist:\n f1 = tmpdir.join(movie)\n f1.write('MovieContent')\n\n file_list = samosa.find_all_media_files(dir_to_search=str(tmpdir))\n assert len(file_list) is 3\n", "id": "5851478", "language": "Python", "matching_score": 0.39325153827667236, "max_stars_count": 1, "path": "tests/test_main.py" }, { "content": "from setuptools import setup\n\nsetup(name='salmiak',\n version='0.4',\n description='The easiest movie file renamer this side of github',\n long_description=\"Look, it's not very powerful, but it's simple. 
And sometimes that's enough\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Natural Language :: English',\n ],\n keywords='movie rename file scene',\n url='https://github.com/arkalon76/salmiak',\n author='<NAME>',\n author_email='<EMAIL>',\n license='MIT',\n packages=['salmiak'],\n install_requires=[\n 'guessit',\n ],\n test_suite='tests',\n tests_require=['pytest',\n 'pytest',\n 'pytest-pep8',\n 'pytest-cov'],\n entry_points={'console_scripts': [\n 'salmiak = salmiak:main',\n ],\n },\n zip_safe=False)\n", "id": "12045677", "language": "Python", "matching_score": 5.297504425048828, "max_stars_count": 1, "path": "setup.py" }, { "content": "from setuptools import setup\n\nsetup(name='samosa',\n version='0.1',\n description='Scans media files to find a better version of it self',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Natural Language :: English',\n ],\n keywords='movie search quality download torrent',\n url='https://github.com/arkalon76/samosa',\n author='<NAME>',\n author_email='<EMAIL>',\n license='MIT',\n packages=['samosa'],\n install_requires=[\n 'mediainfo',\n ],\n test_suite='tests',\n tests_require=['pytest',\n 'pytest',\n 'pytest-pep8',\n 'pytest-cov'],\n entry_points={'console_scripts': [\n 'samosa = samosa:main',\n ],\n },\n zip_safe=False)\n", "id": "7085475", "language": "Python", "matching_score": 4.654295444488525, "max_stars_count": 1, "path": "setup.py" } ]
3.871381
Nesbi
[ { "content": "from .data_augmenter import DataAugmenter, SimpleDataAugmenter, NoopDataAugmenter", "id": "11753260", "language": "Python", "matching_score": 0.07249853014945984, "max_stars_count": 5, "path": "calamari_ocr/ocr/augmentation/__init__.py" }, { "content": "from .reader import XMLReader as AbbyyReader\nfrom .writer import XMLWriter as AbbyyWriter\nfrom .dataset import AbbyyDataSet", "id": "10146909", "language": "Python", "matching_score": 1.8027138710021973, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/abbyy_dataset/__init__.py" }, { "content": "from .dataset import Hdf5DataSet", "id": "11794628", "language": "Python", "matching_score": 1.5836941003799438, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/hdf5_dataset/__init__.py" }, { "content": "from .dataset import DataSet, DataSetMode, RawDataSet, DatasetGenerator\nfrom .file_dataset import FileDataSet\nfrom .abbyy_dataset import AbbyyDataSet\nfrom .pagexml_dataset import PageXMLDataset\nfrom .dataset_factory import DataSetType, create_dataset\nfrom .input_dataset import InputDataset, RawInputDataset, StreamingInputDataset\n\n__all__ = [\n 'DataSet',\n 'DataSetType',\n 'DataSetMode',\n 'RawDataSet',\n 'FileDataSet',\n 'AbbyyDataSet',\n 'PageXMLDataset',\n 'create_dataset',\n 'InputDataset',\n 'RawInputDataset',\n 'StreamingInputDataset',\n]\n", "id": "11969983", "language": "Python", "matching_score": 3.029214382171631, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/__init__.py" }, { "content": "from .dataset import PageXMLDataset", "id": "6581287", "language": "Python", "matching_score": 0.572923481464386, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/pagexml_dataset/__init__.py" }, { "content": "from .dataset import GeneratedLineDataset\n", "id": "8712027", "language": "Python", "matching_score": 0.0586242750287056, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/generated_line_dataset/__init__.py" }, { "content": "from itertools import groupby\nimport re\nimport datetime\n\nclass Entry:\n def __init__(self,entry_id,book,position,timestamp,text):\n self.id = entry_id\n self.book = book\n self.position = position\n self.timestamp = timestamp\n self.text = text\n\n\nclass Book:\n def __init__(self,title,entries):\n self.title = title\n self.entries = sorted(entries, key=lambda e: [e.position,len(e.text)])\n\n def get_text(self):\n return \"\\n\\n\".join(e.text for e in self.clean_entries())\n\n def clean_entries(self):\n return [e for e in self.entries if len([p for p in self.entries if p.id != e.id and e.text in p.text]) == 0]\n\n def export(self,file_path):\n with open(file_path, 'w+') as notes:\n notes.write(self.title+'\\n')\n notes.write(\"=\" * len(self.title) + \"\\n\\n\")\n\n notes.write(self.get_text())\n\n\nclass Kindle:\n def load(file_path):\n delimiter = '==========\\n'\n\n with open(file_path, 'r') as clippings:\n line_groups = enumerate((list(group) \n for key, group in groupby(clippings, lambda line: line == delimiter) if not key))\n entries = [Kindle.entry_from_lines(entry_id, entry_lines) for entry_id, entry_lines in line_groups]\n\n return [Book(title,list(book_entries)) \n for title, book_entries in groupby(entries,lambda entry: entry.book)]\n\n\n def entry_from_lines(entry_id, lines):\n # TODO use locale instead of hard coded german\n m = re.search(r\"- Ihre Markierung (.*) \\| Hinzugefügt am (.*)\",lines[1])\n\n position = None\n if \"bei Position\" in m.group(1):\n p = re.search(\"bei Position (.*)-(.*)\",m.group(1))\n position = 
(p.group(1),p.group(2))\n elif \"auf Seite\" in m.group(1):\n p = re.search(\"auf Seite (.*)\",m.group(1))\n position = (p.group(1),p.group(1))\n else:\n raise Error(\"Unkown position string '{}'\".format(m.group(1)))\n\n\n\n months = {\n \"Januar\":1,\n \"Februar\":2,\n \"März\":3,\n \"April\":4,\n \"Mai\":5,\n \"Juni\":6,\n \"Juli\":7,\n \"August\":8,\n \"September\":9,\n \"Oktober\":10,\n \"November\":11,\n \"Dezember\":12\n } \n\n d = re.search(r\".*, (.*)\\. (.*) (.*) (.*):(.*):(.*)\",m.group(2))\n\n date = datetime.datetime(\n int(d.group(3)),\n months[d.group(2)],\n int(d.group(1)),\n int(d.group(4)),\n int(d.group(5)),\n int(d.group(6)))\n\n return Entry(entry_id, Kindle.clean_line(lines[0]), position, date, Kindle.clean_line(\"\".join(lines[2:])))\n\n\n def clean_line(line):\n return line.replace('\\ufeff','').strip()\n\n\n", "id": "9807398", "language": "Python", "matching_score": 0.7547674775123596, "max_stars_count": 0, "path": "kindle.py" }, { "content": "import re\n\nfrom calamari_ocr.ocr.text_processing import TextProcessor, TextProcessorParams\n\n\ndef default_groups():\n return {\n \"quotes\": False,\n \"spaces\": False,\n \"roman_digits\": False,\n \"ligatures\": False,\n \"various\": False,\n }\n\n\ndef parse_groups(string_list):\n groups = default_groups()\n\n for s in map(str.lower, string_list):\n if s == \"none\":\n groups[\"quotes\"] = False\n groups[\"spaces\"] = False\n groups[\"roman_digits\"] = False\n groups[\"ligatures\"] = False\n groups[\"various\"] = False\n elif s == \"simple\":\n groups[\"quotes\"] = False\n groups[\"spaces\"] = True\n groups[\"roman_digits\"] = False\n groups[\"ligatures\"] = False\n groups[\"various\"] = True\n elif s == \"extended\":\n groups[\"quotes\"] = True\n groups[\"spaces\"] = True\n groups[\"roman_digits\"] = True\n groups[\"ligatures\"] = False\n groups[\"various\"] = True\n elif s == \"all\":\n groups[\"quotes\"] = True\n groups[\"spaces\"] = True\n groups[\"roman_digits\"] = True\n groups[\"ligatures\"] = True\n groups[\"various\"] = True\n elif s in groups:\n groups[s] = True\n else:\n raise KeyError(\"Unknown key '{}', allowed: {}\".format(s, groups.keys()))\n\n return groups\n\n\ndef default_text_regularizer_params(params=TextProcessorParams(), groups=[\"simple\"]):\n params.type = TextProcessorParams.TEXT_REGULARIZER\n\n groups = parse_groups(groups)\n\n def replacement(old, new, regex=False):\n r = params.replacements.add()\n r.old = old\n r.new = new\n r.regex = regex\n\n if groups[\"various\"]:\n replacement(\"µ\", \"μ\") # replace micro unit with greek character\n replacement(\"–——\", \"-\") # variant length hyphens\n replacement(\"–—\", \"-\") # variant length hyphens\n\n if groups[\"quotes\"]:\n replacement('\"', \"''\") # typewriter double quote\n replacement(\"`\", \"'\") # grave accent\n replacement('“', \"''\") # fancy quotes\n replacement('”', \"''\") # fancy quotes\n replacement(\"´\", \"'\") # acute accent\n replacement(\"‘\", \"'\") # single quotation mark\n replacement(\"’\", \"'\") # single quotation mark\n replacement(\"“\", \"''\") # double quotation mark\n replacement(\"”\", \"''\") # double quotation mark\n replacement(\"“\", \"''\") # German quotes\n replacement(\"„\", \",,\") # German quotes\n replacement(\"…\", \"...\") # ellipsis\n replacement(\"′\", \"'\") # prime\n replacement(\"″\", \"''\") # double prime\n replacement(\"‴\", \"'''\") # triple prime\n replacement(\"〃\", \"''\") # ditto mark\n\n if groups[\"ligatures\"]:\n # compare 
https://en.wikipedia.org/wiki/Typographic_ligature#Ligatures_in_Unicode_(Latin_alphabets)\n replacement(\"Ꜳ\", \"AA\")\n replacement(\"ꜳ\", \"aa\")\n replacement(\"Æ\", \"AE\")\n replacement(\"æ\", \"ae\")\n replacement(\"Ꜵ\", \"AO\")\n replacement(\"ꜵ\", \"ao\")\n replacement(\"Ꜷ\", \"AU\")\n replacement(\"ꜷ\", \"au\")\n replacement(\"Ꜹ\", \"AV\")\n replacement(\"ꜹ\", \"av\")\n replacement(\"Ꜻ\", \"AV\")\n replacement(\"ꜻ\", \"av\")\n replacement(\"Ꜽ\", \"AY\")\n replacement(\"ꜽ\", \"ay\")\n replacement(\"🙰\", \"et\")\n replacement(\"ff\", \"ff\")\n replacement(\"ffi\", \"ffi\")\n replacement(\"ffl\", \"ffl\")\n replacement(\"fl\", \"fl\")\n replacement(\"fi\", \"fi\")\n replacement(\"Œ\", \"OE\")\n replacement(\"œ\", \"oe\")\n replacement(\"Ꝏ\", \"OO\")\n replacement(\"ꝏ\", \"oo\")\n replacement(\"ẞ\", \"ſs\")\n replacement(\"ß\", \"ſz\")\n replacement(\"st\", \"st\")\n replacement(\"ſt\", \"ſt\")\n replacement(\"Ꜩ\", \"TZ\")\n replacement(\"ꜩ\", \"tz\")\n replacement(\"ᵫ\", \"ue\")\n replacement(\"Ꝡ\", \"VY\")\n replacement(\"ꝡ\", \"vy\")\n\n if groups[\"roman_digits\"]:\n replacement(\"Ⅰ\", \"I\") # expand unicode roman digits\n replacement(\"Ⅱ\", \"II\") # expand unicode roman digits\n replacement(\"Ⅲ\", \"III\") # expand unicode roman digits\n replacement(\"Ⅳ\", \"IV\") # expand unicode roman digits\n replacement(\"Ⅴ\", \"V\") # expand unicode roman digits\n replacement(\"Ⅵ\", \"VI\") # expand unicode roman digits\n replacement(\"Ⅶ\", \"VII\") # expand unicode roman digits\n replacement(\"Ⅷ\", \"VIII\") # expand unicode roman digits\n replacement(\"Ⅸ\", \"IX\") # expand unicode roman digits\n replacement(\"Ⅹ\", \"X\") # expand unicode roman digits\n replacement(\"Ⅺ\", \"XI\") # expand unicode roman digits\n replacement(\"Ⅻ\", \"XII\") # expand unicode roman digits\n replacement(\"Ⅼ\", \"L\") # expand unicode roman digits\n replacement(\"Ⅽ\", \"C\") # expand unicode roman digits\n replacement(\"Ⅾ\", \"D\") # expand unicode roman digits\n replacement(\"Ⅿ\", \"M\") # expand unicode roman digits\n replacement(\"ⅰ\", \"i\") # expand unicode roman digits\n replacement(\"ⅱ\", \"ii\") # expand unicode roman digits\n replacement(\"ⅲ\", \"iii\") # expand unicode roman digits\n replacement(\"ⅳ\", \"iv\") # expand unicode roman digits\n replacement(\"ⅴ\", \"v\") # expand unicode roman digits\n replacement(\"ⅵ\", \"vi\") # expand unicode roman digits\n replacement(\"ⅶ\", \"vii\") # expand unicode roman digits\n replacement(\"ⅷ\", \"viii\") # expand unicode roman digits\n replacement(\"ⅸ\", \"ix\") # expand unicode roman digits\n replacement(\"ⅹ\", \"x\") # expand unicode roman digits\n replacement(\"ⅺ\", \"xi\") # expand unicode roman digits\n replacement(\"ⅻ\", \"xii\") # expand unicode roman digits\n replacement(\"ⅼ\", \"l\") # expand unicode roman digits\n replacement(\"ⅽ\", \"c\") # expand unicode roman digits\n replacement(\"ⅾ\", \"d\") # expand unicode roman digits\n replacement(\"ⅿ\", \"m\") # expand unicode roman digits\n\n if groups[\"spaces\"]:\n replacement(r\"\\s+(?u)\", ' ', True) # Multiple spaces to one\n replacement(r\"\\n(?u)\", '', True) # Remove line breaks\n replacement(r\"^\\s+(?u)\", '', True) # strip left\n replacement(r\"\\s+$(?u)\", '', True) # strip right\n\n return params\n\n\nclass TextRegularizer(TextProcessor):\n def __init__(self, params=default_text_regularizer_params()):\n super().__init__()\n self.params = params\n\n def _apply_single(self, txt):\n for replacement in self.params.replacements:\n if replacement.regex:\n txt = re.sub(replacement.old, 
replacement.new, txt)\n else:\n txt = txt.replace(replacement.old, replacement.new)\n\n return txt\n\n\nif __name__ == \"__main__\":\n n = TextRegularizer(default_text_regularizer_params(groups=[\"quotes\", \"spaces\"]))\n assert(n.apply([\"“Resolve quotes”\"]) == [\"''Resolve quotes''\"])\n assert(n.apply([\" “Resolve spaces ” \"]) == [\"''Resolve spaces ''\"])\n", "id": "3854992", "language": "Python", "matching_score": 1.472885251045227, "max_stars_count": 5, "path": "calamari_ocr/ocr/text_processing/text_regularizer.py" }, { "content": "from calamari_ocr.ocr.text_processing import TextProcessor, TextProcessorParams\n\n\nclass StrToCharList(TextProcessor):\n def __init__(self, params: TextProcessorParams):\n super().__init__()\n # chars are priority ordered and might be words as-well!\n self.chars = params.characters\n\n def _apply_single(self, txt):\n index = 0\n out = []\n while index < len(txt):\n found = False\n for char in self.chars:\n if len(char) == 0:\n continue # blank\n if txt[index:index+len(char)] == char:\n out.append(char)\n index += len(char)\n found = True\n break\n\n if found:\n continue\n\n else:\n raise Exception(\"Could not parse remainder '{}' of '{}'\".format(txt[index:], txt))\n\n return out\n\n", "id": "4642925", "language": "Python", "matching_score": 1.4955021142959595, "max_stars_count": 5, "path": "calamari_ocr/ocr/text_processing/str_to_char_list.py" }, { "content": "import bidi.algorithm as bidi_algorithm\n\nfrom calamari_ocr.ocr.text_processing import TextProcessor, TextProcessorParams\n\n\nclass StripTextProcessor(TextProcessor):\n def __init__(self):\n super().__init__()\n\n def _apply_single(self, txt):\n if isinstance(txt, str):\n return txt.strip()\n\n elif isinstance(txt, list):\n while txt[0].isspace():\n del txt[0]\n\n while txt[-1].isspace():\n del txt[-1]\n\n return txt\n\n else:\n raise TypeError()\n\n\nclass BidiTextProcessor(TextProcessor):\n def __init__(self, default_bidi_direction=TextProcessorParams.BIDI_AUTO):\n super().__init__()\n self.base_dir = None\n self.set_base_dir_from_enum(default_bidi_direction)\n\n def set_base_dir_from_enum(self, d):\n self.base_dir = {TextProcessorParams.BIDI_LTR: 'L',\n TextProcessorParams.BIDI_RTL: 'R',\n TextProcessorParams.BIDI_AUTO: None,\n }[d]\n\n def _apply_single(self, txt):\n # To support arabic text\n return bidi_algorithm.get_display(txt, base_dir=self.base_dir)\n", "id": "6132276", "language": "Python", "matching_score": 0.5612974166870117, "max_stars_count": 5, "path": "calamari_ocr/ocr/text_processing/basic_text_processors.py" }, { "content": "import argparse\nimport os\nimport random\n\n\ndef mkdir(d):\n if not os.path.exists(d):\n os.makedirs(d)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--base_dir\", type=str, required=True)\n parser.add_argument(\"--output_dir\", type=str, required=True)\n parser.add_argument(\"--train_sub_out\", type=str, default=\"train\")\n parser.add_argument(\"--eval_sub_out\", type=str, default=\"eval\")\n parser.add_argument(\"--train_amount\", type=float, required=True,\n help=\"If >= 1 this value is interpreted as absolute value, else as relative value\")\n parser.add_argument(\"--seed\", type=int, default=-1)\n\n args = parser.parse_args()\n\n if args.seed > 0:\n random.seed(args.seed)\n\n all_dirs_ = [d for d in os.listdir(args.base_dir) if os.path.isdir(os.path.join(args.base_dir, d))]\n\n if len(all_dirs_) == 0:\n raise Exception(\"No directories found at '{}'\".format(args.base_dir))\n\n train_dir = 
os.path.join(args.output_dir, args.train_sub_out)\n eval_dir = os.path.join(args.output_dir, args.eval_sub_out)\n\n mkdir(train_dir)\n mkdir(eval_dir)\n\n n_train = int(args.train_amount) if args.train_amount >= 1 else int(len(all_dirs_) * args.train_amount)\n # n_eval = len(all_dirs_) - n_train\n\n indices = list(range(len(all_dirs_)))\n random.shuffle(indices)\n\n train_dirs_ = [all_dirs_[i] for i in indices[:n_train]]\n eval_dirs_ = [all_dirs_[i] for i in indices[n_train:]]\n\n def make_lns(dirs_, out_dir):\n for d_ in dirs_:\n os.symlink(os.path.join(args.base_dir, d_), os.path.join(out_dir, d_), target_is_directory=True)\n\n for td, od in zip([train_dirs_, eval_dirs_], [train_dir, eval_dir]):\n print(\"Processing '{}' to '{}'\".format(td, od))\n make_lns(td, od)\n\n\nif __name__ == \"__main__\":\n main()\n\n", "id": "6192720", "language": "Python", "matching_score": 0.42107093334198, "max_stars_count": 5, "path": "calamari_ocr/scripts/split_dirs_to_train_eval.py" }, { "content": "import multiprocessing\nimport os\nimport inspect\nimport json\nimport tempfile\nimport sys\n\nfrom calamari_ocr.ocr import CrossFold\nfrom calamari_ocr.utils.multiprocessing import prefix_run_command, run\n\n# path to the dir of this script to automatically detect the training script\nthis_absdir = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))\n\n\ndef train_individual_model(run_args):\n # Call the training script with the json file as args\n # The json file contains all training parameters, including the files for training and validation\n # Note: It is necessary to launch a new thread because the command might be prefixed (e. g. use slurm as job\n # skeduler to train all folds on different machines\n args = run_args[\"args\"]\n train_args_json = run_args[\"json\"]\n for line in run(prefix_run_command([\n sys.executable, \"-u\",\n args[\"train_script\"],\n \"--files\", train_args_json,\n\n ], args.get(\"run\", None), {\"threads\": args.get('num_threads', -1)}), verbose=args.get(\"verbose\", False)):\n # Print the output of the thread\n if args.get(\"verbose\", False):\n print(\"FOLD {} | {}\".format(args[\"id\"], line), end=\"\")\n\n return args\n\n\nclass CrossFoldTrainer:\n def __init__(self, n_folds, dataset,\n best_models_dir, best_model_label,\n train_args,\n progress_bars=False,\n ):\n self.n_folds = n_folds\n self.dataset = dataset\n self.best_models_dir = best_models_dir\n self.best_model_label = best_model_label\n self.progress_bars = progress_bars\n self.train_args = train_args\n # locate the training script (must be in the same dir as \"this\")\n self.train_script_path = os.path.abspath(os.path.join(this_absdir, \"..\", \"scripts\", \"train.py\"))\n\n # location of best models output\n if not os.path.exists(self.best_models_dir):\n os.makedirs(self.best_models_dir)\n\n if not os.path.exists(self.train_script_path):\n raise FileNotFoundError(\"Missing train script path. 
Expected 'train.py' at {}\".format(self.train_script_path))\n\n if not isinstance(train_args, dict):\n raise TypeError(\"Train args must be type of dict\")\n\n def run(self, single_fold=None, seed=-1, weights=None, max_parallel_models=-1,\n temporary_dir=None, keep_temporary_files=False,\n ):\n # Default params\n single_fold = single_fold if single_fold else []\n weights = weights if weights else []\n if max_parallel_models <= 0:\n max_parallel_models = self.n_folds\n\n # argument checks\n if len(weights) > 1 and len(weights) != self.n_folds:\n raise Exception(\"Either no, one or n_folds (={}) models are required for pretraining but got {}.\".format(\n self.n_folds, len(weights)\n ))\n\n if len(single_fold) > 0:\n if len(set(single_fold)) != len(single_fold):\n raise Exception(\"Repeated fold id's found.\")\n for fold_id in single_fold:\n if fold_id < 0 or fold_id >= self.n_folds:\n raise Exception(\"Invalid fold id found: 0 <= id <= {}, but id == {}\".format(self.n_folds, fold_id))\n\n # create temporary dir\n # by default, the temporary files will be deleted after a successful training\n # if you specify a temporary dir, you can easily resume to train if an error occurred\n if keep_temporary_files and not temporary_dir:\n raise Exception(\"If you want to keep the temporary model files you have to specify a temporary dir\")\n\n # temporary dir\n if temporary_dir is None:\n temporary_dir = tempfile.mkdtemp(prefix=\"calamari\")\n else:\n temporary_dir = os.path.abspath(temporary_dir)\n\n if not os.path.exists(temporary_dir):\n os.makedirs(temporary_dir)\n\n # Compute the files in the cross fold (create a CrossFold)\n fold_file = os.path.join(temporary_dir, \"folds.json\")\n cross_fold = CrossFold(n_folds=self.n_folds, dataset=self.dataset, output_dir=temporary_dir,\n progress_bar=self.progress_bars\n )\n cross_fold.write_folds_to_json(fold_file)\n\n # Create the json argument file for each individual training\n run_args = []\n folds_to_run = single_fold if len(single_fold) > 0 else range(len(cross_fold.folds))\n for fold in folds_to_run:\n train_files = cross_fold.train_files(fold)\n test_files = cross_fold.test_files(fold)\n path = os.path.join(temporary_dir, \"fold_{}.json\".format(fold))\n with open(path, 'w') as f:\n fold_args = self.train_args.copy()\n fold_args[\"dataset\"] = cross_fold.dataset_type.name\n fold_args[\"validation_dataset\"] = cross_fold.dataset_type.name\n fold_args[\"validation_extension\"] = self.train_args['gt_extension']\n fold_args[\"id\"] = fold\n fold_args[\"files\"] = train_files\n fold_args[\"validation\"] = test_files\n fold_args[\"train_script\"] = self.train_script_path\n fold_args[\"verbose\"] = True\n fold_args[\"output_dir\"] = os.path.join(temporary_dir, \"fold_{}\".format(fold))\n fold_args[\"early_stopping_best_model_output_dir\"] = self.best_models_dir\n fold_args[\"early_stopping_best_model_prefix\"] = self.best_model_label.format(id=fold)\n\n if seed >= 0:\n fold_args[\"seed\"] = seed + fold\n\n if len(weights) == 1:\n fold_args[\"weights\"] = weights[0]\n elif len(weights) > 1:\n fold_args[\"weights\"] = weights[fold]\n else:\n fold_args[\"weights\"] = None\n\n # start from scratch via None\n if fold_args[\"weights\"]:\n if len(fold_args[\"weights\"].strip()) == 0 or fold_args[\"weights\"].upper() == \"NONE\":\n fold_args[\"weights\"] = None\n\n json.dump(\n fold_args,\n f,\n indent=4,\n )\n\n run_args.append({\"json\": path, \"args\": fold_args})\n\n # Launch the individual processes for each training\n with 
multiprocessing.Pool(processes=max_parallel_models) as pool:\n # workaround to forward keyboard interrupt\n pool.map_async(train_individual_model, run_args).get()\n\n if not keep_temporary_files:\n import shutil\n shutil.rmtree(temporary_dir)\n", "id": "86922", "language": "Python", "matching_score": 2.037245035171509, "max_stars_count": 1, "path": "calamari_ocr/ocr/cross_fold_trainer.py" }, { "content": "import os\nimport json\nfrom contextlib import ExitStack\n\nfrom calamari_ocr.utils import tqdm_wrapper\nfrom calamari_ocr.ocr.datasets import DataSetType\nfrom calamari_ocr.ocr.datasets.file_dataset import FileDataSet\nfrom calamari_ocr.ocr.datasets.input_dataset import StreamingInputDataset\nfrom calamari_ocr.ocr.data_processing import NoopDataPreprocessor\nfrom calamari_ocr.ocr.text_processing import NoopTextProcessor\nfrom calamari_ocr.ocr.datasets.hdf5_dataset.hdf5_dataset_writer import Hdf5DatasetWriter\n\n\nclass CrossFold:\n def __init__(self, n_folds, dataset, output_dir, progress_bar=True,\n ):\n \"\"\" Prepare cross fold training\n\n This class creates folds out of the given source files.\n The individual splits are the optionally written to the `output_dir` in a json format.\n\n The file with index i will be assigned to fold i % n_folds (not randomly!)\n\n Parameters\n ----------\n n_folds : int\n the number of folds to create\n dataset : Dataset\n dataset containing all files\n output_dir : str\n where to store the folds\n \"\"\"\n self.n_folds = n_folds\n self.dataset = dataset\n self.output_dir = os.path.abspath(output_dir)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if len(self.dataset) == 0:\n raise Exception(\"Empty dataset\")\n\n if self.n_folds <= 1:\n raise Exception(\"At least two folds are required\")\n\n # fill single fold files\n\n # if a FileDataSet, we can just use the paths of the images\n if isinstance(self.dataset, FileDataSet):\n self.dataset_type = DataSetType.FILE\n self.folds = [[] for _ in range(self.n_folds)]\n for i, sample in enumerate(self.dataset.samples()):\n self.folds[i % n_folds].append(sample['image_path'])\n else:\n self.dataset_type = DataSetType.HDF5\n # else load the data of each fold and write it to hd5 data files\n with StreamingInputDataset(self.dataset, NoopDataPreprocessor(), NoopTextProcessor(), processes=1) as input_dataset:\n with ExitStack() as stack:\n folds = [stack.enter_context(Hdf5DatasetWriter(os.path.join(self.output_dir, 'fold{}'.format(i)))) for i in range(self.n_folds)]\n\n for i, (data, text, _) in tqdm_wrapper(enumerate(input_dataset.generator(epochs=1)), progress_bar=progress_bar,\n total=len(dataset), desc=\"Creating hdf5 files\"):\n folds[i % self.n_folds].write(data, text)\n\n self.folds = [f.files for f in folds]\n\n def train_files(self, fold):\n \"\"\" List the train files of the `fold`\n\n Parameters\n ----------\n fold : int\n index of the fold\n\n Returns\n -------\n list of str\n files in this fold\n See Also\n --------\n test_files\n \"\"\"\n all_files = []\n for fold_id, inputs in enumerate(self.folds):\n if fold_id != fold:\n all_files += inputs\n\n return all_files\n\n def test_files(self, fold):\n \"\"\" List the test files of the `fold`\n\n Parameters\n ----------\n fold : int\n index of the fold\n\n Returns\n -------\n list of str\n files in this fold\n See Also\n --------\n train_files\n \"\"\"\n for fold_id, inputs in enumerate(self.folds):\n if fold_id == fold:\n return inputs\n\n return []\n\n def write_folds_to_json(self, filepath):\n \"\"\" Write the fold split to the 
`filepath` as json.\n\n format is for 3 folds:\n {\n \"folds\": [\n [file1, file4, file7, ...],\n [file2, file5, file8, ...],\n [file3, file6, file9, ...]\n ]\n \"type\": FILE (or HDF5)\n }\n\n\n Parameters\n ----------\n filepath : str\n\n \"\"\"\n with open(filepath, 'w') as f:\n json.dump({\n \"type\": self.dataset_type.name,\n \"folds\": self.folds,\n }, f, indent=4)\n\n\n", "id": "4732059", "language": "Python", "matching_score": 2.1514172554016113, "max_stars_count": 5, "path": "calamari_ocr/ocr/cross_fold.py" }, { "content": "from contextlib import ExitStack\nfrom collections import deque\n\n\nclass ExitStackWithPop(ExitStack):\n def pop(self, cm):\n callbacks = self._exit_callbacks\n self._exit_callbacks = deque()\n found = None\n\n def unpack_cb(cb):\n if isinstance(cb, tuple):\n return cb[1]\n else:\n return cb\n\n while callbacks:\n cb = callbacks.popleft()\n if unpack_cb(cb).__self__ == cm:\n found = cb\n else:\n self._exit_callbacks.append(cb)\n if not found:\n raise KeyError(\"context manager not found\")\n found = unpack_cb(found)\n found(None, None, None)\n", "id": "3974401", "language": "Python", "matching_score": 0, "max_stars_count": 5, "path": "calamari_ocr/utils/contextmanager.py" }, { "content": "class RunningStatistics:\n def __init__(self, max_size, initial_values=None):\n self.max_size = max_size\n self.values = list(map(float, initial_values)) if initial_values is not None else []\n self.cur_sum = sum(self.values)\n\n if max_size < 1:\n raise Exception(\"A size > 0 is required. Got {}\".format(max_size))\n\n def sum(self):\n return self.cur_sum\n\n def mean(self):\n return self.cur_sum / len(self.values)\n\n def push(self, v):\n v = float(v)\n self.values.append(v)\n\n if len(self.values) > self.max_size:\n self.cur_sum -= self.values[0]\n del self.values[0]\n\n self.cur_sum += v\n", "id": "10171028", "language": "Python", "matching_score": 0.9577404856681824, "max_stars_count": 5, "path": "calamari_ocr/utils/running_statistics.py" }, { "content": "import argparse\n\nimport numpy as np\n\nfrom calamari_ocr.utils import glob_all, split_all_ext\nfrom calamari_ocr.ocr import create_dataset, DataSetType, DataSetMode\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--files\", nargs=\"+\", required=True,\n help=\"List of all image files with corresponding gt.txt files\")\n parser.add_argument(\"--dataset\", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)\n parser.add_argument(\"--line_height\", type=int, default=48,\n help=\"The line height\")\n parser.add_argument(\"--pad\", type=int, default=16,\n help=\"Padding (left right) of the line\")\n\n args = parser.parse_args()\n\n print(\"Resolving files\")\n image_files = glob_all(args.files)\n gt_files = [split_all_ext(p)[0] + \".gt.txt\" for p in image_files]\n\n ds = create_dataset(\n args.dataset,\n DataSetMode.TRAIN,\n images=image_files, texts=gt_files, non_existing_as_empty=True)\n\n print(\"Loading {} files\".format(len(image_files)))\n ds.load_samples(processes=1, progress_bar=True)\n images, texts = ds.train_samples(skip_empty=True)\n statistics = {\n \"n_lines\": len(images),\n \"chars\": [len(c) for c in texts],\n \"widths\": [img.shape[1] / img.shape[0] * args.line_height + 2 * args.pad for img in images\n if img is not None and img.shape[0] > 0 and img.shape[1] > 0],\n \"total_line_width\": 0,\n \"char_counts\": {},\n }\n\n for image, text in zip(images, texts):\n for c in text:\n if c in statistics[\"char_counts\"]:\n statistics[\"char_counts\"][c] 
+= 1\n else:\n statistics[\"char_counts\"][c] = 1\n\n statistics[\"av_line_width\"] = np.average(statistics[\"widths\"])\n statistics[\"max_line_width\"] = np.max(statistics[\"widths\"])\n statistics[\"min_line_width\"] = np.min(statistics[\"widths\"])\n statistics[\"total_line_width\"] = np.sum(statistics[\"widths\"])\n\n statistics[\"av_chars\"] = np.average(statistics[\"chars\"])\n statistics[\"max_chars\"] = np.max(statistics[\"chars\"])\n statistics[\"min_chars\"] = np.min(statistics[\"chars\"])\n statistics[\"total_chars\"] = np.sum(statistics[\"chars\"])\n\n statistics[\"av_px_per_char\"] = statistics[\"av_line_width\"] / statistics[\"av_chars\"]\n statistics[\"codec_size\"] = len(statistics[\"char_counts\"])\n\n del statistics[\"chars\"]\n del statistics[\"widths\"]\n\n\n print(statistics)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "1075409", "language": "Python", "matching_score": 3.594773769378662, "max_stars_count": 5, "path": "calamari_ocr/scripts/dataset_statistics.py" }, { "content": "from argparse import ArgumentParser\nimport os\nimport json\nimport numpy as np\n\nfrom google.protobuf import json_format\n\nfrom calamari_ocr.utils import glob_all, split_all_ext\nfrom calamari_ocr.ocr import Evaluator\nfrom calamari_ocr.ocr.datasets import create_dataset, DataSetType, DataSetMode\nfrom calamari_ocr.proto import CheckpointParams\nfrom calamari_ocr.ocr.text_processing import text_processor_from_proto\n\n\ndef print_confusions(r, n_confusions):\n # sort descending\n if n_confusions != 0 and r[\"total_sync_errs\"] > 0:\n total_percent = 0\n keys = sorted(r['confusion'].items(), key=lambda item: -item[1])\n print(\"{:8s} {:8s} {:8s} {:10s}\".format(\"GT\", \"PRED\", \"COUNT\", \"PERCENT\"))\n\n for i, ((gt, pred), count) in enumerate(keys):\n gt_fmt = \"{\" + gt + \"}\"\n pred_fmt = \"{\" + pred + \"}\"\n if i == n_confusions:\n break\n\n percent = count * max(len(gt), len(pred)) / r[\"total_sync_errs\"]\n print(\"{:8s} {:8s} {:8d} {:10.2%}\".format(gt_fmt, pred_fmt, count, percent))\n total_percent += percent\n\n print(\"The remaining but hidden errors make up {:.2%}\".format(1.0 - total_percent))\n\n\ndef print_worst_lines(r, gt_samples, n_worst_lines):\n if len(r[\"single\"]) != len(gt_samples):\n raise Exception(\"Mismatch in number of predictions and gt files\")\n\n sorted_lines = sorted(zip(r[\"single\"], gt_samples), key=lambda a: -a[0][1])\n\n if n_worst_lines < 0:\n n_worst_lines = len(gt_samples)\n\n if n_worst_lines > 0:\n print(\"{:60s} {:4s} {:3s} {:3s} {}\".format(\"GT FILE\", \"LEN\", \"ERR\", \"SER\", \"CONFUSIONS\"))\n for (len_gt, errs, sync_errs, confusion, gt_pred), sample in sorted_lines[:n_worst_lines]:\n print(\"{:60s} {:4d} {:3d} {:3d} {}\".format(sample['id'][-60:], len_gt, errs, sync_errs, confusion))\n\n\ndef write_xlsx(xlsx_file, eval_datas):\n print(\"Writing xlsx file to {}\".format(xlsx_file))\n import xlsxwriter\n workbook = xlsxwriter.Workbook(xlsx_file)\n\n for eval_data in eval_datas:\n prefix = eval_data[\"prefix\"]\n r = eval_data[\"results\"]\n gt_files = eval_data[\"gt_files\"]\n\n # all files\n ws = workbook.add_worksheet(\"{} - per line\".format(prefix))\n\n for i, heading in enumerate([\"GT FILE\", \"GT\", \"PRED\", \"LEN\", \"ERR\", \"CER\", \"REL. 
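# Self-contained sketch of the per-character counting done above;
# collections.Counter produces the same char_counts mapping as the manual loop.
from collections import Counter

texts = ["abc", "abd"]
char_counts = Counter(c for t in texts for c in t)
assert char_counts["a"] == 2 and char_counts["c"] == 1
assert len(char_counts) == 4   # reported by the script as codec_size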
ERR\", \"SYNC ERR\", \"CONFUSIONS\"]):\n ws.write(0, i, heading)\n\n sorted_lines = sorted(zip(r[\"single\"], gt_files), key=lambda a: -a[0][1])\n\n all_cs = []\n for i, ((len_gt, errs, sync_errs, confusion, (gt, pred)), gt_file) in enumerate(sorted_lines):\n ws.write(i + 1, 0, gt_file)\n ws.write(i + 1, 1, gt.strip())\n ws.write(i + 1, 2, pred.strip())\n ws.write(i + 1, 3, len_gt)\n ws.write(i + 1, 4, errs)\n ws.write(i + 1, 5, errs / max(len(gt), len(pred)))\n ws.write(i + 1, 6, errs / r[\"total_char_errs\"] if r[\"total_char_errs\"] > 0 else 0)\n ws.write(i + 1, 7, sync_errs)\n ws.write(i + 1, 8, \"{}\".format(confusion))\n all_cs.append(errs / max(len(gt), len(pred)))\n\n # total confusions\n ws = workbook.add_worksheet(\"{} - global\".format(prefix))\n for i, heading in enumerate([\"GT\", \"PRED\", \"COUNT\", \"PERCENT\"]):\n ws.write(0, i, heading)\n\n keys = sorted(r['confusion'].items(), key=lambda item: -item[1])\n\n for i, ((gt, pred), count) in enumerate(keys):\n gt_fmt = \"{\" + gt + \"}\"\n pred_fmt = \"{\" + pred + \"}\"\n\n percent = count * max(len(gt), len(pred)) / r[\"total_sync_errs\"]\n ws.write(i + 1, 0, gt_fmt)\n ws.write(i + 1, 1, pred_fmt)\n ws.write(i + 1, 2, count)\n ws.write(i + 1, 3, percent)\n\n # histogram of cers\n hsl = \"{} - histogram\".format(prefix)\n ws = workbook.add_worksheet(hsl)\n ws.write_row(\"A1\", [\"Class\", \"Count\"])\n hist, bin_edges = np.histogram(all_cs, bins=\"auto\")\n ws.write_column(\"A2\", bin_edges)\n ws.write_column(\"B2\", hist)\n\n chart = workbook.add_chart({'type': 'column'})\n chart.add_series({'name': \"CER hist\",\n 'categories': \"='{}'!$A$2:$A${}\".format(hsl, 2 + len(bin_edges)),\n 'values': \"='{}'!$B$2:$B${}\".format(hsl, 2 + len(bin_edges))\n })\n chart.set_title({'name': 'CER distribution'})\n chart.set_x_axis({'name': 'CER'})\n chart.set_y_axis({'name': 'Amount'})\n\n ws.insert_chart(\"D2\", chart, {\"x_offset\": 25, 'y_offset': 10})\n\n workbook.close()\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\"--dataset\", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)\n parser.add_argument(\"--gt\", nargs=\"+\", required=True,\n help=\"Ground truth files (.gt.txt extension). \"\n \"Optionally, you can pass a single json file defining all parameters.\")\n parser.add_argument(\"--pred\", nargs=\"+\", default=None,\n help=\"Prediction files if provided. Else files with .pred.txt are expected at the same \"\n \"location as the gt.\")\n parser.add_argument(\"--pred_dataset\", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)\n parser.add_argument(\"--pred_ext\", type=str, default=\".pred.txt\",\n help=\"Extension of the predicted text files\")\n parser.add_argument(\"--n_confusions\", type=int, default=10,\n help=\"Only print n most common confusions. Defaults to 10, use -1 for all.\")\n parser.add_argument(\"--n_worst_lines\", type=int, default=0,\n help=\"Print the n worst recognized text lines with its error\")\n parser.add_argument(\"--xlsx_output\", type=str,\n help=\"Optionally write a xlsx file with the evaluation results\")\n parser.add_argument(\"--num_threads\", type=int, default=1,\n help=\"Number of threads to use for evaluation\")\n parser.add_argument(\"--non_existing_file_handling_mode\", type=str, default=\"error\",\n help=\"How to handle non existing .pred.txt files. Possible modes: skip, empty, error. \"\n \"'Skip' will simply skip the evaluation of that file (not counting it to errors). 
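# Sketch of the per-line error rate and histogram written to the sheet above:
# character errors are normalised by the longer of ground truth and prediction,
# and numpy's "auto" binning feeds the xlsxwriter chart.
import numpy as np

def line_cer(errs, gt, pred):
    return errs / max(len(gt), len(pred))

all_cs = [line_cer(2, "hello", "hallo."), line_cer(0, "ok", "ok"), line_cer(1, "abc", "abcd")]
hist, bin_edges = np.histogram(all_cs, bins="auto")
assert hist.sum() == len(all_cs)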
\"\n \"'Empty' will handle this file as would it be empty (fully checking for errors).\"\n \"'Error' will throw an exception if a file is not existing. This is the default behaviour.\")\n parser.add_argument(\"--skip_empty_gt\", action=\"store_true\", default=False,\n help=\"Ignore lines of the gt that are empty.\")\n parser.add_argument(\"--no_progress_bars\", action=\"store_true\",\n help=\"Do not show any progress bars\")\n parser.add_argument(\"--checkpoint\", type=str, default=None,\n help=\"Specify an optional checkpoint to parse the text preprocessor (for the gt txt files)\")\n\n # page xml specific args\n parser.add_argument(\"--pagexml_gt_text_index\", default=0)\n parser.add_argument(\"--pagexml_pred_text_index\", default=1)\n\n args = parser.parse_args()\n\n # check if loading a json file\n if len(args.gt) == 1 and args.gt[0].endswith(\"json\"):\n with open(args.gt[0], 'r') as f:\n json_args = json.load(f)\n for key, value in json_args.items():\n setattr(args, key, value)\n\n print(\"Resolving files\")\n gt_files = sorted(glob_all(args.gt))\n\n if args.pred:\n pred_files = sorted(glob_all(args.pred))\n else:\n pred_files = [split_all_ext(gt)[0] + args.pred_ext for gt in gt_files]\n args.pred_dataset = args.dataset\n\n if args.non_existing_file_handling_mode.lower() == \"skip\":\n non_existing_pred = [p for p in pred_files if not os.path.exists(p)]\n for f in non_existing_pred:\n idx = pred_files.index(f)\n del pred_files[idx]\n del gt_files[idx]\n\n text_preproc = None\n if args.checkpoint:\n with open(args.checkpoint if args.checkpoint.endswith(\".json\") else args.checkpoint + '.json', 'r') as f:\n checkpoint_params = json_format.Parse(f.read(), CheckpointParams())\n text_preproc = text_processor_from_proto(checkpoint_params.model.text_preprocessor)\n\n non_existing_as_empty = args.non_existing_file_handling_mode.lower() != \"error \"\n gt_data_set = create_dataset(\n args.dataset,\n DataSetMode.EVAL,\n texts=gt_files,\n non_existing_as_empty=non_existing_as_empty,\n args={'text_index': args.pagexml_gt_text_index},\n )\n pred_data_set = create_dataset(\n args.pred_dataset,\n DataSetMode.EVAL,\n texts=pred_files,\n non_existing_as_empty=non_existing_as_empty,\n args={'text_index': args.pagexml_pred_text_index},\n )\n\n evaluator = Evaluator(text_preprocessor=text_preproc, skip_empty_gt=args.skip_empty_gt)\n r = evaluator.run(gt_dataset=gt_data_set, pred_dataset=pred_data_set, processes=args.num_threads,\n progress_bar=not args.no_progress_bars)\n\n # TODO: More output\n print(\"Evaluation result\")\n print(\"=================\")\n print(\"\")\n print(\"Got mean normalized label error rate of {:.2%} ({} errs, {} total chars, {} sync errs)\".format(\n r[\"avg_ler\"], r[\"total_char_errs\"], r[\"total_chars\"], r[\"total_sync_errs\"]))\n\n # sort descending\n print_confusions(r, args.n_confusions)\n\n print_worst_lines(r, gt_data_set.samples(), args.n_worst_lines)\n\n if args.xlsx_output:\n write_xlsx(args.xlsx_output,\n [{\n \"prefix\": \"evaluation\",\n \"results\": r,\n \"gt_files\": gt_files,\n }])\n\n\nif __name__ == '__main__':\n main()\n", "id": "3311249", "language": "Python", "matching_score": 5.019374370574951, "max_stars_count": 5, "path": "calamari_ocr/scripts/eval.py" }, { "content": "from argparse import ArgumentParser\n\nfrom calamari_ocr.ocr.datasets import create_dataset, DataSetMode, DataSetType, InputDataset\nfrom calamari_ocr.utils import glob_all\n\nimport numpy as np\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\"--pred\", nargs=\"+\", 
required=True,\n help=\"Extended prediction files (.json extension)\")\n\n args = parser.parse_args()\n\n print(\"Resolving files\")\n pred_files = sorted(glob_all(args.pred))\n\n data_set = create_dataset(\n DataSetType.EXTENDED_PREDICTION,\n DataSetMode.EVAL,\n texts=pred_files,\n )\n\n print('Average confidence: {:.2%}'.format(np.mean([s['best_prediction'].avg_char_probability for s in data_set.samples()])))\n\n\nif __name__ == '__main__':\n main()\n", "id": "10898069", "language": "Python", "matching_score": 2.5909268856048584, "max_stars_count": 5, "path": "calamari_ocr/scripts/compute_average_prediction_confidence.py" }, { "content": "import argparse\nimport webbrowser\n\nfrom calamari_ocr.utils import glob_all, split_all_ext\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--files\", nargs=\"+\", required=True,\n help=\"The image files to predict with its gt and pred\")\n parser.add_argument(\"--html_output\", type=str, required=True,\n help=\"Where to write the html file\")\n parser.add_argument(\"--open\", action=\"store_true\",\n help=\"Automatically open the file\")\n\n args = parser.parse_args()\n img_files = sorted(glob_all(args.files))\n gt_files = [split_all_ext(f)[0] + \".gt.txt\" for f in img_files]\n pred_files = [split_all_ext(f)[0] + \".pred.txt\" for f in img_files]\n\n with open(args.html_output, 'w') as html:\n html.write(\"\"\"\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"/>\n </head>\n <body>\n <ul>\"\"\")\n\n for img, gt, pred in zip(img_files, gt_files, pred_files):\n html.write(\"<li><p><img src=\\\"file://{}\\\"></p><p>{}</p><p>{}</p>\\n\".format(\n img.replace('\\\\', '/').replace('/', '\\\\\\\\'), open(gt).read(), open(pred).read()\n ))\n\n html.write(\"</ul></body></html>\")\n\n if args.open:\n webbrowser.open(args.html_output)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "9495750", "language": "Python", "matching_score": 2.4061248302459717, "max_stars_count": 5, "path": "calamari_ocr/scripts/img_gt_pred_to_html.py" }, { "content": "import argparse\nfrom tqdm import tqdm\n\nfrom google.protobuf.json_format import MessageToJson\n\nfrom calamari_ocr.utils import glob_all, split_all_ext\nfrom calamari_ocr.proto import Predictions\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--files\", type=str, default=[], nargs=\"+\", required=True,\n help=\"Protobuf files to convert\")\n parser.add_argument(\"--logits\", action=\"store_true\",\n help=\"Do write logits\")\n args = parser.parse_args()\n\n files = glob_all(args.files)\n for file in tqdm(files, desc=\"Converting\"):\n predictions = Predictions()\n with open(file, 'rb') as f:\n predictions.ParseFromString(f.read())\n\n if not args.logits:\n for prediction in predictions.predictions:\n prediction.logits.rows = 0\n prediction.logits.cols = 0\n prediction.logits.data[:] = []\n\n out_json_path = split_all_ext(file)[0] + \".json\"\n with open(out_json_path, 'w') as f:\n f.write(MessageToJson(predictions, including_default_value_fields=True))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "6928572", "language": "Python", "matching_score": 2.491671323776245, "max_stars_count": 5, "path": "calamari_ocr/scripts/pred_to_json.py" }, { "content": "import argparse\nfrom calamari_ocr.utils import glob_all\nfrom tqdm import tqdm\nimport multiprocessing\n\nimport skimage.io as skimage_io\n\nfrom calamari_ocr.proto import DataPreprocessorParams\nfrom calamari_ocr.ocr.data_processing import MultiDataProcessor, 
DataRangeNormalizer, FinalPreparation, CenterNormalizer\n\n\nclass Handler:\n def __init__(self, data_proc, dry_run):\n self.data_proc = data_proc\n self.dry_run = dry_run\n\n def handle_single(self, path):\n try:\n img = skimage_io.imread(path, flatten=True)\n img = self.data_proc.apply(img)\n\n if not self.dry_run:\n skimage_io.imsave(path, img)\n except ValueError as e:\n print(e)\n print(path)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--files\", type=str, nargs=\"+\", required=True,\n help=\"Text files to apply text processing\")\n parser.add_argument(\"--line_height\", type=int, default=48,\n help=\"The line height\")\n parser.add_argument(\"--pad\", type=int, default=16,\n help=\"Padding (left right) of the line\")\n parser.add_argument(\"--pad_value\", type=int, default=1,\n help=\"Padding (left right) of the line\")\n parser.add_argument(\"--processes\", type=int, default=1)\n parser.add_argument(\"--verbose\", action=\"store_true\")\n parser.add_argument(\"--invert\", action=\"store_true\")\n parser.add_argument(\"--transpose\", action=\"store_true\")\n parser.add_argument(\"--dry_run\", action=\"store_true\",\n help=\"No not overwrite files, just run\")\n\n args = parser.parse_args()\n\n params = DataPreprocessorParams()\n params.line_height = args.line_height\n params.pad = args.pad\n params.pad_value = args.pad_value\n params.no_invert = not args.invert\n params.no_transpose = not args.transpose\n\n data_proc = MultiDataProcessor([\n DataRangeNormalizer(),\n CenterNormalizer(params),\n FinalPreparation(params, as_uint8=True),\n ])\n\n print(\"Resolving files\")\n img_files = sorted(glob_all(args.files))\n\n handler = Handler(data_proc, args.dry_run)\n\n with multiprocessing.Pool(processes=args.processes, maxtasksperchild=100) as pool:\n list(tqdm(pool.imap(handler.handle_single, img_files), desc=\"Processing\", total=len(img_files)))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "1644638", "language": "Python", "matching_score": 3.4298977851867676, "max_stars_count": 5, "path": "calamari_ocr/scripts/apply_data_preprocessing.py" }, { "content": "import argparse\nimport codecs\nfrom calamari_ocr.utils import glob_all\nfrom tqdm import tqdm\n\nfrom calamari_ocr.proto import TextProcessorParams\nfrom calamari_ocr.ocr.text_processing import default_text_normalizer_params, default_text_regularizer_params, text_processor_from_proto\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--files\", type=str, nargs=\"+\", required=True,\n help=\"Text files to apply text processing\")\n parser.add_argument(\"--text_regularization\", type=str, nargs=\"+\", default=[\"extended\"],\n help=\"Text regularization to apply.\")\n parser.add_argument(\"--text_normalization\", type=str, default=\"NFC\",\n help=\"Unicode text normalization to apply. 
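# Minimal, self-contained sketch of the Pool/imap/tqdm pattern used by the
# script above (the worker here is a stand-in for Handler.handle_single).
import multiprocessing
from tqdm import tqdm

def square(x):
    return x * x

if __name__ == "__main__":
    with multiprocessing.Pool(processes=2, maxtasksperchild=100) as pool:
        results = list(tqdm(pool.imap(square, range(10)), desc="Processing", total=10))
    assert results == [x * x for x in range(10)]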
Defaults to NFC\")\n parser.add_argument(\"--verbose\", action=\"store_true\")\n parser.add_argument(\"--dry_run\", action=\"store_true\",\n help=\"No not overwrite files, just run\")\n\n args = parser.parse_args()\n\n # Text pre processing (reading)\n preproc = TextProcessorParams()\n preproc.type = TextProcessorParams.MULTI_NORMALIZER\n default_text_normalizer_params(preproc.children.add(), default=args.text_normalization)\n default_text_regularizer_params(preproc.children.add(), groups=args.text_regularization)\n strip_processor_params = preproc.children.add()\n strip_processor_params.type = TextProcessorParams.STRIP_NORMALIZER\n\n txt_proc = text_processor_from_proto(preproc, \"pre\")\n\n print(\"Resolving files\")\n text_files = glob_all(args.files)\n\n for path in tqdm(text_files, desc=\"Processing\", total=len(text_files)):\n with codecs.open(path, \"r\", \"utf-8\") as f:\n content = f.read()\n\n content = txt_proc.apply(content)\n\n if args.verbose:\n print(content)\n\n if not args.dry_run:\n with codecs.open(path, \"w\", \"utf-8\") as f:\n f.write(content)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "3006620", "language": "Python", "matching_score": 3.7944135665893555, "max_stars_count": 5, "path": "calamari_ocr/scripts/apply_text_preprocessing.py" }, { "content": "import unicodedata\n\nfrom calamari_ocr.ocr.text_processing import TextProcessor, TextProcessorParams\n\n\ndef default_text_normalizer_params(params=TextProcessorParams(), default=\"NFC\"):\n params.type = TextProcessorParams.TEXT_NORMALIZER\n params.unicode_normalization = TextProcessorParams.UnicodeNormalizationType.Value(default.upper())\n\n return params\n\n\nclass TextNormalizer(TextProcessor):\n def __init__(self, params=default_text_normalizer_params()):\n super().__init__()\n self.params = params\n\n def _apply_single(self, txt):\n txt = unicodedata.normalize(\n TextProcessorParams.UnicodeNormalizationType.Name(self.params.unicode_normalization),\n txt\n )\n\n return txt\n\n\nif __name__ == \"__main__\":\n n = TextNormalizer(default_text_normalizer_params(default=\"NFC\"))\n", "id": "8194720", "language": "Python", "matching_score": 3.2878026962280273, "max_stars_count": 5, "path": "calamari_ocr/ocr/text_processing/text_normalizer.py" }, { "content": "from calamari_ocr.proto import TextProcessorParams\n\nfrom calamari_ocr.ocr.text_processing.text_processor import MultiTextProcessor, TextProcessor, NoopTextProcessor\n\nfrom calamari_ocr.ocr.text_processing.text_normalizer import TextNormalizer, default_text_normalizer_params\nfrom calamari_ocr.ocr.text_processing.text_regularizer import TextRegularizer, default_text_regularizer_params\nfrom calamari_ocr.ocr.text_processing.basic_text_processors import StripTextProcessor, BidiTextProcessor\nfrom calamari_ocr.ocr.text_processing.default_text_preprocessor import DefaultTextPreprocessor\nfrom calamari_ocr.ocr.text_processing.default_text_postprocessor import DefaultTextPostprocessor\nfrom calamari_ocr.ocr.text_processing.str_to_char_list import StrToCharList\nfrom calamari_ocr.ocr.text_processing.text_synchronizer import synchronize\n\n\ndef text_processor_from_proto(text_processor_params, pre_or_post=None):\n if len(text_processor_params.children) > 0 and text_processor_params.type != TextProcessorParams.MULTI_NORMALIZER:\n raise ValueError(\"Only a MULTI_NORMALIZER may have children, however got {}\".format(\n TextProcessorParams.Type.Name(text_processor_params.type)))\n\n if text_processor_params.type == TextProcessorParams.MULTI_NORMALIZER:\n return 
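# Self-contained sketch of the NFC normalisation applied above: a decomposed
# "e" followed by a combining acute accent collapses into a single code point.
import unicodedata

decomposed = "e\u0301"
composed = unicodedata.normalize("NFC", decomposed)
assert composed == "\u00e9" and len(composed) == 1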
MultiTextProcessor(\n [text_processor_from_proto(c) for c in text_processor_params.children]\n )\n elif text_processor_params.type == TextProcessorParams.DEFAULT_NORMALIZER:\n if not pre_or_post:\n raise Exception(\"pre or post parameter must be set to specify pre or postprocessing default\")\n return {\"pre\": DefaultTextPreprocessor(), \"post\": DefaultTextPostprocessor()}[pre_or_post.lower()]\n elif text_processor_params.type == TextProcessorParams.DEFAULT_PRE_NORMALIZER:\n return DefaultTextPreprocessor()\n elif text_processor_params.type == TextProcessorParams.DEFAULT_POST_NORMALIZER:\n return DefaultTextPostprocessor()\n elif text_processor_params.type == TextProcessorParams.NOOP_NORMALIZER:\n return NoopTextProcessor()\n elif text_processor_params.type == TextProcessorParams.STRIP_NORMALIZER:\n return StripTextProcessor()\n elif text_processor_params.type == TextProcessorParams.BIDI_NORMALIZER:\n return BidiTextProcessor(text_processor_params.bidi_direction)\n elif text_processor_params.type == TextProcessorParams.TEXT_NORMALIZER:\n return TextNormalizer(text_processor_params)\n elif text_processor_params.type == TextProcessorParams.TEXT_REGULARIZER:\n return TextRegularizer(text_processor_params)\n elif text_processor_params.type == TextProcessorParams.STR_TO_CHAR_LIST:\n return StrToCharList(text_processor_params)\n\n raise Exception(\"Unknown proto type {} of an text processor\".format(text_processor_params.type))\n", "id": "3393338", "language": "Python", "matching_score": 5.099444389343262, "max_stars_count": 5, "path": "calamari_ocr/ocr/text_processing/__init__.py" }, { "content": "from calamari_ocr.ocr.text_processing import \\\n MultiTextProcessor, StripTextProcessor, BidiTextProcessor, \\\n TextNormalizer, TextRegularizer\n\n\nclass DefaultTextPreprocessor(MultiTextProcessor):\n def __init__(self):\n super().__init__(\n [\n BidiTextProcessor(),\n StripTextProcessor(),\n TextNormalizer(),\n TextRegularizer(),\n ]\n )\n", "id": "1370254", "language": "Python", "matching_score": 0.9523974061012268, "max_stars_count": 5, "path": "calamari_ocr/ocr/text_processing/default_text_preprocessor.py" }, { "content": "from calamari_ocr.ocr.checkpoint import Checkpoint\nfrom calamari_ocr.ocr.codec import Codec\nfrom calamari_ocr.ocr.datasets import *\nfrom calamari_ocr.ocr.evaluator import Evaluator\nfrom calamari_ocr.ocr.predictor import Predictor, MultiPredictor, PredictionResult\nfrom calamari_ocr.ocr.trainer import Trainer\nfrom calamari_ocr.ocr.cross_fold import CrossFold\nfrom calamari_ocr.ocr.cross_fold_trainer import CrossFoldTrainer\nimport calamari_ocr.ocr.backends\n", "id": "2022487", "language": "Python", "matching_score": 1.4672008752822876, "max_stars_count": 5, "path": "calamari_ocr/ocr/__init__.py" }, { "content": "from copy import deepcopy\nfrom abc import ABC, abstractmethod\n\nfrom calamari_ocr.proto import Prediction\n\n\nclass Voter(ABC):\n def __init__(self, text_postproc=None):\n super().__init__()\n self.text_postproc = text_postproc\n\n def vote_prediction_result(self, prediction_results):\n if len(prediction_results) == 0:\n raise Exception(\"Empty prediction results\")\n elif len(prediction_results) == 1:\n # no voting required\n return deepcopy(prediction_results[0].prediction)\n else:\n return self.vote_prediction_result_tuple(tuple(prediction_results))\n\n def vote_prediction_results(self, prediction_results):\n return [self.vote_prediction_result(p) for p in prediction_results]\n\n def vote_prediction_result_tuple(self, predictions):\n p = Prediction()\n 
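# Sketch of assembling a preprocessing chain through the proto factory above;
# it mirrors the construction used in the apply_text_preprocessing script, the
# input string is a hypothetical example line.
from calamari_ocr.proto import TextProcessorParams
from calamari_ocr.ocr.text_processing import (
    default_text_normalizer_params,
    default_text_regularizer_params,
    text_processor_from_proto,
)

params = TextProcessorParams()
params.type = TextProcessorParams.MULTI_NORMALIZER
default_text_normalizer_params(params.children.add(), default="NFC")
default_text_regularizer_params(params.children.add(), groups=["extended"])
params.children.add().type = TextProcessorParams.STRIP_NORMALIZER

txt_proc = text_processor_from_proto(params, "pre")
print(txt_proc.apply("  example line  "))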
p.is_voted_result = True\n self._apply_vote(predictions, p)\n\n # postprocessing after voting\n # option 1: Use custom text postprocessor\n # option 2: (Not implemented) Use only the first text postprocessor\n # option 3: Apply all known postprocessors and apply a sequence voting if different results are received\n if self.text_postproc:\n p.sentence = self.text_postproc.apply(p.sentence)\n else:\n sentences = [pred.text_postproc.apply(p.sentence) for pred in predictions]\n\n if all([s == sentences[0] for s in sentences[1:]]):\n # usually all postproc should yield the same results\n p.sentence = sentences[0]\n else:\n # we need to vote again\n from calamari_ocr.ocr.voting import SequenceVoter\n sv = SequenceVoter()\n p.sentence = \"\".join([c for c, _ in sv.process_text(sentences)])\n\n p.avg_char_probability = 0\n for pos in p.positions:\n if len(pos.chars) > 0:\n p.avg_char_probability += pos.chars[0].probability\n p.avg_char_probability /= len(p.positions) if len(p.positions) > 0 else 1\n\n return p\n\n @abstractmethod\n def _apply_vote(self, predictions, p):\n pass\n\n", "id": "4915224", "language": "Python", "matching_score": 1.1954433917999268, "max_stars_count": 5, "path": "calamari_ocr/ocr/voting/voter.py" }, { "content": "from calamari_ocr.ocr.voting.sequence_voter import SequenceVoter\nfrom calamari_ocr.ocr.voting.confidence_voter import ConfidenceVoter\n\nfrom calamari_ocr.proto import VoterParams\n\n\ndef voter_from_proto(voter_params):\n if voter_params.type == VoterParams.SEQUENCE_VOTER:\n return SequenceVoter()\n elif voter_params.type == VoterParams.CONFIDENCE_VOTER_FUZZY_CTC:\n return ConfidenceVoter(fuzzy_ctc=True, blank_index=voter_params.blank_index)\n elif voter_params.type == VoterParams.CONFIDENCE_VOTER_DEFAULT_CTC:\n return ConfidenceVoter(fuzzy_ctc=False, blank_index=voter_params.blank_index)\n else:\n raise Exception(\"Unknown voter type '{}'\".format(voter_params.type))\n", "id": "5975836", "language": "Python", "matching_score": 0.5872728824615479, "max_stars_count": 5, "path": "calamari_ocr/ocr/voting/__init__.py" }, { "content": "import tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops.nn_grad import _BroadcastMul\nfrom tensorflow.python.framework import sparse_tensor\n\n\ndef load(library_path):\n fuzzy_module = tf.load_op_library(library_path)\n\n @ops.RegisterGradient(\"FuzzyCTCLoss\")\n def _FuzzyCTCLossGrad(op, grad_loss, _):\n grad_without_gradient = array_ops.prevent_gradient(\n op.outputs[1], message=\"Currently there is no way to take the second \"\n \" derivative of ctc_loss due to the fused implementation's interaction \"\n \" with tf.gradients()\")\n return [_BroadcastMul(tf.expand_dims(grad_loss, -1), grad_without_gradient), None, None, None]\n\n def fuzzy_ctc_greedy_decoder(inputs, sequence_length):\n outputs = fuzzy_module.fuzzy_ctc_greedy_decoder(inputs, sequence_length)\n (decoded_ix, decoded_val, decoded_shape, log_probabilities) = outputs\n return ([sparse_tensor.SparseTensor(decoded_ix, decoded_val, decoded_shape)],\n log_probabilities)\n\n return {\"module\": fuzzy_module, \"decoder_op\": fuzzy_ctc_greedy_decoder}\n\n", "id": "8459872", "language": "Python", "matching_score": 0.4054865539073944, "max_stars_count": 0, "path": "calamari_ocr/ocr/backends/tensorflow_backend/tensorflow_fuzzy_ctc_loader.py" }, { "content": "import numpy as np\n\n\nclass Sync:\n def __init__(self, texts, substr=None, match=None):\n self.texts = texts\n\n if substr:\n 
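# Sketch of selecting a voter via the proto factory defined above; the
# prediction_results input is hypothetical and would normally come from a
# MultiPredictor run.
from calamari_ocr.proto import VoterParams
from calamari_ocr.ocr.voting import voter_from_proto

voter_params = VoterParams()
voter_params.type = VoterParams.CONFIDENCE_VOTER_DEFAULT_CTC
voter = voter_from_proto(voter_params)
# voted = voter.vote_prediction_result(prediction_results)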
assert(substr.shape[0] == len(self.texts))\n self.substr = substr\n else:\n self.substr = np.zeros((len(texts), 3), dtype=int)\n\n self.match = match\n\n def __str__(self):\n return str(self.substr)\n\n def get_text(self):\n return [self.texts[i][start:start+length] for i, (start, end, length) in enumerate(self.substr)]\n\n def is_valid(self):\n return np.any(self.substr[:, 2] > 0)\n\n def lengths(self):\n return self.substr[:, 2]\n\n def start(self, idx):\n return self.substr[idx, 0]\n\n def stop(self, idx):\n return self.substr[idx, 1]\n\n def length(self, idx):\n return self.substr[idx, 2]\n\n def set_start(self, idx, v):\n self.substr[idx, 0] = v\n\n def set_stop(self, idx, v):\n self.substr[idx, 1] = v\n\n def set_length(self, idx, v):\n self.substr[idx, 2] = v\n\n def set_all(self, idx, v):\n self.substr[idx, :] = v\n\n\ndef synchronize(texts):\n def init():\n sync = Sync(texts)\n for i, text in enumerate(texts):\n sync.set_all(i, [0, len(text) - 1, len(text)])\n\n if sync.is_valid():\n return [sync]\n\n return []\n\n def longest_match(maxlen, c1, start1, stop1, c2, start2, stop2):\n mstart1 = 0\n mstart2 = 0\n s1limit = stop1 - maxlen\n s2limit = stop2 - maxlen\n for s1 in range(start1, s1limit + 1):\n for s2 in range(start2, s2limit + 1):\n if c1[s1] == c2[s2]:\n i1 = s1 + 1\n i2 = s2 + 1\n while i1 <= stop1 and i2 <= stop2 and c1[i1] == c2[i2]:\n i1 += 1\n i2 += 1\n\n increase = i1 - s1 - maxlen\n if increase > 0:\n s1limit -= increase\n s2limit -= increase\n maxlen += increase\n mstart1 = s1\n mstart2 = s2\n\n return maxlen, mstart1, mstart2\n\n def save_match(synclist, num_text, sync, start, length, match):\n left, right = Sync(texts), Sync(texts)\n for i in range(num_text):\n stop = start[i] + length - 1\n left.set_all(i, [sync.start(i), start[i] - 1, start[i] - sync.start(i)])\n right.set_all(i, [stop + 1, sync.stop(i), sync.stop(i) - stop])\n sync.set_all(i, [start[i], stop, length])\n\n sync.match = match\n if left.is_valid():\n synclist.insert(synclist.index(sync), left)\n\n if right.is_valid():\n synclist.insert(synclist.index(sync) + 1, right)\n\n def recursive_sync(synclist, texts, start_index):\n sync = synclist[start_index]\n if np.any(sync.lengths() == 0):\n return\n\n start = np.zeros(len(texts), dtype=int)\n start[0] = sync.start(0)\n length = sync.length(0)\n for i, text in enumerate(texts[1:], 1):\n length, new_start, start[i] = longest_match(0, texts[0], start[0], start[0] + length - 1,\n text, sync.start(i), sync.stop(i))\n\n if length == 0:\n return\n\n change = new_start - start[0]\n if change > 0:\n for j in range(i):\n start[j] += change\n\n save_match(synclist, len(texts), sync, start, length, True)\n\n start_index = synclist.index(sync)\n if start_index - 1 >= 0:\n recursive_sync(synclist, texts, start_index - 1)\n\n start_index = synclist.index(sync)\n if start_index + 1 < len(synclist):\n recursive_sync(synclist, texts, start_index + 1)\n\n return\n\n synclist = init()\n\n if len(synclist) > 0:\n recursive_sync(synclist, texts, 0)\n\n return synclist\n\n\nif __name__ == \"__main__\":\n synclist = synchronize([\"AbcdEfG\", \"cdEFG\"])\n print([s.get_text() for s in synclist])\n", "id": "2605281", "language": "Python", "matching_score": 0.9304836988449097, "max_stars_count": 5, "path": "calamari_ocr/ocr/text_processing/text_synchronizer.py" }, { "content": "import numpy as np\nfrom calamari_ocr.ocr.data_processing.data_preprocessor import DataPreprocessor\n\n\nclass DataRangeNormalizer(DataPreprocessor):\n def __init__(self):\n super().__init__()\n\n def 
_apply_single(self, data : np.ndarray):\n \"\"\"\n Read an image and returns it as a floating point array.\n The optional page number allows images from files containing multiple\n images to be addressed. Byte and short arrays are rescaled to\n the range 0...1 (unsigned) or -1...1 (signed).\n :rtype np.array in range 0...1 (unsigned) or -1...1 (signed)\n \"\"\"\n if data.dtype == np.dtype('uint8'):\n data = data / 255.0\n if data.dtype == np.dtype('int8'):\n data = data / 127.0\n elif data.dtype == np.dtype('uint16'):\n data = data / 65536.0\n elif data.dtype == np.dtype('int16'):\n data = data / 32767.0\n elif data.dtype in [np.dtype('f'), np.dtype('float32'), np.dtype('float64')]:\n pass\n elif data.dtype == bool:\n data = data.astype(np.float32)\n else:\n raise Exception(\"unknown image type: {}\".format(data.dtype))\n\n if data.ndim == 3:\n data = np.mean(data, axis=2)\n\n return data, None", "id": "1014439", "language": "Python", "matching_score": 1.2891093492507935, "max_stars_count": 5, "path": "calamari_ocr/ocr/data_processing/data_range_normalizer.py" }, { "content": "import numpy as np\nfrom calamari_ocr.ocr.data_processing.data_preprocessor import DataPreprocessor\nfrom calamari_ocr.proto import DataPreprocessorParams\n\n\nclass FinalPreparation(DataPreprocessor):\n def __init__(self,\n params: DataPreprocessorParams,\n as_uint8=True):\n super().__init__()\n self.params = params\n self.normalize = not params.no_normalize\n self.invert = not params.no_invert\n self.transpose = not params.no_transpose\n self.pad = params.pad\n self.pad_value = params.pad_value\n self.as_uint8 = as_uint8 # To save memory!\n\n def _apply_single(self, data):\n if self.normalize:\n amax = np.amax(data)\n if amax > 0:\n data = data * 1.0 / amax\n\n if self.invert:\n data = np.amax(data) - data\n\n if self.transpose:\n data = data.T\n\n if self.pad > 0:\n if self.transpose:\n w = data.shape[1]\n data = np.vstack([np.full((self.pad, w), self.pad_value), data, np.full((self.pad, w), self.pad_value)])\n else:\n w = data.shape[0]\n data = np.hstack([np.full((w, self.pad), self.pad_value), data, np.full((w, self.pad), self.pad_value)])\n\n if self.as_uint8:\n data = (data * 255).astype(np.uint8)\n\n return data, None\n\n def local_to_global_pos(self, x, params):\n if self.pad > 0 and self.transpose:\n return x - self.pad\n else:\n return x\n", "id": "11573106", "language": "Python", "matching_score": 1.3050775527954102, "max_stars_count": 5, "path": "calamari_ocr/ocr/data_processing/final_preparation.py" }, { "content": "class OutputToInputTransformer:\n def __init__(self, data_processing, backend):\n self.data_processing = data_processing\n self.backend = backend\n\n def local_to_global(self, x, data_proc_params):\n x = self.backend.output_to_input_position(x)\n if self.data_processing:\n x = self.data_processing.local_to_global_pos(x, data_proc_params)\n return x", "id": "7573754", "language": "Python", "matching_score": 0.4159306585788727, "max_stars_count": 5, "path": "calamari_ocr/utils/output_to_input_transformer.py" }, { "content": "from calamari_ocr.utils.multiprocessing import tqdm_wrapper\nfrom calamari_ocr.ocr.datasets import InputDataset\n\nfrom typing import List\n\n\nclass Codec:\n @staticmethod\n def from_input_dataset(input_dataset: List[InputDataset], whitelist=None, progress_bar=False):\n chars = set() if whitelist is None else set(whitelist)\n for ds in input_dataset:\n if not ds:\n continue\n for text in tqdm_wrapper(ds.text_generator(), total=len(ds), desc=\"Computing codec\", 
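# Self-contained sketch of the dtype handling above: uint8 images are rescaled
# to [0, 1] and colour images are averaged down to a single channel.
import numpy as np

img = np.full((4, 4, 3), 255, dtype=np.uint8)
data = img / 255.0
if data.ndim == 3:
    data = np.mean(data, axis=2)
assert data.shape == (4, 4) and data.max() == 1.0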
progress_bar=progress_bar):\n for c in text:\n chars.add(c)\n\n return Codec(sorted(list(chars)))\n\n @staticmethod\n def from_texts(texts, whitelist=None):\n \"\"\"Compute a codec from given text\n\n First computes a set of all available characters.\n Then, a Codec is created\n\n Parameters\n ----------\n texts : obj:`list` of :obj:`str`\n a list of strings\n whitelist\n a list of characters that are forced to be in the codec\n Returns\n -------\n Codec based on the set of characters + whitelist\n \"\"\"\n chars = set() if whitelist is None else set(whitelist)\n\n for text in texts:\n for c in text:\n chars.add(c)\n\n return Codec(sorted(list(chars)))\n\n def __init__(self, charset):\n \"\"\" Construct a codec based on a given charsed (symbols)\n\n A symbol is typically a character (e.g. a, b, c, d, ...) in OCR, in OMR this might be\n the position of a note in the staff.\n The labels are required for training, since the DNN will only predict integer numbers (classes).\n Given a set of symbols the codec will automatically assign a label to each character and store it for\n processing of a line.\n\n The codec then is used to `decode` and `encode` a line.\n\n\n As first index a __blank__ (empty string) will be added as required for the CTC algorithm.\n\n Parameters\n ----------\n charset : obj:`list` of :obj:`str`\n a list of characters\n \"\"\"\n charset = list(charset)\n if len(charset) == 0:\n raise Exception(\"Got empty charset\")\n\n if charset[0] != \"\":\n self.charset = [\"\"] + charset # blank is label 0\n else:\n self.charset = charset\n\n self.code2char = {}\n self.char2code = {}\n for code, char in enumerate(self.charset):\n self.code2char[code] = char\n self.char2code[char] = code\n\n def __len__(self):\n \"\"\" Get the number of characeters in the charset\n\n this is equal to the maximum possible label.\n\n Returns\n -------\n number of characters in the charset\n \"\"\"\n return len(self.charset)\n\n def size(self):\n \"\"\" Get the number of characeters in the charset\n\n this is equal to the maximum possible label.\n\n Returns\n -------\n number of characters in the charset\n \"\"\"\n return len(self.charset)\n\n def encode(self, s):\n \"\"\" Encode the string into labels\n\n Parameters\n ----------\n s : str\n sequence of characeters\n\n Returns\n -------\n sequence of labels\n\n See Also\n --------\n decode\n \"\"\"\n return [self.char2code[c] for c in s]\n\n def decode(self, l):\n \"\"\" Decode the sequence of labels into a sequence of characters\n\n Parameters\n ----------\n l : list of int\n sequence of labels as predicted by the neural net\n\n Returns\n -------\n sequence of characters\n\n See Also\n --------\n encode\n \"\"\"\n return [self.code2char[c] for c in l]\n\n def extend(self, codec):\n \"\"\" extend the codec by the given characeters\n\n If a character is already present it will be skipped.\n The new characters will be added at the end of the codec (highest label numbers)\n\n Parameters\n ----------\n codec : list of str\n the characeters to add\n Returns\n -------\n list of int\n the positions/labels of added characters\n See Also\n --------\n shrink\n \"\"\"\n size = self.size()\n added = []\n for c in codec.code2char.values():\n if c not in self.charset: # append chars that don't appear in the codec\n self.code2char[size] = c\n self.char2code[c] = size\n self.charset.append(c)\n added.append(size)\n size += 1\n\n return added\n\n def shrink(self, codec):\n \"\"\" remove the given `codec` from this Codec\n\n This algorithm will compute the positions of the 
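# Sketch of the encode/decode round trip provided by the Codec above;
# label 0 is reserved for the CTC blank.
codec = Codec.from_texts(["abc", "abd"])
labels = codec.encode("bad")
assert codec.decode(labels) == ["b", "a", "d"]
assert codec.code2char[0] == ""    # blank label
assert len(codec) == 5             # blank + {a, b, c, d}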
codes in the old charset and ignore non present chars.\n This output can then be used to delete specific nodes in the neural net.\n\n Parameters\n ----------\n codec : list of str\n chars to remove if present\n Returns\n -------\n list of int\n positions/labels of the chars that shall be removed on the old charset\n \"\"\"\n deleted_positions = []\n positions = []\n for number, char in self.code2char.items():\n if char not in codec.char2code:\n deleted_positions.append(number)\n else:\n positions.append(number)\n\n self.charset = [self.code2char[c] for c in sorted(positions)]\n self.code2char = {}\n self.char2code = {}\n for code, char in enumerate(self.charset):\n self.code2char[code] = char\n self.char2code[char] = code\n\n return deleted_positions\n\n def align(self, codec, shrink=True, extend=True):\n \"\"\" Change the codec to the new `codec` but keep the positions of chars that are in both codecs.\n\n This function is used to compute a codec change: deleted labels, added characters.\n\n Parameters\n ----------\n codec : list of str\n Characters of the new codec\n shrink : bool\n Shrink the codec by unavailable chars\n extend : bool\n Extend new chars to the codec\n Returns\n -------\n list of int\n list of the deleted positions\n list of int\n list of the labels of the newly added characters\n See Also\n --------\n shrink\n extend\n \"\"\"\n deleted_positions = self.shrink(codec) if shrink else []\n added_positions = self.extend(codec) if extend else []\n return deleted_positions, added_positions\n\n\ndef ascii_codec():\n \"\"\" default ascii codec\n\n Returns\n -------\n Codec\n codec based on the default ascii characters\n \"\"\"\n ascii_labels = [\"\", \" \", \"~\"] + [chr(x) for x in range(33, 126)]\n return Codec(ascii_labels)\n\n", "id": "6138271", "language": "Python", "matching_score": 1.60423743724823, "max_stars_count": 5, "path": "calamari_ocr/ocr/codec.py" }, { "content": "import multiprocessing\nfrom multiprocessing.pool import ThreadPool\nimport os\nimport time\nimport subprocess\nfrom tqdm import tqdm\n\n\ndef tqdm_wrapper(iterable, _sentinel=None, total=1, desc=\"\", progress_bar=False):\n if _sentinel:\n raise Exception(\"You must call tqdm_wrapper by using parameter names to specify additional parameters.\")\n\n if not progress_bar:\n return iterable\n else:\n return tqdm(iterable, total=total, desc=desc)\n\n\ndef parallel_map(f, d, _sentinel=None, desc=\"\", processes=1, progress_bar=False, use_thread_pool=False, max_tasks_per_child=None):\n if _sentinel:\n raise Exception(\"You must call parallel_map by using parameter names to specify additional parameters besides the default map(func, data).\")\n\n if processes <= 0:\n processes = os.cpu_count()\n\n processes = min(processes, len(d))\n\n if processes == 1:\n if progress_bar:\n out = list(tqdm(map(f, d), desc=desc, total=len(d)))\n else:\n out = list(map(f, d))\n\n else:\n if use_thread_pool:\n with ThreadPool(processes=processes) as pool:\n if progress_bar:\n out = list(tqdm(pool.imap(f, d), desc=desc, total=len(d)))\n else:\n out = pool.map(f, d)\n else:\n with multiprocessing.Pool(processes=processes, maxtasksperchild=max_tasks_per_child) as pool:\n if progress_bar:\n out = list(tqdm(pool.imap(f, d), desc=desc, total=len(d)))\n else:\n out = pool.map(f, d)\n\n return out\n\n\ndef prefix_run_command(command, prefix, args):\n if type(command) is not list and type(command) is not tuple:\n raise Exception(\"The command must be a list or tuple of commands and arguments\")\n\n if prefix:\n prefix = 
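# Usage sketch for the parallel_map helper defined above; with processes=1 it
# falls back to a plain map, which keeps the example deterministic.
from calamari_ocr.utils import parallel_map

def double(x):
    return 2 * x

out = parallel_map(double, [1, 2, 3], desc="Doubling", processes=1, progress_bar=False)
assert out == [2, 4, 6]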
prefix.format(args).split()\n else:\n prefix = []\n\n return prefix + command\n\n\ndef run(command, verbose=False):\n if type(command) is not list and type(command) is not tuple:\n raise Exception(\"The command must be a list or tuple of commands and arguments\")\n\n if verbose:\n print(\"Executing: {}\".format(\" \".join(command)))\n\n env = os.environ.copy()\n env['PYTHONIOENCODING'] = 'utf-8'\n process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=False, env=env)\n while True:\n line = process.stdout.readline().decode('utf-8')\n\n # check if process has finished\n if process.poll() is not None:\n break\n\n # check if output is present\n if line is None or len(line) == 0:\n time.sleep(0.1)\n else:\n yield line\n\n if process.returncode != 0:\n raise Exception(\"Error: Process finished with code {}\".format(process.returncode))\n", "id": "3718454", "language": "Python", "matching_score": 0.5881634950637817, "max_stars_count": 5, "path": "calamari_ocr/utils/multiprocessing.py" }, { "content": "def default_network_meta():\n return {\n \"inter_threads\": 0,\n \"intra_threads\": 0,\n \"ctc_merge_repeated\": True,\n \"use_peepholes\": False,\n \"dropout\": False,\n \"solver\": \"Adam\",\n \"ctc\": \"Default\",\n \"learning_rate\": 1e-3,\n \"momentum\": 0.9,\n \"layers\": [],\n \"cudnn\": True,\n \"features\": 40, # i. e. the line height\n }\n", "id": "11011652", "language": "Python", "matching_score": 0.029579002410173416, "max_stars_count": 5, "path": "calamari_ocr/ocr/backends/network_meta.py" }, { "content": "import os\nfrom lxml import etree as ET\nfrom .data import Book, Page, Block, Format, Line, Par, Rect\nfrom .exceptions import XMLParseError\nfrom tqdm import tqdm\n\n\nclass XMLReader:\n \"\"\"\n This class can read Abbyy documents out of a directory\n \"\"\"\n\n def __init__(self, imgfiles: [], abbyfiles: [], skip_invalid: bool, remove_invalid: bool):\n\n \"\"\"\n Constructs an XMLReader class with the :param directory\n\n :param directory: Absolute or relative path of the directory there the abbyy documents are located\n \"\"\"\n assert(len(imgfiles) == len(abbyfiles))\n self.imgfiles = imgfiles\n self.xmlfiles = abbyfiles\n self.skip_invalid = skip_invalid\n self.remove_invalid = remove_invalid\n\n def read(self) -> Book:\n\n \"\"\"\n Start trying to read the data from the directory :var self.directory\n\n :return: a Data.Book class with all the readed data from :var self.directory\n :exception WrongFileStructureException: Is raised then files are missing in the directory\n (e.g.: no image file for an xml file which is named equally)\n :exception XMLParseError: Is raised then there are errors in a xml file\n \"\"\"\n\n book = Book()\n toremove = []\n\n # Searching for the xml abbyy files and handling Errors in the data structure\n for i, (imgfile, xmlfile) in tqdm(enumerate(zip(self.imgfiles, self.xmlfiles)),\n desc=\"Loading abby files\", total=len(self.imgfiles)):\n if xmlfile:\n if not os.path.exists(xmlfile):\n if not self.skip_invalid:\n raise XMLParseError('The abbyy xml file {} does not exist'.format(xmlfile))\n else:\n toremove.append(i)\n continue\n\n if imgfile:\n if not os.path.exists(imgfile):\n if not self.skip_invalid:\n raise XMLParseError('The image file {} does not exist'.format(imgfile))\n else:\n toremove.append(i)\n continue\n\n try:\n book.pages += list(self.parseXMLfile(imgfile, xmlfile))\n except XMLParseError as e:\n print(e)\n if self.skip_invalid:\n toremove.append(i)\n continue\n else:\n raise e\n\n for i in reversed(toremove):\n del 
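# Sketch of consuming the run() generator above: it yields decoded stdout lines
# while the child process is alive and raises if the exit code is non-zero.
# Note that for very short-lived processes the polling loop may miss trailing
# output, so this is best suited to long-running training commands.
import sys
from calamari_ocr.utils.multiprocessing import run

for line in run([sys.executable, "-c", "print('hello from child')"], verbose=True):
    print(line.rstrip())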
self.imgfiles[i]\n del self.xmlfiles[i]\n\n return book\n\n @staticmethod\n def parseRect(node, required=True) -> Rect:\n try:\n a = XMLReader.requireAttr(node, ['l', 't', 'r', 'b'])\n\n rect = Rect(int(a['l']), int(a['t']), int(a['r']), int(a['b']))\n\n except Exception as e:\n if required:\n raise e\n else:\n return None\n\n return rect\n\n @staticmethod\n def requireAttr(node, attrs):\n a = {}\n for attr in attrs:\n a[attr] = node.get(attr)\n if a[attr] is None:\n raise XMLParseError('Missing required attribute {} on node {}'.format(attr, node))\n\n return a\n\n def parseXMLfile(self, imgfile, xmlfile):\n # Reads the xml file with the xml.etree.ElementTree package\n try:\n tree = ET.parse(xmlfile)\n except ET.ParseError as e:\n raise XMLParseError('The xml file \\'' + xmlfile + '\\' couldn\\'t be read because of a '\n 'syntax error in the xml file. ' + e.msg)\n\n root = tree.getroot()\n\n if root is None:\n raise XMLParseError('The xml file \\'' + xmlfile + '\\' is empty.')\n\n for pagecount, pageNode in enumerate(root):\n a = XMLReader.requireAttr(pageNode, ['width', 'height', 'resolution', 'originalCoords'])\n page = Page(a['width'], a['height'], a['resolution'], a['originalCoords'], imgfile, xmlfile)\n\n for blockcount, blockNode in enumerate(pageNode):\n\n # Checks if the blockType is text, ignoring all other types\n type = blockNode.get('blockType')\n if type is not None and type == 'Text':\n\n # Reads rectangle data and controls if they are empty\n name = blockNode.get('blockName')\n\n block = Block(type, name, XMLReader.parseRect(blockNode, required=False))\n\n for textNode in blockNode:\n\n # Again only text nodes will be considered\n\n if textNode.tag == '{http://www.abbyy.com/FineReader_xml/FineReader10-schema-v1.xml}text':\n for parNode in textNode:\n align = parNode.get('align')\n startIndent = parNode.get('startIndent')\n lineSpacing = parNode.get('lineSpacing')\n\n par = Par(align, startIndent, lineSpacing)\n\n for linecount, lineNode in enumerate(parNode):\n baseline = lineNode.get('baseline')\n\n line = Line(baseline, XMLReader.parseRect(lineNode))\n\n lang = None\n text = \"\"\n maxCount = 0\n for formNode in lineNode:\n countChars = 0\n if formNode.text is None or formNode.text == \"\\n\" or formNode.text == \"\":\n for charNode in formNode:\n text += str(charNode.text)\n countChars = countChars + 1\n if countChars > maxCount:\n maxCount = countChars\n lang = formNode.get('lang')\n\n\n else:\n lang = formNode.get('lang')\n text = str(formNode.text)\n\n format = Format(lang, text)\n line.formats.append(format)\n par.lines.append(line)\n\n block.pars.append(par)\n\n page.blocks.append(block)\n\n yield page\n\n", "id": "2260362", "language": "Python", "matching_score": 4.104443550109863, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/abbyy_dataset/reader.py" }, { "content": "from lxml import etree as ET\nfrom .data import Page\n\n\nclass XMLWriter:\n @staticmethod\n def write(page: Page, filename: str):\n self = XMLWriter\n\n root = ET.Element('document')\n tree = ET.ElementTree(root)\n\n self._addElement(root, \"xmlns\", \"http://www.abbyy.com/FineReader_xml/FineReader10-schema-v1.xml\")\n self._addElement(root, \"version\", \"1.0\")\n self._addElement(root, \"producer\", \"Calamari\")\n self._addElement(root, \"languages\", \"\")\n NS_XSI = \"{http://www.w3.org/2001/XMLSchema-instance}\"\n root.set(NS_XSI + \"schemaLocation\",\n \"http://www.abbyy.com/FineReader_xml/FineReader10-schema-v1.xml 
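# Hedged usage sketch for the Abbyy reader above; the file names are
# placeholders, not files shipped with the project.
reader = XMLReader(["page_0001.png"], ["page_0001.xml"],
                   skip_invalid=True, remove_invalid=True)
book = reader.read()
for line in book.getLines():
    print(line.rect, "".join(f.text for f in line.formats))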
http://www.abbyy.com/FineReader_xml/FineReader10-schema-v1.xml\")\n\n pageNode = ET.SubElement(root, \"page\")\n self._addElement(pageNode, \"width\", page.width)\n self._addElement(pageNode, \"height\", page.height)\n self._addElement(pageNode, \"resolution\", page.resolution)\n self._addElement(pageNode, \"originalCoords\", page.resolution)\n\n for block in page.blocks:\n\n blockNode = ET.SubElement(pageNode, \"block\")\n self._addElement(blockNode, \"blockType\", block.blockType)\n self._addElement(blockNode, \"blockName\", block.blockName)\n if block.rect:\n self._addElement(blockNode, \"l\", block.rect.left.__str__())\n self._addElement(blockNode, \"t\", block.rect.top.__str__())\n self._addElement(blockNode, \"r\", block.rect.right.__str__())\n self._addElement(blockNode, \"b\", block.rect.bottom.__str__())\n\n textNode = ET.SubElement(blockNode, \"text\")\n\n for par in block.pars:\n\n parNode = ET.SubElement(textNode, \"par\")\n self._addElement(parNode, \"align\", par.align)\n self._addElement(parNode, \"startIndent\", par.startIndent)\n self._addElement(parNode, \"lineSpacing\", par.lineSpacing)\n\n for line in par.lines:\n\n lineNode = ET.SubElement(parNode, \"line\")\n self._addElement(lineNode, \"baseline\", line.baseline)\n self._addElement(lineNode, \"l\", line.rect.left.__str__())\n self._addElement(lineNode, \"t\", line.rect.top.__str__())\n self._addElement(lineNode, \"r\", line.rect.right.__str__())\n self._addElement(lineNode, \"b\", line.rect.bottom.__str__())\n\n for fo in line.formats:\n\n foNode = ET.SubElement(lineNode, \"formatting\")\n self._addElement(foNode, \"lang\", fo.lang)\n foNode.text = fo.text\n\n tree.write(open(filename, 'wb'), encoding='utf-8', xml_declaration=True,\n pretty_print=True)\n\n @staticmethod\n def _addElement(element, key, value):\n\n \"\"\"\n Only add attributes to an tag if the key is not None\n\n :param element: the tag element of the xml tree\n :param key: the key of the attribute\n :param value: the value of the attribute\n :return:\n \"\"\"\n\n if value is not None:\n element.set(key, value)\n\n", "id": "11462763", "language": "Python", "matching_score": 3.6233878135681152, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/abbyy_dataset/writer.py" }, { "content": "class Rect:\n\n \"\"\"\n Class defines the rectangle of an element in the Abbyy document\n \"\"\"\n\n def __init__(self, l: int, t: int, r: int, b: int):\n\n \"\"\"\n Constructs a Rect class\n :param l: length\n :param t: top\n :param r: right\n :param b: bottom\n \"\"\"\n\n self.left = l\n self.top = t\n self.right = r\n self.bottom = b\n self.height = self.bottom - self.top\n self.width = self.right - self.left\n\n def __str__(self):\n return 'Rect:[l=\\'' + self.left.__str__() + '\\', t=\\'' + self.top.__str__() + '\\', r=\\'' + \\\n self.right.__str__() + '\\', b=\\'' + self.bottom.__str__() + '\\']'\n\n\nclass Book:\n\n \"\"\"\n Main class; contains all subelements: book -> page -> block -> par -> line -> format\n \"\"\"\n\n def __init__(self):\n self.pages = []\n\n def __str__(self):\n \"\"\"\n Writes all information of the book element and all sub elements into the python console.\n :return: None\n \"\"\"\n\n s = \"\"\n\n for page in self.pages:\n s += page\n for block in page.blocks:\n s += (' '+block.__str__())\n for par in block.pars:\n s += (' '+par.__str__())\n for line in par.lines:\n s += ' '+line.__str__()\n for format in line.formats:\n s += ' ' + format.__str__()\n\n def getBlocks(self)->[]:\n\n \"\"\"\n :return: All the blocks of this book\n 
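# Self-contained sketch of the Rect geometry defined above: width and height
# are derived from the corner coordinates at construction time.
r = Rect(10, 20, 110, 70)       # l, t, r, b
assert (r.width, r.height) == (100, 50)
print(r)                        # Rect:[l='10', t='20', r='110', b='70']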
\"\"\"\n\n blocks = []\n\n for page in self.pages:\n for block in page.blocks:\n blocks.append(block)\n\n return blocks\n\n def getPars(self)->[]:\n\n \"\"\"\n :return: All the paragraphs of this book\n \"\"\"\n\n pars = []\n\n for page in self.pages:\n for block in page.blocks:\n for par in block.pars:\n pars.append(par)\n\n return pars\n\n def getLines(self)->[]:\n\n \"\"\"\n :return: All the lines of this book\n \"\"\"\n\n lines = []\n\n for page in self.pages:\n for block in page.blocks:\n for par in block.pars:\n for line in par.lines:\n lines.append(line)\n\n return lines\n\n def getFormats(self)->[]:\n\n \"\"\"\n :return: All the chars of this book\n \"\"\"\n\n formats = []\n\n for page in self.pages:\n for block in page.blocks:\n for par in block.pars:\n for line in par.lines:\n for format in line.formats:\n formats.append(format)\n\n return formats\n\n\nclass Page:\n\n \"\"\"\n Subelement of the book class; contains a list with the subelement block\n \"\"\"\n\n def __init__(self, width: int, height: int, resolution: int, originalCoords: int, imgFile: str, xmlFile: str):\n\n \"\"\"\n Construct a page class with an empty block list\n :param width: The width of the page (in pixel)\n :param height: The height of the page (in pixel)\n :param resolution: The resolution of the page (in dpi ???)\n :param originalCoords: ???\n :param imgFile: The name of the image file\n :param xmlFile: The name of the xml file\n \"\"\"\n\n self.width = width\n self.height = height\n self.resolution = resolution\n self.originalCoords = originalCoords\n self.imgFile = imgFile\n self.xmlFile = xmlFile\n self.blocks = []\n\n def __str__(self):\n return 'Page:[ImageFile=\\''+self.imgFile +\\\n '\\', XMLFile=\\''+self.xmlFile +\\\n '\\', width=\\''+self.width.__str__() +\\\n '\\', height=\\''+self.height.__str__() +\\\n '\\', resolution=\\''+self.resolution.__str__() +\\\n '\\', originalCoords=\\''+self.originalCoords.__str__()+'\\']'\n\n def getPars(self) -> []:\n\n \"\"\"\n :return: All the pars of this page\n \"\"\"\n\n pars = []\n\n for block in self.blocks:\n for par in block.pars:\n pars.append(par)\n\n return pars\n\n\n def getLines(self) -> []:\n\n \"\"\"\n :return: All the lines of this page\n \"\"\"\n\n lines = []\n\n for block in self.blocks:\n for par in block.pars:\n for line in par.lines:\n lines.append(line)\n\n return lines\n\n def getFormats(self) -> []:\n\n \"\"\"\n :return: All the Format Tags of this page\n \"\"\"\n\n formats = []\n\n for block in self.blocks:\n for par in block.pars:\n for line in par.lines:\n for format in line.formats:\n formats.append(format)\n\n return formats\n\n\nclass Block:\n\n \"\"\"\n Subelement of the page class; contains a list with the subelement par\n \"\"\"\n\n def __init__(self, blockType: str, blockName: str, rect: Rect):\n\n \"\"\"\n Construct a block class with an empty line list\n :param blockType: The type of a block (further information in the abbyy doc)\n :param rect: The rectangle of this element\n \"\"\"\n\n self.blockType = blockType\n self.blockName = blockName\n self.rect = rect\n self.pars = []\n\n def __str__(self):\n return 'Block:[BlockType={}, rect={}]'.format(self.blockType, self.rect)\n\n def getLines(self) -> []:\n\n \"\"\"\n :return: All the lines of this block\n \"\"\"\n\n lines = []\n\n for par in self.pars:\n for line in par.lines:\n lines.append(line)\n\n return lines\n\n def getFormats(self) -> []:\n\n \"\"\"\n :return: All the Format Tags of this block\n \"\"\"\n\n formats = []\n\n for par in self.pars:\n for line in par.lines:\n 
for format in line.formats:\n formats.append(format)\n\n return formats\n\n\nclass Par:\n \"\"\"\"\n Subelement of the block class; contains a list with the subelement line\n \"\"\"\n\n def __init__(self, align: str, startIndent: int, lineSpacing: int):\n\n \"\"\"\n Construct a Paragraph Class with an empty line list\n :param align:\n :param startIndent:\n :param lineSpacing:\n \"\"\"\n\n self.align = align\n self.startIndent = startIndent\n self.lineSpacing = lineSpacing\n self.lines = []\n\n def __str__(self):\n return 'Paragraph:[Align=\\''+self.align.__str__()+\\\n '\\', startIndent=\\''+self.startIndent.__str__()+\\\n '\\', lineSpacing=\\''+self.lineSpacing.__str__()+'\\']'\n\n def getFormats(self) -> []:\n\n \"\"\"\n :return: All the Format Tags of the Paragraph\n \"\"\"\n\n formats = []\n\n for line in self.lines:\n for format in line.formats:\n formats.append(format)\n\n return formats\n\n\nclass Line:\n\n \"\"\"\"\n Subelement of the par class; contains a list with the subelement format\n \"\"\"\n\n def __init__(self, baseline: int, rect: Rect):\n\n \"\"\"\n Construct a line class with an empty char list\n :param baseline: ???\n :param rect: The rectangle of this element\n \"\"\"\n\n self.baseline = baseline\n self.rect = rect\n self.formats = []\n\n def __str__(self):\n return 'Line:[baseline=\\''+self.baseline.__str__() +\\\n '\\', '+self.rect.__str__()+']'\n\n\nclass Format:\n\n def __init__(self, lang: str, text: str):\n\n self.lang = lang\n self.text = text\n\n def __str__(self):\n return 'Format:[lang=\\''+self.lang.__str__() + \\\n '\\', text=\\''+self.text+'\\']'", "id": "2655406", "language": "Python", "matching_score": 1.09645414352417, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/abbyy_dataset/data.py" }, { "content": "import numpy as np\nfrom scipy.ndimage import filters\nfrom calamari_ocr.ocr.data_processing.data_preprocessor import DataPreprocessor\nfrom calamari_ocr.ocr.data_processing.scale_to_height_processor import ScaleToHeightProcessor\n\n\nclass CenterNormalizer(DataPreprocessor):\n def __init__(self, params, extra_params=(4, 1.0, 0.3), debug=False):\n self.debug = debug\n self.target_height = params.line_height if params.line_height else 48\n self.range, self.smoothness, self.extra = extra_params\n super().__init__()\n\n def _apply_single(self, data):\n out, params = self.normalize(data, cval=np.amax(data))\n return out, params\n\n def set_height(self, target_height):\n self.target_height = target_height\n\n def measure(self, line):\n h, w = line.shape\n smoothed = filters.gaussian_filter(line, (h * 0.5, h * self.smoothness), mode='constant')\n smoothed += 0.001 * filters.uniform_filter(smoothed, (h * 0.5, w), mode='constant')\n a = np.argmax(smoothed, axis=0)\n a = filters.gaussian_filter(a, h * self.extra)\n center = np.array(a, 'i')\n deltas = abs(np.arange(h)[:, np.newaxis] - center[np.newaxis, :])\n mad = np.mean(deltas[line != 0])\n r = int(1 + self.range * mad)\n\n return center, r\n\n def dewarp(self, img, cval=0, dtype=np.dtype('f')):\n temp = np.amax(img) - img\n amax = np.amax(temp)\n if amax == 0:\n # white image\n return temp\n\n temp = temp * 1.0 / np.amax(temp)\n center, r = self.measure(temp)\n h, w = img.shape\n # The actual image img is embedded into a larger image by\n # adding vertical space on top and at the bottom (padding)\n hpadding = r # this is large enough\n padded = np.vstack([cval * np.ones((hpadding, w)), img, cval * np.ones((hpadding, w))])\n center = center + hpadding\n dewarped = [padded[center[i] - r:center[i]+r, 
i] for i in range(w)]\n dewarped = np.array(dewarped, dtype=dtype).T\n\n return dewarped\n\n def normalize(self, img, order=1, dtype=np.dtype('f'), cval=0):\n # resize the image to a appropriate height close to the target height to speed up dewarping\n intermediate_height = int(self.target_height * 1.5)\n m1 = 1\n if intermediate_height < img.shape[0]:\n m1 = intermediate_height / img.shape[0]\n img = ScaleToHeightProcessor.scale_to_h(img, intermediate_height, order=order, dtype=dtype, cval=cval)\n\n # dewarp\n dewarped = self.dewarp(img, cval=cval, dtype=dtype)\n\n t = dewarped.shape[0] - img.shape[0]\n # scale to target height\n scaled = ScaleToHeightProcessor.scale_to_h(dewarped, self.target_height, order=order, dtype=dtype, cval=cval)\n m2 = scaled.shape[1] / dewarped.shape[1]\n return scaled, (m1, m2, t)\n\n def local_to_global_pos(self, x, params):\n m1, m2, t = params\n return x / m1 / m2\n\n", "id": "5744206", "language": "Python", "matching_score": 4.207514762878418, "max_stars_count": 5, "path": "calamari_ocr/ocr/data_processing/center_normalizer.py" }, { "content": "import numpy as np\nfrom calamari_ocr.ocr.data_processing.data_preprocessor import DataPreprocessor\nfrom calamari_ocr.proto import DataPreprocessorParams\nfrom scipy.ndimage import interpolation\n\n\nclass ScaleToHeightProcessor(DataPreprocessor):\n def __init__(self, data_preprocessor_params: DataPreprocessorParams):\n super().__init__()\n self.height = data_preprocessor_params.line_height\n\n def _apply_single(self, data):\n scaled = ScaleToHeightProcessor.scale_to_h(data, self.height)\n scale = scaled.shape[1] / data.shape[1]\n return scaled, (scale, )\n\n def local_to_global_pos(self, x, params):\n scale, = params\n return x / scale\n\n @staticmethod\n def scale_to_h(img, target_height, order=1, dtype=np.dtype('f'), cval=0):\n h, w = img.shape\n scale = target_height * 1.0 / h\n target_width = np.maximum(int(scale * w), 1)\n output = interpolation.affine_transform(\n 1.0 * img,\n np.eye(2) / scale,\n order=order,\n output_shape=(target_height,target_width),\n mode='constant',\n cval=cval)\n\n output = np.array(output, dtype=dtype)\n return output\n", "id": "4396446", "language": "Python", "matching_score": 2.8654322624206543, "max_stars_count": 5, "path": "calamari_ocr/ocr/data_processing/scale_to_height_processor.py" }, { "content": "from calamari_ocr.ocr.data_processing.center_normalizer import CenterNormalizer\nfrom calamari_ocr.ocr.data_processing.data_preprocessor import DataPreprocessor, NoopDataPreprocessor, MultiDataProcessor\nfrom calamari_ocr.ocr.data_processing.data_range_normalizer import DataRangeNormalizer\nfrom calamari_ocr.ocr.data_processing.final_preparation import FinalPreparation\nfrom calamari_ocr.ocr.data_processing.scale_to_height_processor import ScaleToHeightProcessor\n\nfrom calamari_ocr.ocr.data_processing.default_data_preprocessor import DefaultDataPreprocessor\n\nfrom calamari_ocr.proto import DataPreprocessorParams\n\n\ndef data_processor_from_proto(data_preprocessor_params):\n if len(data_preprocessor_params.children) > 0 and data_preprocessor_params.type != DataPreprocessorParams.MULTI_NORMALIZER:\n raise ValueError(\"Only a MULTI_NORMALIZER may have children, however got {}\".format(\n DataPreprocessorParams.Type.Name(data_preprocessor_params.type)))\n\n if data_preprocessor_params.type == DataPreprocessorParams.MULTI_NORMALIZER:\n return MultiDataProcessor(\n [data_processor_from_proto(c) for c in data_preprocessor_params.children]\n )\n elif data_preprocessor_params.type == 
DataPreprocessorParams.DEFAULT_NORMALIZER:\n return DefaultDataPreprocessor(data_preprocessor_params)\n elif data_preprocessor_params.type == DataPreprocessorParams.NOOP_NORMALIZER:\n return NoopDataPreprocessor()\n elif data_preprocessor_params.type == DataPreprocessorParams.RANGE_NORMALIZER:\n return DataRangeNormalizer()\n elif data_preprocessor_params.type == DataPreprocessorParams.CENTER_NORMALIZER:\n return CenterNormalizer(data_preprocessor_params)\n elif data_preprocessor_params.type == DataPreprocessorParams.FINAL_PREPARATION:\n return FinalPreparation(data_preprocessor_params)\n elif data_preprocessor_params.type == DataPreprocessorParams.SCALE_TO_HEIGHT:\n return ScaleToHeightProcessor(data_preprocessor_params)\n\n raise Exception(\"Unknown proto type {} of an data processor\".format(data_preprocessor_params.type))\n\n", "id": "9202649", "language": "Python", "matching_score": 5.035991668701172, "max_stars_count": 5, "path": "calamari_ocr/ocr/data_processing/__init__.py" }, { "content": "from calamari_ocr.ocr.data_processing.data_range_normalizer import DataRangeNormalizer\nfrom calamari_ocr.ocr.data_processing.data_preprocessor import MultiDataProcessor\nfrom calamari_ocr.ocr.data_processing.center_normalizer import CenterNormalizer\nfrom calamari_ocr.ocr.data_processing.final_preparation import FinalPreparation\n\n\nclass DefaultDataPreprocessor(MultiDataProcessor):\n def __init__(self, params):\n super().__init__(\n [\n DataRangeNormalizer(),\n CenterNormalizer(params),\n FinalPreparation(params),\n ]\n )\n", "id": "12270506", "language": "Python", "matching_score": 1.570976734161377, "max_stars_count": 5, "path": "calamari_ocr/ocr/data_processing/default_data_preprocessor.py" }, { "content": "from abc import ABC, abstractmethod\nimport numpy as np\n\nfrom calamari_ocr.utils import parallel_map\n\n\nclass DataPreprocessor(ABC):\n def __init__(self):\n super().__init__()\n\n def apply(self, data, processes=1, progress_bar=False, max_tasks_per_child=100):\n if isinstance(data, np.ndarray):\n return self._apply_single(data)\n elif isinstance(data, list) or isinstance(data, tuple):\n if len(data) == 0:\n return []\n\n return parallel_map(self._apply_single, data, desc=\"Data Preprocessing\",\n processes=processes, progress_bar=progress_bar, max_tasks_per_child=max_tasks_per_child)\n else:\n raise Exception(\"Unknown instance of data: {}. 
Supported list and str\".format(type(data)))\n\n def local_to_global_pos(self, x, params):\n return x\n\n @abstractmethod\n def _apply_single(self, data):\n return data, None\n\n\nclass NoopDataPreprocessor(DataPreprocessor):\n def __init__(self):\n super().__init__()\n\n def _apply_single(self, data):\n return data, None\n\n\nclass MultiDataProcessor(DataPreprocessor):\n def __init__(self, processors=[]):\n super().__init__()\n self.sub_processors = processors\n\n def add(self, processor):\n self.sub_processors.append(processor)\n\n def _apply_single(self, data):\n stacked_params = []\n for proc in self.sub_processors:\n data, params = proc._apply_single(data)\n stacked_params.append(params)\n\n return data, stacked_params\n\n def local_to_global_pos(self, x, params):\n assert(len(params) == len(self.sub_processors))\n for i in reversed(range(len(self.sub_processors))):\n x = self.sub_processors[i].local_to_global_pos(x, params[i])\n\n return x\n", "id": "7733249", "language": "Python", "matching_score": 3.091238021850586, "max_stars_count": 5, "path": "calamari_ocr/ocr/data_processing/data_preprocessor.py" }, { "content": "from abc import ABC, abstractmethod\n\nfrom calamari_ocr.utils import parallel_map\n\n\nclass TextProcessor(ABC):\n def __init__(self):\n super().__init__()\n\n def apply(self, txts, processes=1, progress_bar=False):\n if isinstance(txts, str):\n return self._apply_single(txts)\n elif isinstance(txts, list):\n if len(txts) == 0:\n return []\n\n return parallel_map(self._apply_single, txts, desc=\"Text Preprocessing\", processes=processes, progress_bar=progress_bar)\n else:\n raise Exception(\"Unknown instance of txts: {}. Supported list and str\".format(type(txts)))\n\n @abstractmethod\n def _apply_single(self, txt):\n pass\n\n\nclass NoopTextProcessor(TextProcessor):\n def __init__(self):\n super().__init__()\n\n def _apply_single(self, txt):\n return txt\n\n\nclass MultiTextProcessor(TextProcessor):\n def __init__(self, processors=[]):\n super().__init__()\n self.sub_processors = processors\n\n def add(self, processor):\n self.sub_processors.append(processor)\n\n def _apply_single(self, txt):\n for proc in self.sub_processors:\n txt = proc._apply_single(txt)\n\n return txt\n\n def child_by_type(self, t):\n for proc in self.sub_processors:\n if type(proc) == t:\n return proc\n\n return None\n", "id": "4490378", "language": "Python", "matching_score": 0.2105645388364792, "max_stars_count": 5, "path": "calamari_ocr/ocr/text_processing/text_processor.py" }, { "content": "from abc import ABC, abstractmethod\n\nimport numpy as np\n\nfrom calamari_ocr.proto import Prediction\n\n\nclass CTCDecoder(ABC):\n def __init__(self):\n super().__init__()\n\n @abstractmethod\n def decode(self, probabilities):\n \"\"\"\n Decoding algorithm of the individual CTCDecoder. This abstract function is reimplemented\n by the DefaultCTCDecoder and the FuzzyCTCDecoder.\n\n Parameters\n ----------\n probabilities : array_like\n Prediction probabilities of the neural net to decode or shape (length x character probability).\n The blank index must be 0.\n\n Returns\n -------\n a Prediction object\n \"\"\"\n return Prediction()\n\n def find_alternatives(self, probabilities, sentence, threshold):\n \"\"\"\n Find alternatives to the decoded sentence in the logits.\n E.g. 
if a 'c' is decoded in the range 2 to 4, this algorithm will add all characters in the interval [2, 4] to\n the output if the confidence of the character is higher than the threshold, respectively.\n\n\n Parameters\n ----------\n probabilities : array_like\n Prediction of the neural net to decode or shape (length x character probability).\n The blank index must be 0.\n sentence : list of tuple (character index, start pos, end pos)\n The decoded sentence (depends on the CTCDecoder).\n The position refer to the character position in the logits.\n threshold : float\n Minimum confidence for alternative characters to be listed.\n Returns\n -------\n a Prediction object\n\n \"\"\"\n # find alternatives\n pred = Prediction()\n pred.labels[:] = [c for c, _, _ in sentence]\n pred.is_voted_result = False\n pred.logits.rows, pred.logits.cols = probabilities.shape\n pred.logits.data[:] = probabilities.reshape([-1])\n for c, start, end in sentence:\n p = probabilities[start:end]\n p = np.max(p, axis=0)\n\n pos = pred.positions.add()\n pos.local_start = start\n pos.local_end = end\n\n for label in reversed(sorted(range(len(p)), key=lambda v: p[v])):\n if p[label] < threshold and len(pos.chars) > 0:\n break\n else:\n char = pos.chars.add()\n char.label = label\n char.probability = p[label]\n\n return pred\n", "id": "2661859", "language": "Python", "matching_score": 2.9390110969543457, "max_stars_count": 0, "path": "calamari_ocr/ocr/backends/ctc_decoder/ctc_decoder.py" }, { "content": "from calamari_ocr.ocr.backends.ctc_decoder.ctc_decoder import CTCDecoder\n\nimport numpy as np\n\n\nclass FuzzyCTCDecoder(CTCDecoder):\n def __init__(self, blank=0, blank_threshold=0.7, alternatives_threshold=0.0001):\n super().__init__()\n self._blank = blank\n self._blank_threshold = blank_threshold\n self._alternatives_threshold = alternatives_threshold\n\n def decode(self, probabilities):\n blanks = probabilities[:, self._blank] >= self._blank_threshold\n sentence = []\n # where blank is True 'character changes' are expected\n for idx in range(len(blanks)):\n if not blanks[idx]:\n if len(sentence) == 0:\n sentence.append((-1, idx, idx + 1))\n else:\n _, start, end = sentence[-1]\n if end == idx:\n del sentence[-1]\n sentence.append((-1, start, idx + 1))\n\n else:\n sentence.append((-1, idx, idx + 1))\n\n # get the best char in each range\n sentence = [(np.argmax(np.max(probabilities[start:end], axis=0)), start, end) for _, start, end in sentence]\n\n return self.find_alternatives(probabilities, sentence, self._alternatives_threshold)\n\n\nif __name__ == \"__main__\":\n d = FuzzyCTCDecoder()\n r = d.decode(np.array(np.transpose([[0.8, 0, 0.7, 0.2, 0.1], [0.1, 0.4, 0.2, 0.7, 0.8], [0.1, 0.6, 0.1, 0.1, 0.1]])))\n print(r)\n", "id": "5642377", "language": "Python", "matching_score": 0.1659415066242218, "max_stars_count": 0, "path": "calamari_ocr/ocr/backends/ctc_decoder/fuzzy_ctc_decoder.py" }, { "content": "from calamari_ocr.ocr.datasets import DataSet, DataSetMode, DatasetGenerator\nfrom calamari_ocr.utils import split_all_ext\n\nfrom google.protobuf.json_format import Parse\nfrom calamari_ocr.proto import Predictions\n\nimport codecs\nfrom typing import List\n\n\nclass ExtendedPredictionDataSet(DataSet):\n def __init__(self, texts: List[str] = None):\n super().__init__(DataSetMode.EVAL)\n\n if texts is None:\n texts = []\n\n for text in texts:\n text_bn, text_ext = split_all_ext(text)\n sample = {\n \"image_path\": None,\n \"pred_path\": text,\n \"id\": text_bn,\n }\n self._load_sample(sample)\n 
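# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original dataset entry): a minimal,
# hedged example of reducing a Calamari extended-prediction *.json file to its
# "voted" sentence, mirroring the _load_sample logic of ExtendedPredictionDataSet
# in the surrounding file. The helper name `voted_sentence` is hypothetical;
# Parse and Predictions are the same protobuf utilities that file already imports.
import codecs
from google.protobuf.json_format import Parse
from calamari_ocr.proto import Predictions

def voted_sentence(json_path):
    with codecs.open(json_path, 'r', 'utf-8') as f:
        p = Parse(str(f.read()), Predictions())
    if len(p.predictions) == 0:
        return None
    # prefer the prediction flagged as 'voted', otherwise fall back to the first one
    best = p.predictions[0]
    for vp in p.predictions:
        if vp.id == 'voted':
            best = vp
    return best.sentence
# ---------------------------------------------------------------------------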
self.add_sample(sample)\n\n def _load_sample(self, sample):\n gt_txt_path = sample['pred_path']\n if gt_txt_path is None:\n return None, None\n\n if gt_txt_path.endswith('.json'):\n with codecs.open(gt_txt_path, 'r', 'utf-8') as f:\n p = Parse(str(f.read()), Predictions())\n if len(p.predictions) == 0:\n return None, None\n\n voted_p = p.predictions[0]\n for vp in p.predictions:\n if vp.id == 'voted':\n voted_p = vp\n\n sample['best_prediction'] = voted_p\n sample['predictions'] = p\n\n return None, voted_p.sentence\n\n def create_generator(self, output_queue, epochs, text_only) -> DatasetGenerator:\n raise NotImplemented()\n\n\n\n\n\n\n\n", "id": "4320590", "language": "Python", "matching_score": 3.0223753452301025, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/extended_prediction_dataset.py" }, { "content": "import codecs\nimport os\nfrom PIL import Image\nimport numpy as np\n\nfrom calamari_ocr.utils import split_all_ext\nfrom .dataset import DataSet, DataSetMode, DatasetGenerator\n\n\nclass FileDataSetGenerator(DatasetGenerator):\n def __init__(self, mp_context, output_queue, mode: DataSetMode, samples, non_existing_as_empty):\n super().__init__(mp_context, output_queue, mode, samples)\n self._non_existing_as_empty = non_existing_as_empty\n\n def _load_sample(self, sample, text_only):\n if text_only:\n yield None, self._load_gt_txt(sample[\"text_path\"])\n else:\n yield self._load_line(sample[\"image_path\"]), \\\n self._load_gt_txt(sample[\"text_path\"])\n\n def _load_gt_txt(self, gt_txt_path):\n if gt_txt_path is None:\n return None\n\n if not os.path.exists(gt_txt_path):\n if self._non_existing_as_empty:\n return \"\"\n else:\n raise Exception(\"Text file at '{}' does not exist\".format(gt_txt_path))\n\n with codecs.open(gt_txt_path, 'r', 'utf-8') as f:\n return f.read()\n\n def _load_line(self, image_path):\n if image_path is None:\n return None\n\n if not os.path.exists(image_path):\n if self._non_existing_as_empty:\n return np.zeros((1, 1))\n else:\n raise Exception(\"Image file at '{}' does not exist\".format(image_path))\n\n try:\n img = np.asarray(Image.open(image_path))\n except:\n return None\n\n return img\n\n\nclass FileDataSet(DataSet):\n def __init__(self, mode: DataSetMode,\n images=None, texts=None,\n skip_invalid=False, remove_invalid=True,\n non_existing_as_empty=False):\n \"\"\" Create a dataset from a list of files\n\n Images or texts may be empty to create a dataset for prediction or evaluation only.\n\n Parameters\n ----------\n images : list of str, optional\n image files\n texts : list of str, optional\n text files\n skip_invalid : bool, optional\n skip invalid files\n remove_invalid : bool, optional\n remove invalid files\n non_existing_as_empty : bool, optional\n tread non existing files as empty. 
This is relevant for evaluation a dataset\n \"\"\"\n super().__init__(mode,\n skip_invalid=skip_invalid,\n remove_invalid=remove_invalid)\n self._non_existing_as_empty = non_existing_as_empty\n\n images = [] if images is None else images\n texts = [] if texts is None else texts\n\n # when evaluating, only texts are set via --gt argument --> need dummy [None] imgs\n # when predicting, only imags are set via --files argument --> need dummy [None] texts\n \n if (mode == DataSetMode.PREDICT or mode == DataSetMode.PRED_AND_EVAL) and not texts:\n texts = [None] * len(images)\n\n if (mode == DataSetMode.EVAL or mode == DataSetMode.PRED_AND_EVAL) and not images: \n images = [None] * len(texts)\n\n for image, text in zip(images, texts):\n try:\n if image is None and text is None:\n raise Exception(\"An empty data point is not allowed. Both image and text file are None\")\n\n img_bn, text_bn = None, None\n if image:\n img_path, img_fn = os.path.split(image)\n img_bn, img_ext = split_all_ext(img_fn)\n\n if not self._non_existing_as_empty and not os.path.exists(image):\n raise Exception(\"Image at '{}' must exist\".format(image))\n\n if text:\n if not self._non_existing_as_empty and not os.path.exists(text):\n raise Exception(\"Text file at '{}' must exist\".format(text))\n\n text_path, text_fn = os.path.split(text)\n text_bn, text_ext = split_all_ext(text_fn)\n\n if image and text and img_bn != text_bn:\n raise Exception(\"Expected image base name equals text base name but got '{}' != '{}'\".format(\n img_bn, text_bn\n ))\n except Exception as e:\n if self.skip_invalid:\n print(\"Invalid data: {}\".format(e))\n continue\n else:\n raise e\n\n self.add_sample({\n \"image_path\": image,\n \"text_path\": text,\n \"id\": img_bn if image else text_bn,\n })\n\n def create_generator(self, mp_context, output_queue):\n return FileDataSetGenerator(mp_context, output_queue, self.mode, self.samples(), self._non_existing_as_empty)\n\n def _load_sample(self, sample, text_only):\n if text_only:\n return None, self._load_gt_txt(sample[\"text_path\"])\n else:\n return self._load_line(sample[\"image_path\"]), \\\n self._load_gt_txt(sample[\"text_path\"])\n\n def _load_gt_txt(self, gt_txt_path):\n if gt_txt_path is None:\n return None\n\n if not os.path.exists(gt_txt_path):\n if self._non_existing_as_empty:\n return \"\"\n else:\n raise Exception(\"Text file at '{}' does not exist\".format(gt_txt_path))\n\n with codecs.open(gt_txt_path, 'r', 'utf-8') as f:\n return f.read()\n\n def _load_line(self, image_path):\n if image_path is None:\n return None\n\n if not os.path.exists(image_path):\n if self._non_existing_as_empty:\n return np.zeros((1, 1))\n else:\n raise Exception(\"Image file at '{}' does not exist\".format(image_path))\n\n try:\n img = np.asarray(Image.open(image_path))\n except:\n return None\n\n return img\n\n", "id": "1183621", "language": "Python", "matching_score": 5.547849655151367, "max_stars_count": 0, "path": "calamari_ocr/ocr/datasets/file_dataset.py" }, { "content": "from calamari_ocr.ocr.datasets import DataSet, DataSetMode, DatasetGenerator\nimport numpy as np\nimport h5py\nfrom calamari_ocr.utils import split_all_ext\n\n\nclass Hdf5DataSetGenerator(DatasetGenerator):\n def __init__(self, mp_context, output_queue, mode: DataSetMode, hdf5_files):\n super().__init__(mp_context, output_queue, mode, hdf5_files)\n\n def _load_sample(self, filename, text_only):\n f = h5py.File(filename, 'r')\n codec = list(map(chr, f['codec']))\n if text_only:\n for i, text in enumerate(f['transcripts']):\n text = 
\"\".join([codec[c] for c in text])\n yield None, text\n else:\n for i, (image, shape, text) in enumerate(zip(f['images'], f['images_dims'], f['transcripts'])):\n image = np.reshape(image, shape)\n text = \"\".join([codec[c] for c in text])\n yield image, text\n\n\nclass Hdf5DataSet(DataSet):\n def __init__(self, mode: DataSetMode,\n images=None, texts=None,\n ):\n \"\"\" Create a dataset from memory\n\n Since this dataset already contains all data in the memory, this dataset may not be loaded\n\n Parameters\n ----------\n images : list of images\n the images of the dataset\n texts : list of str\n the texts of this dataset\n \"\"\"\n super().__init__(mode)\n\n images = images if images is not None else []\n texts = texts if texts is not None else []\n self.filenames = [i for i in set(images + texts) if i is not None]\n\n self.prediction = None\n if mode == DataSetMode.PREDICT or mode == DataSetMode.PRED_AND_EVAL:\n self.prediction = {}\n\n for filename in self.filenames:\n f = h5py.File(filename, 'r')\n codec = list(map(chr, f['codec']))\n if mode == DataSetMode.PREDICT or mode == DataSetMode.PRED_AND_EVAL:\n self.prediction[filename] = {'transcripts': [], 'codec': codec}\n\n # create empty samples for id and correct dataset size\n for i, text in enumerate(f['transcripts']):\n self.add_sample({\n \"image\": None,\n \"text\": \"\",\n \"id\": str(i),\n \"filename\": filename,\n })\n\n def store_text(self, sentence, sample, output_dir, extension):\n codec = self.prediction[sample['filename']]['codec']\n self.prediction[sample['filename']]['transcripts'].append(list(map(codec.index, sentence)))\n\n def store(self, extension):\n for filename, data in self.prediction.items():\n texts = data['transcripts']\n codec = data['codec']\n basename, ext = split_all_ext(filename)\n with h5py.File(basename + extension, 'w') as file:\n dt = h5py.special_dtype(vlen=np.dtype('int32'))\n file.create_dataset('transcripts', (len(texts),), dtype=dt)\n file['transcripts'][...] 
= texts\n file.create_dataset('codec', data=list(map(ord, codec)))\n\n def create_generator(self, mp_context, output_queue) -> DatasetGenerator:\n return Hdf5DataSetGenerator(mp_context, output_queue, self.mode, self.filenames)\n", "id": "11868772", "language": "Python", "matching_score": 2.8134090900421143, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/hdf5_dataset/dataset.py" }, { "content": "from calamari_ocr.ocr.datasets.dataset import DataSet, DataSetMode, DatasetGenerator\nfrom calamari_ocr.proto import TextGeneratorParameters, LineGeneratorParameters\nfrom calamari_ocr.ocr.line_generator import LineGenerator\nfrom calamari_ocr.ocr.text_generation.text_generator import TextGenerator\nfrom multiprocessing import Process, Queue, Manager\nimport numpy as np\nimport random\nfrom typing import Generator, Tuple\n\n\nclass LineGeneratorProcess(Process):\n def __init__(self, output_queue: Queue, text_generator, line_generator, name=-1):\n super().__init__(daemon=True)\n self.text_generator = TextGenerator(text_generator)\n self.line_generator = LineGenerator(line_generator)\n self.output_queue = output_queue\n self.text_only = False\n self.name = \"{}\".format(name)\n\n def _handle(self):\n try:\n words = self.text_generator.generate()\n image = self.line_generator.draw(words) if not self.text_only else None\n self.output_queue.put((image, TextGenerator.words_to_unformatted_text(words)))\n except ValueError as e:\n print(\"Exception during line generation:\", e)\n\n def run(self):\n random.seed()\n np.random.seed()\n try:\n while True:\n self._handle()\n except (EOFError, BrokenPipeError, ConnectionResetError):\n # queue closed, stop the process\n return\n\n\nclass GeneratedLineDatasetGenerator(DatasetGenerator):\n def __init__(self, mp_context, output_queue, mode: DataSetMode, samples,\n input_queue,\n ):\n super().__init__(mp_context, output_queue, mode, samples)\n self.input_queue = input_queue\n\n def _load_sample(self, sample, text_only) -> Generator[Tuple[np.array, str], None, None]:\n yield self.input_queue.get()\n\n\nclass GeneratedLineDataset(DataSet):\n def __init__(self,\n mode: DataSetMode,\n args: dict,\n ):\n \"\"\" Create a dataset from memory\n Since this dataset already contains all data in the memory, this dataset may not be loaded\n Parameters\n ----------\n \"\"\"\n super().__init__(mode)\n\n self.loaded = False\n self.lines_per_epoch = 10000\n self._samples = [{'id': '{}'.format(i)} for i in range(self.lines_per_epoch)]\n self.text_generator_params = args.get('text_generator_params', TextGeneratorParameters())\n self.line_generator_params = args.get('line_generator_params', LineGeneratorParameters())\n self.manager = Manager()\n self.data_queue = self.manager.Queue(50)\n self.data_generators = [\n LineGeneratorProcess(\n self.data_queue,\n self.text_generator_params,\n self.line_generator_params,\n \"{}\".format(i),\n ) for i in range(8)\n ]\n for d in self.data_generators:\n d.start()\n\n def _load_sample(self, sample, text_only):\n return self.data_queue.get()\n\n def create_generator(self, mp_context, output_queue) -> DatasetGenerator:\n return GeneratedLineDatasetGenerator(mp_context, output_queue, self.mode, self.samples(), self.data_queue)\n\n\nif __name__ == \"__main__\":\n args = dict()\n\n params = TextGeneratorParameters()\n params.word_length_mean = 11\n params.word_length_sigma = 3\n params.number_of_words_mean = 7\n params.number_of_words_mean = 4\n params.word_separator = \" \"\n params.sub_script_p = 0.2\n params.super_script_p = 0.2\n 
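# ---------------------------------------------------------------------------
# Illustrative sketch only (an assumption, not taken from the repository): the
# producer/consumer pattern behind LineGeneratorProcess in the surrounding file
# boils down to daemon processes that keep pushing (image, text) pairs into a
# shared queue while the dataset pulls them on demand. All names below are
# placeholders for illustration.
from multiprocessing import Process, Queue

def produce(queue):
    i = 0
    while True:
        # stand-ins for a rendered line image and its transcription
        queue.put(("image-%d" % i, "text-%d" % i))
        i += 1

if __name__ == "__main__":
    q = Queue(50)
    workers = [Process(target=produce, args=(q,), daemon=True) for _ in range(2)]
    for w in workers:
        w.start()
    for _ in range(3):
        print(q.get())
# ---------------------------------------------------------------------------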
params.letter_spacing_p = 0.5\n params.letter_spacing_mean = 0.5\n params.letter_spacing_sigma = 0.05\n params.bold_p = 0.5\n params.italic_p = 0.5\n params.codec.charset.extend(list(\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789{}[]()_-.;:'\\\"\"\n \"éèíìóòúù\"\n \"ăȁĕȅĭŏőŭű\"\n \"āĀǟǞēĒěīĪōŌȫȪūŪǖǕ\"\n \"ẹḲḳ\"\n \"αβγδεζηθικλμνξοπρστυφχψω\"\n \"½\"\n \"—\"\n \"–\"\n \"℔\"\n \"šŠ\"\n \"„“\"\n \"†\"\n ))\n args['text_generator_params'] = params\n\n params = LineGeneratorParameters()\n params.font_size = 48\n params.min_script_offset = -0.5\n params.max_script_offset = 0.5\n params.fonts.extend(['Junicode.ttf', 'DejaVuSerif.ttf'])\n args['line_generator_params'] = params\n\n dataset = GeneratedLineDataset(DataSetMode.TRAIN, args)\n\n import matplotlib.pyplot as plt\n line, text = dataset.load_single_sample({}, None)\n print(text)\n plt.imshow(line)\n plt.title(text)\n plt.show()", "id": "1327903", "language": "Python", "matching_score": 4.68704080581665, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/generated_line_dataset/dataset.py" }, { "content": "from calamari_ocr.proto import TextGeneratorParameters\nfrom calamari_ocr.ocr.line_generator import Script, Word, FontVariantType\nimport numpy as np\nfrom calamari_ocr.ocr.codec import Codec\n\nclass TextGenerator:\n @staticmethod\n def words_to_unformatted_text(words):\n out = ''\n for word in words:\n out += word.text\n\n return out\n\n def __init__(self, text_generator_params: TextGeneratorParameters):\n self.params = text_generator_params\n self.charset = list(text_generator_params.charset)\n self.super_charset = list(text_generator_params.super_charset) if len(text_generator_params.super_charset) > 0 else self.charset\n self.sub_charset = list(text_generator_params.sub_charset) if len(text_generator_params.sub_charset) > 0 else self.charset\n assert(self.params.sub_script_p + self.params.super_script_p <= 1)\n\n def generate(self):\n number_of_words = int(np.round(max(1, np.random.normal(self.params.number_of_words_mean, self.params.number_of_words_sigma))))\n\n out = []\n for i in range(number_of_words):\n word_length = int(np.round(max(1, np.random.normal(self.params.word_length_mean, self.params.word_length_sigma))))\n rnd = np.random.rand(10)\n\n if rnd[0] < self.params.sub_script_p:\n script = Script.SUB\n elif rnd[0] < self.params.sub_script_p + self.params.super_script_p:\n script = Script.SUPER\n else:\n script = Script.NORMAL\n\n if rnd[1] < self.params.bold_p and rnd[2] < self.params.italic_p:\n variant = FontVariantType.BOLD_ITALICS\n elif rnd[1] < self.params.bold_p:\n variant = FontVariantType.BOLD\n elif rnd[2] < self.params.italic_p:\n variant = FontVariantType.ITALIC\n else:\n variant = FontVariantType.NORMAL\n\n if rnd[3] < self.params.letter_spacing_p:\n letter_spacing = np.random.normal(self.params.letter_spacing_mean, self.params.letter_spacing_sigma)\n else:\n letter_spacing = 0\n\n if script == Script.NORMAL and len(out) > 0:\n out.append(\n Word(self.params.word_separator, Script.NORMAL, 0, FontVariantType.NORMAL)\n )\n\n charset = [self.charset, self.super_charset, self.sub_charset][script]\n s = \"\".join(np.random.choice(charset, word_length))\n s = s.strip()\n\n out.append(\n Word(s, script, letter_spacing, variant)\n )\n\n return out\n\n\nif __name__ == \"__main__\":\n params = TextGeneratorParameters()\n params.word_length_mean = 11\n params.word_length_sigma = 3\n params.number_of_words_mean = 7\n params.number_of_words_mean = 4\n params.word_separator = \" \"\n 
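# ---------------------------------------------------------------------------
# Illustrative sketch only (hypothetical helper, not part of the original file):
# TextGenerator.generate() in the surrounding file draws the number of words and
# each word's length from a normal distribution, rounds the draw and clamps it to
# at least 1, roughly like this:
import numpy as np

def sample_count(mean, sigma):
    # e.g. mean=7, sigma=4 for words per line; mean=11, sigma=3 for word length
    return int(np.round(max(1, np.random.normal(mean, sigma))))

words_per_line = sample_count(7, 4)
word_lengths = [sample_count(11, 3) for _ in range(words_per_line)]
# ---------------------------------------------------------------------------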
params.sub_script_p = 0.0\n params.super_script_p = 0.2\n params.letter_spacing_p = 0.5\n params.letter_spacing_mean = 1\n params.letter_spacing_sigma = 0.1\n params.charset.extend(list(\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789{}[]()_-.;:'\\\" \"))\n params.super_charset.extend(list(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"))\n gen = TextGenerator(params)\n\n print(gen.generate())\n print(gen.generate())\n print(gen.generate())\n\n text = gen.generate()\n print(text)\n print(TextGenerator.words_to_unformatted_text(text))\n\n", "id": "712729", "language": "Python", "matching_score": 0.23002223670482635, "max_stars_count": 5, "path": "calamari_ocr/ocr/text_generation/text_generator.py" }, { "content": "from enum import Enum\nfrom typing import List\n\nfrom .dataset import RawDataSet, DataSetMode\nfrom .file_dataset import FileDataSet\nfrom .abbyy_dataset import AbbyyDataSet\nfrom .pagexml_dataset import PageXMLDataset\nfrom .hdf5_dataset import Hdf5DataSet\nfrom calamari_ocr.utils import keep_files_with_same_file_name\nfrom .datasetype import DataSetType\n\n\n\ndef create_dataset(type: DataSetType,\n mode: DataSetMode,\n images: List[str] = None,\n texts: List[str] = None,\n skip_invalid=False,\n remove_invalid=True,\n non_existing_as_empty=False,\n args: dict = None,\n ):\n if images is None:\n images = []\n\n if texts is None:\n texts = []\n\n if args is None:\n args = dict()\n\n if DataSetType.files(type):\n if images:\n images.sort()\n\n if texts:\n texts.sort()\n\n if images and texts and len(images) > 0 and len(texts) > 0:\n images, texts = keep_files_with_same_file_name(images, texts)\n\n if type == DataSetType.RAW:\n return RawDataSet(mode, images, texts)\n\n elif type == DataSetType.FILE:\n return FileDataSet(mode, images, texts,\n skip_invalid=skip_invalid,\n remove_invalid=remove_invalid,\n non_existing_as_empty=non_existing_as_empty)\n elif type == DataSetType.ABBYY:\n return AbbyyDataSet(mode, images, texts,\n skip_invalid=skip_invalid,\n remove_invalid=remove_invalid,\n non_existing_as_empty=non_existing_as_empty)\n elif type == DataSetType.PAGEXML:\n return PageXMLDataset(mode, images, texts,\n skip_invalid=skip_invalid,\n remove_invalid=remove_invalid,\n non_existing_as_empty=non_existing_as_empty,\n args=args)\n elif type == DataSetType.HDF5:\n return Hdf5DataSet(mode, images, texts)\n elif type == DataSetType.EXTENDED_PREDICTION:\n from .extended_prediction_dataset import ExtendedPredictionDataSet\n return ExtendedPredictionDataSet(texts=texts)\n elif type == DataSetType.GENERATED_LINE:\n from .generated_line_dataset import GeneratedLineDataset\n return GeneratedLineDataset(mode, args=args)\n else:\n raise Exception(\"Unsupported dataset type {}\".format(type))\n", "id": "7370029", "language": "Python", "matching_score": 4.383353233337402, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/dataset_factory.py" }, { "content": "from enum import IntEnum\n\n\nclass DataSetType(IntEnum):\n RAW = 0\n FILE = 1\n ABBYY = 2\n PAGEXML = 3\n HDF5 = 4\n EXTENDED_PREDICTION = 5\n GENERATED_LINE = 6\n\n def __str__(self):\n return self.name\n\n @staticmethod\n def from_string(s):\n try:\n return DataSetType[s]\n except KeyError:\n raise ValueError()\n\n @staticmethod\n def files(type):\n files_meta = {\n DataSetType.RAW: False,\n DataSetType.FILE: True,\n DataSetType.ABBYY: True,\n DataSetType.PAGEXML: True,\n DataSetType.EXTENDED_PREDICTION: True,\n DataSetType.HDF5: False,\n DataSetType.GENERATED_LINE: False,\n }\n\n return files_meta[type]\n\n @staticmethod\n def 
gt_extension(type):\n return {\n DataSetType.RAW: None,\n DataSetType.FILE: \".gt.txt\",\n DataSetType.ABBYY: \".abbyy.xml\",\n DataSetType.PAGEXML: \".xml\",\n DataSetType.EXTENDED_PREDICTION: \".json\",\n DataSetType.HDF5: \".h5\",\n DataSetType.GENERATED_LINE: None,\n }[type]\n\n @staticmethod\n def pred_extension(type):\n return {\n DataSetType.RAW: None,\n DataSetType.FILE: \".pred.txt\",\n DataSetType.ABBYY: \".pred.abbyy.xml\",\n DataSetType.PAGEXML: \".pred.xml\",\n DataSetType.EXTENDED_PREDICTION: \".json\",\n DataSetType.HDF5: \".pred.h5\",\n DataSetType.GENERATED_LINE: None,\n }[type]\n\n", "id": "1474218", "language": "Python", "matching_score": 1.2917968034744263, "max_stars_count": 5, "path": "calamari_ocr/ocr/datasets/datasetype.py" }, { "content": "from argparse import ArgumentParser\nimport os\n\nfrom calamari_ocr.utils import glob_all, split_all_ext, keep_files_with_same_file_name\nfrom calamari_ocr.ocr import DataSetType, create_dataset, DataSetMode\nfrom calamari_ocr.ocr.trainer import Trainer\n\nfrom calamari_ocr.proto import CheckpointParams\n\nfrom google.protobuf import json_format\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\"--checkpoint\", type=str, required=True,\n help=\"The checkpoint used to resume\")\n\n # validation files\n parser.add_argument(\"--validation\", type=str, nargs=\"+\",\n help=\"Validation line files used for early stopping\")\n parser.add_argument(\"--validation_text_files\", nargs=\"+\", default=None,\n help=\"Optional list of validation GT files if they are in other directory\")\n parser.add_argument(\"--validation_extension\", default=None,\n help=\"Default extension of the gt files (expected to exist in same dir)\")\n parser.add_argument(\"--validation_dataset\", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)\n\n # input files\n parser.add_argument(\"--files\", nargs=\"+\",\n help=\"List all image files that shall be processed. 
Ground truth fils with the same \"\n \"base name but with '.gt.txt' as extension are required at the same location\")\n parser.add_argument(\"--text_files\", nargs=\"+\", default=None,\n help=\"Optional list of GT files if they are in other directory\")\n parser.add_argument(\"--gt_extension\", default=None,\n help=\"Default extension of the gt files (expected to exist in same dir)\")\n parser.add_argument(\"--dataset\", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)\n parser.add_argument(\"--no_skip_invalid_gt\", action=\"store_true\",\n help=\"Do no skip invalid gt, instead raise an exception.\")\n\n args = parser.parse_args()\n\n if args.gt_extension is None:\n args.gt_extension = DataSetType.gt_extension(args.dataset)\n\n if args.validation_extension is None:\n args.validation_extension = DataSetType.gt_extension(args.validation_dataset)\n\n # Training dataset\n print(\"Resolving input files\")\n input_image_files = sorted(glob_all(args.files))\n if not args.text_files:\n gt_txt_files = [split_all_ext(f)[0] + args.gt_extension for f in input_image_files]\n else:\n gt_txt_files = sorted(glob_all(args.text_files))\n input_image_files, gt_txt_files = keep_files_with_same_file_name(input_image_files, gt_txt_files)\n for img, gt in zip(input_image_files, gt_txt_files):\n if split_all_ext(os.path.basename(img))[0] != split_all_ext(os.path.basename(gt))[0]:\n raise Exception(\"Expected identical basenames of file: {} and {}\".format(img, gt))\n\n if len(set(gt_txt_files)) != len(gt_txt_files):\n raise Exception(\"Some image are occurring more than once in the data set.\")\n\n dataset = create_dataset(\n args.dataset,\n DataSetMode.TRAIN,\n images=input_image_files,\n texts=gt_txt_files,\n skip_invalid=not args.no_skip_invalid_gt\n )\n print(\"Found {} files in the dataset\".format(len(dataset)))\n\n # Validation dataset\n if args.validation:\n print(\"Resolving validation files\")\n validation_image_files = glob_all(args.validation)\n if not args.validation_text_files:\n val_txt_files = [split_all_ext(f)[0] + args.validation_extension for f in validation_image_files]\n else:\n val_txt_files = sorted(glob_all(args.validation_text_files))\n validation_image_files, val_txt_files = keep_files_with_same_file_name(validation_image_files, val_txt_files)\n for img, gt in zip(validation_image_files, val_txt_files):\n if split_all_ext(os.path.basename(img))[0] != split_all_ext(os.path.basename(gt))[0]:\n raise Exception(\"Expected identical basenames of validation file: {} and {}\".format(img, gt))\n\n if len(set(val_txt_files)) != len(val_txt_files):\n raise Exception(\"Some validation images are occurring more than once in the data set.\")\n\n validation_dataset = create_dataset(\n args.validation_dataset,\n DataSetMode.TRAIN,\n images=validation_image_files,\n texts=val_txt_files,\n skip_invalid=not args.no_skip_invalid_gt)\n print(\"Found {} files in the validation dataset\".format(len(validation_dataset)))\n else:\n validation_dataset = None\n\n print(\"Resuming training\")\n with open(args.checkpoint + '.json', 'r') as f:\n checkpoint_params = json_format.Parse(f.read(), CheckpointParams())\n\n trainer = Trainer(checkpoint_params, dataset,\n validation_dataset=validation_dataset,\n weights=args.checkpoint)\n trainer.train(progress_bar=True)\n\n\nif __name__ == \"__main__\":\n main()", "id": "8755826", "language": "Python", "matching_score": 4.722087860107422, "max_stars_count": 5, "path": "calamari_ocr/scripts/resume_training.py" }, { "content": "import 
matplotlib.pyplot as plt\nimport argparse\nfrom calamari_ocr.ocr.datasets import create_dataset, DataSetType, DataSetMode\nfrom calamari_ocr.ocr.datasets.input_dataset import StreamingInputDataset\nfrom calamari_ocr import __version__\nfrom calamari_ocr.utils import glob_all, split_all_ext, keep_files_with_same_file_name\nfrom calamari_ocr.ocr.text_processing import text_processor_from_proto\nfrom calamari_ocr.ocr.data_processing import data_processor_from_proto\nfrom calamari_ocr.proto import DataPreprocessorParams, TextProcessorParams\nfrom calamari_ocr.ocr.text_processing import \\\n default_text_normalizer_params, default_text_regularizer_params\nimport os\nfrom calamari_ocr.ocr.augmentation.data_augmenter import SimpleDataAugmenter\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--version', action='version', version='%(prog)s v' + __version__)\n parser.add_argument(\"--files\", nargs=\"+\",\n help=\"List all image files that shall be processed. Ground truth fils with the same \"\n \"base name but with '.gt.txt' as extension are required at the same location\",\n required=True)\n parser.add_argument(\"--text_files\", nargs=\"+\", default=None,\n help=\"Optional list of GT files if they are in other directory\")\n parser.add_argument(\"--gt_extension\", default=None,\n help=\"Default extension of the gt files (expected to exist in same dir)\")\n parser.add_argument(\"--dataset\", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)\n parser.add_argument(\"--line_height\", type=int, default=48,\n help=\"The line height\")\n parser.add_argument(\"--pad\", type=int, default=16,\n help=\"Padding (left right) of the line\")\n parser.add_argument(\"--processes\", type=int, default=1,\n help=\"The number of threads to use for all operations\")\n\n parser.add_argument(\"--n_cols\", type=int, default=1)\n parser.add_argument(\"--n_rows\", type=int, default=5)\n parser.add_argument(\"--select\", type=int, nargs=\"+\", default=[])\n\n # text normalization/regularization\n parser.add_argument(\"--n_augmentations\", type=float, default=0,\n help=\"Amount of data augmentation per line (done before training). If this number is < 1 \"\n \"the amount is relative.\")\n parser.add_argument(\"--text_regularization\", type=str, nargs=\"+\", default=[\"extended\"],\n help=\"Text regularization to apply.\")\n parser.add_argument(\"--text_normalization\", type=str, default=\"NFC\",\n help=\"Unicode text normalization to apply. 
Defaults to NFC\")\n parser.add_argument(\"--data_preprocessing\", nargs=\"+\", type=DataPreprocessorParams.Type.Value,\n choices=DataPreprocessorParams.Type.values(), default=[DataPreprocessorParams.DEFAULT_NORMALIZER])\n\n args = parser.parse_args()\n\n # Text/Data processing\n if args.data_preprocessing is None or len(args.data_preprocessing) == 0:\n args.data_preprocessing = [DataPreprocessorParams.DEFAULT_NORMALIZER]\n\n data_preprocessor = DataPreprocessorParams()\n data_preprocessor.type = DataPreprocessorParams.MULTI_NORMALIZER\n for preproc in args.data_preprocessing:\n pp = data_preprocessor.children.add()\n pp.type = preproc\n pp.line_height = args.line_height\n pp.pad = args.pad\n\n # Text pre processing (reading)\n text_preprocessor = TextProcessorParams()\n text_preprocessor.type = TextProcessorParams.MULTI_NORMALIZER\n default_text_normalizer_params(text_preprocessor.children.add(), default=args.text_normalization)\n default_text_regularizer_params(text_preprocessor.children.add(), groups=args.text_regularization)\n strip_processor_params = text_preprocessor.children.add()\n strip_processor_params.type = TextProcessorParams.STRIP_NORMALIZER\n\n text_preprocessor = text_processor_from_proto(text_preprocessor)\n data_preprocessor = data_processor_from_proto(data_preprocessor)\n\n print(\"Resolving input files\")\n input_image_files = sorted(glob_all(args.files))\n if not args.text_files:\n if args.gt_extension:\n gt_txt_files = [split_all_ext(f)[0] + args.gt_extension for f in input_image_files]\n else:\n gt_txt_files = [None] * len(input_image_files)\n else:\n gt_txt_files = sorted(glob_all(args.text_files))\n input_image_files, gt_txt_files = keep_files_with_same_file_name(input_image_files, gt_txt_files)\n for img, gt in zip(input_image_files, gt_txt_files):\n if split_all_ext(os.path.basename(img))[0] != split_all_ext(os.path.basename(gt))[0]:\n raise Exception(\"Expected identical basenames of file: {} and {}\".format(img, gt))\n\n if len(set(gt_txt_files)) != len(gt_txt_files):\n raise Exception(\"Some image are occurring more than once in the data set.\")\n\n dataset = create_dataset(\n args.dataset,\n DataSetMode.TRAIN,\n images=input_image_files,\n texts=gt_txt_files,\n )\n\n if len(args.select) == 0:\n args.select = range(len(dataset.samples()))\n dataset._samples = dataset.samples()\n else:\n dataset._samples = [dataset.samples()[i] for i in args.select]\n\n samples = dataset.samples()\n\n print(\"Found {} files in the dataset\".format(len(dataset)))\n\n with StreamingInputDataset(dataset,\n data_preprocessor,\n text_preprocessor,\n SimpleDataAugmenter(),\n args.n_augmentations,\n ) as input_dataset:\n f, ax = plt.subplots(args.n_rows, args.n_cols, sharey='all')\n row, col = 0, 0\n for i, (id, sample) in enumerate(zip(args.select, input_dataset.generator(args.processes))):\n line, text, params = sample\n if args.n_cols == 1:\n ax[row].imshow(line.transpose())\n ax[row].set_title(\"ID: {}\\n{}\".format(id, text))\n else:\n ax[row, col].imshow(line.transpose())\n ax[row, col].set_title(\"ID: {}\\n{}\".format(id, text))\n\n row += 1\n if row == args.n_rows:\n row = 0\n col += 1\n\n if col == args.n_cols or i == len(samples) - 1:\n plt.show()\n f, ax = plt.subplots(args.n_rows, args.n_cols, sharey='all')\n row, col = 0, 0\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "7537935", "language": "Python", "matching_score": 3.116955518722534, "max_stars_count": 0, "path": "calamari_ocr/scripts/dataset_viewer.py" }, { "content": "import unittest\nimport os\n\nfrom 
calamari_ocr.ocr import DataSetType\nfrom calamari_ocr.proto import DataPreprocessorParams\nfrom calamari_ocr.scripts.train import run\nfrom calamari_ocr.utils import glob_all\n\nthis_dir = os.path.dirname(os.path.realpath(__file__))\n\nclass Attrs():\n def __init__(self):\n self.dataset = DataSetType.FILE\n self.gt_extension = DataSetType.gt_extension(self.dataset)\n self.files = glob_all([os.path.join(this_dir, \"data\", \"uw3_50lines\", \"train\", \"*.png\")])\n self.seed = 24\n self.backend = \"tensorflow\"\n self.network = \"cnn=40:3x3,pool=2x2,cnn=60:3x3,pool=2x2,lstm=200,dropout=0.5\"\n self.line_height = 48\n self.pad = 16\n self.num_threads = 1\n self.display = 1\n self.batch_size = 1\n self.checkpoint_frequency = 1000\n self.max_iters = 1000\n self.stats_size = 100\n self.no_skip_invalid_gt = False\n self.no_progress_bars = True\n self.output_dir = os.path.join(this_dir, \"test_models\")\n self.output_model_prefix = \"uw3_50lines\"\n self.bidi_dir = None\n self.weights = None\n self.whitelist_files = []\n self.whitelist = []\n self.gradient_clipping_mode = \"AUTO\"\n self.gradient_clipping_const = 0\n self.validation = None\n self.validation_dataset = DataSetType.FILE\n self.validation_extension = None\n self.early_stopping_frequency = -1\n self.early_stopping_nbest = 10\n self.early_stopping_best_model_prefix = \"uw3_50lines_best\"\n self.early_stopping_best_model_output_dir = self.output_dir\n self.n_augmentations = 0\n self.fuzzy_ctc_library_path = \"\"\n self.num_inter_threads = 0\n self.num_intra_threads = 0\n self.text_regularization = [\"extended\"]\n self.text_normalization = \"NFC\"\n self.text_generator_params = None\n self.line_generator_params = None\n self.pagexml_text_index = 0\n self.text_files = None\n self.only_train_on_augmented = False\n self.data_preprocessing = [DataPreprocessorParams.DEFAULT_NORMALIZER]\n self.shuffle_buffer_size = 1000\n self.keep_loaded_codec = False\n self.train_data_on_the_fly = False\n self.validation_data_on_the_fly = False\n self.no_auto_compute_codec = False\n\n\nclass TestSimpleTrain(unittest.TestCase):\n def test_simple_train(self):\n args = Attrs()\n run(args)\n\n\nif __name__ == \"__main__\":\n unittest.main()", "id": "7548540", "language": "Python", "matching_score": 4.964570045471191, "max_stars_count": 0, "path": "calamari_ocr/test/test_simple_train.py" }, { "content": "import unittest\nimport os\n\nfrom calamari_ocr.test.test_simple_train import Attrs, this_dir, run, glob_all\n\nclass TestValidationTrain(unittest.TestCase):\n def test_validation_train(self):\n args = Attrs()\n args.validation = glob_all([os.path.join(this_dir, \"data\", \"uw3_50lines\", \"test\", \"*.png\")])\n args.validation_text_files = None\n args.max_iters = 30000\n\n run(args)\n\n def test_validation_pretrain(self):\n args = Attrs()\n args.validation = glob_all([os.path.join(this_dir, \"data\", \"uw3_50lines\", \"test\", \"*.png\")])\n args.validation_text_files = None\n args.max_iters = 1000\n args.early_stopping_best_model_prefix = args.early_stopping_best_model_prefix + \"pretrain_\"\n args.weights = os.path.join(this_dir, \"test_models\", \"uw3_50lines_best.ckpt\")\n\n run(args)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "12365828", "language": "Python", "matching_score": 0.9406832456588745, "max_stars_count": 5, "path": "calamari_ocr/test/test_validation_train.py" }, { "content": "import argparse\n\nfrom calamari_ocr.utils import glob_all\nfrom tqdm import tqdm\nimport os\n\nusage_str = 'python tensorflow_rename_variables.py 
--checkpoints=path_to_models.json ' \\\n '--replace_from=substr --replace_to=substr --add_prefix=abc --dry_run'\n\n\ndef rename(checkpoint, replace_from, replace_to, add_prefix, dry_run, force_prefix=False):\n import tensorflow as tf\n tf.reset_default_graph()\n with tf.Session() as sess:\n for var_name, _ in tf.contrib.framework.list_variables(checkpoint):\n # Load the variable\n var = tf.contrib.framework.load_variable(checkpoint, var_name)\n\n # Set the new name\n new_name = var_name\n if None not in [replace_from, replace_to]:\n new_name = new_name.replace(replace_from, replace_to)\n if add_prefix:\n if force_prefix or not new_name.startswith(add_prefix):\n # force prefix or add prefix if it does not exist yet\n new_name = add_prefix + new_name\n\n if dry_run:\n print('%s would be renamed to %s.' % (var_name, new_name))\n else:\n if var_name == new_name:\n print('No change for {}'.format(var_name))\n else:\n print('Renaming %s to %s.' % (var_name, new_name))\n\n # Rename the variable\n tf.Variable(var, name=new_name)\n\n if not dry_run:\n # Save the variables\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n saver.save(sess, checkpoint)\n\n tf.reset_default_graph()\n\n\ndef main():\n parser = argparse.ArgumentParser(description=usage_str)\n parser.add_argument('--checkpoints', nargs='+', type=str, required=True)\n parser.add_argument('--replace_from')\n parser.add_argument('--replace_to')\n parser.add_argument('--add_prefix')\n parser.add_argument('--dry_run', action='store_true')\n\n args = parser.parse_args()\n\n for ckpt in tqdm(glob_all(args.checkpoints)):\n ckpt = os.path.splitext(ckpt)[0]\n rename(ckpt, args.replace_from, args.replace_to, args.add_prefix, args.dry_run)\n\n\nif __name__ == '__main__':\n main()\n", "id": "7675579", "language": "Python", "matching_score": 2.7268662452697754, "max_stars_count": 0, "path": "calamari_ocr/scripts/tensorflow_rename_variables.py" }, { "content": "from calamari_ocr.proto import CheckpointParams\nimport calamari_ocr.scripts.tensorflow_rename_variables as tensorflow_rename_variables\n\nimport json\nfrom google.protobuf import json_format\nimport os\n\n\nclass Checkpoint:\n VERSION = 1\n\n def __init__(self, json_path: str, auto_update=True, dry_run=False):\n self.json_path = json_path if json_path.endswith('.json') else json_path + '.json'\n self.json_path = os.path.abspath(os.path.expanduser(os.path.expandvars(self.json_path)))\n self.ckpt_path = os.path.splitext(self.json_path)[0]\n self.dry_run = dry_run\n\n # do not parse as proto, since some parameters might have changed\n with open(self.json_path, 'r') as f:\n self.json = json.load(f)\n\n self.version = self.json['version'] if 'version' in self.json else 0\n\n if self.version != Checkpoint.VERSION:\n if auto_update:\n self.update_checkpoint()\n else:\n raise Exception(\"Version of checkpoint is {} but {} is required. 
Please upgrade the model or \"\n \"set the auto update flag.\".format(self.version, Checkpoint.VERSION))\n\n else:\n print(\"Checkpoint version {} is up-to-date.\".format(self.version))\n\n with open(self.json_path, 'r') as f:\n self.checkpoint = json_format.Parse(f.read(), CheckpointParams())\n\n def update_checkpoint(self):\n while self.version != Checkpoint.VERSION:\n self._single_upgrade()\n\n print(\"Successfully upgraded checkpoint version to {}\".format(Checkpoint.VERSION))\n\n def _single_upgrade(self):\n print('Upgrading from version {}'.format(self.version))\n if self.version == 0:\n if self.json['model']['network']['backend'].get('type', 'TENSORFLOW') == 'TENSORFLOW':\n tensorflow_rename_variables.rename(self.ckpt_path, '', '', 'cnn_lstm/',\n dry_run=self.dry_run, force_prefix=False)\n\n pass\n\n self.version += 1\n self._update_json_version()\n\n def _update_json_version(self):\n self.json['version'] = self.version\n\n if not self.dry_run:\n s = json.dumps(self.json, indent=2)\n\n with open(self.json_path, 'w') as f:\n f.write(s)\n\n\n", "id": "2164084", "language": "Python", "matching_score": 1.0699455738067627, "max_stars_count": 0, "path": "calamari_ocr/ocr/checkpoint.py" }, { "content": "import sys\nimport tensorflow as tf\nimport tensorflow.contrib.cudnn_rnn as cudnn_rnn\nfrom tensorflow.python.ops import ctc_ops\nimport numpy as np\nimport json\nfrom typing import Generator\n\nfrom calamari_ocr.ocr.backends.model_interface import ModelInterface, NetworkPredictionResult\nfrom calamari_ocr.proto import LayerParams, NetworkParams\n\n\nclass TensorflowModel(ModelInterface):\n def __init__(self, network_proto, graph, session, graph_type=\"train\", batch_size=1, reuse_weights=False,\n input_dataset=None, codec=None, processes=1):\n super().__init__(network_proto, graph_type, batch_size,\n input_dataset=input_dataset, codec=codec, processes=processes)\n self.graph = graph\n self.session = session\n self.gpu_available = any([d.device_type == \"GPU\" for d in self.session.list_devices()])\n\n # load fuzzy ctc module if available\n if len(network_proto.backend.fuzzy_ctc_library_path) > 0 and network_proto.ctc == NetworkParams.CTC_FUZZY:\n from calamari_ocr.ocr.backends.tensorflow_backend.tensorflow_fuzzy_ctc_loader import load as load_fuzzy\n self.fuzzy_module = load_fuzzy(network_proto.backend.fuzzy_ctc_library_path)\n else:\n self.fuzzy_module = None\n\n # create graph\n with self.graph.as_default():\n tf.set_random_seed(self.network_proto.backend.random_seed)\n\n # variables can also be used as placeholder directly\n self.inputs, self.input_seq_len, self.targets, self.dropout_rate, self.data_iterator, self.serialized_params = \\\n self.create_dataset_inputs(batch_size, network_proto.features, network_proto.backend.shuffle_buffer_size)\n\n # create network and solver (if train)\n if graph_type == \"train\":\n self.output_seq_len, self.time_major_logits, self.time_major_softmax, self.logits, self.softmax, self.decoded, self.sparse_decoded, self.scale_factor = \\\n self.create_network(self.inputs, self.input_seq_len, self.dropout_rate, reuse_variables=reuse_weights)\n self.train_op, self.loss, self.cer = self.create_solver(self.targets, self.time_major_logits, self.logits, self.output_seq_len, self.decoded)\n elif graph_type == \"test\":\n self.output_seq_len, self.time_major_logits, self.time_major_softmax, self.logits, self.softmax, self.decoded, self.sparse_decoded, self.scale_factor = \\\n self.create_network(self.inputs, self.input_seq_len, self.dropout_rate, 
reuse_variables=reuse_weights)\n self.cer = self.create_cer(self.decoded, self.targets)\n else:\n self.output_seq_len, self.time_major_logits, self.time_major_softmax, self.logits, self.softmax, self.decoded, self.sparse_decoded, self.scale_factor = \\\n self.create_network(self.inputs, self.input_seq_len, self.dropout_rate, reuse_variables=reuse_weights)\n\n self.uninitialized_variable_initializer = None\n self.all_variable_initializer = None\n\n def is_gpu_available(self):\n # create a dummy session and list available devices\n # search if a GPU is available\n gpu_enabled = False\n for d in self.session.list_devices():\n if d.device_type == \"GPU\":\n gpu_enabled = True\n break\n\n return gpu_enabled\n\n def create_network(self, inputs, input_seq_len, dropout_rate, reuse_variables):\n network_proto = self.network_proto\n seq_len = input_seq_len\n batch_size = tf.shape(inputs)[0]\n gpu_enabled = self.gpu_available\n\n with tf.variable_scope(\"cnn_lstm\", reuse=reuse_variables) as scope:\n no_layers = len(network_proto.layers) == 0\n if not no_layers:\n has_conv_or_pool = network_proto.layers[0].type != LayerParams.LSTM\n else:\n has_conv_or_pool = False\n\n factor = 1\n if has_conv_or_pool:\n cnn_inputs = tf.reshape(inputs, [batch_size, -1, network_proto.features, self.input_channels])\n shape = seq_len, network_proto.features\n\n layers = [cnn_inputs]\n last_num_filters = 1\n\n cnn_layer_index = 0\n for layer in [l for l in network_proto.layers if l.type != LayerParams.LSTM]:\n if layer.type == LayerParams.CONVOLUTIONAL:\n layers.append(tf.layers.conv2d(\n name=\"conv2d\" if cnn_layer_index == 0 else \"conv2d_{}\".format(cnn_layer_index),\n inputs=layers[-1],\n filters=layer.filters,\n kernel_size=(layer.kernel_size.x, layer.kernel_size.y),\n padding=\"same\",\n activation=tf.nn.relu,\n reuse=reuse_variables,\n ))\n cnn_layer_index += 1\n last_num_filters = layer.filters\n elif layer.type == LayerParams.MAX_POOLING:\n layers.append(tf.layers.max_pooling2d(\n inputs=layers[-1],\n pool_size=(layer.kernel_size.x, layer.kernel_size.y),\n strides=(layer.stride.x, layer.stride.y),\n padding=\"same\",\n ))\n\n shape = (tf.to_int32(shape[0] // layer.stride.x),\n shape[1] // layer.stride.y)\n factor *= layer.stride.x\n else:\n raise Exception(\"Unknown layer of type %s\" % layer.type)\n\n lstm_seq_len, lstm_num_features = shape\n rnn_inputs = tf.reshape(layers[-1],\n [batch_size, tf.shape(layers[-1])[1],\n last_num_filters * lstm_num_features])\n\n lstm_num_features = last_num_filters * lstm_num_features\n else:\n rnn_inputs = inputs\n lstm_seq_len = seq_len\n lstm_num_features = network_proto.features\n\n lstm_layers = [l for l in network_proto.layers if l.type == LayerParams.LSTM]\n\n # Time major inputs required for lstm\n time_major_inputs = tf.transpose(rnn_inputs, [1, 0, 2])\n\n if len(lstm_layers) > 0:\n for i, lstm in enumerate(lstm_layers):\n if lstm.hidden_nodes != lstm_layers[0].hidden_nodes:\n raise Exception(\"Currently all lstm layers must have an equal number of hidden nodes. 
\"\n \"Got {} != {}\".format(lstm.hidden_nodes, lstm_layers[0].hidden_nodes))\n\n def cpu_cudnn_compatible_lstm_backend(time_major_inputs, hidden_nodes):\n def get_lstm_cell(num_hidden):\n return cudnn_rnn.CudnnCompatibleLSTMCell(num_hidden, reuse=reuse_variables)\n\n fw, bw = zip(*[(get_lstm_cell(hidden_nodes), get_lstm_cell(hidden_nodes)) for _ in lstm_layers])\n\n time_major_outputs, output_fw, output_bw \\\n = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(list(fw), list(bw), time_major_inputs,\n sequence_length=lstm_seq_len,\n dtype=tf.float32,\n scope=\"cudnn_lstm/stack_bidirectional_rnn\",\n time_major=True,\n )\n\n return time_major_outputs\n\n def gpu_cudnn_lstm_backend(time_major_inputs, hidden_nodes):\n # Create the Cudnn LSTM factory\n rnn_lstm = cudnn_rnn.CudnnLSTM(len(lstm_layers), hidden_nodes,\n direction='bidirectional',\n kernel_initializer=tf.initializers.random_uniform(-0.1, 0.1))\n\n # TODO: Check if the models are loadable from meta Graph, maybe the next line fixed this\n rnn_lstm._saveable_cls = cudnn_rnn.CudnnLSTMSaveable\n\n # Apply the lstm to the inputs\n time_major_outputs, (output_h, output_c) = rnn_lstm(time_major_inputs)\n return time_major_outputs\n\n if network_proto.backend.cudnn:\n if gpu_enabled:\n print(\"Using CUDNN LSTM backend on GPU\")\n time_major_outputs = gpu_cudnn_lstm_backend(time_major_inputs, lstm_layers[0].hidden_nodes)\n else:\n print(\"Using CUDNN compatible LSTM backend on CPU\")\n time_major_outputs = cpu_cudnn_compatible_lstm_backend(time_major_inputs, lstm_layers[0].hidden_nodes)\n else:\n raise Exception(\"Only cudnn based backend supported yet.\")\n\n # Set the output size\n output_size = lstm_layers[-1].hidden_nodes * 2\n else:\n output_size = lstm_num_features\n time_major_outputs = time_major_inputs\n\n # flatten to (T * N, F) for matrix multiplication. This will be reversed later\n time_major_outputs = tf.reshape(time_major_outputs, [-1, time_major_outputs.shape.as_list()[2]])\n\n if network_proto.dropout > 0:\n time_major_outputs = tf.nn.dropout(time_major_outputs, 1 - dropout_rate, name=\"dropout\")\n\n # we need to turn off validate_shape so we can resize the variable on a codec resize\n w = tf.get_variable('W', validate_shape=False, initializer=tf.random_uniform([output_size, network_proto.classes], -0.1, 0.1))\n b = tf.get_variable('B', validate_shape=False, initializer=tf.constant(0., shape=[network_proto.classes]))\n\n # the output layer\n time_major_logits = tf.matmul(time_major_outputs, w) + b\n\n # reshape back\n time_major_logits = tf.reshape(time_major_logits, [-1, batch_size, tf.shape(w)[-1]],\n name=\"time_major_logits\")\n\n time_major_softmax = tf.nn.softmax(time_major_logits, -1, \"time_major_softmax\")\n\n logits = tf.transpose(time_major_logits, [1, 0, 2], name=\"logits\")\n softmax = tf.transpose(time_major_softmax, [1, 0, 2], name=\"softmax\")\n\n lstm_seq_len = tf.identity(lstm_seq_len, \"seq_len_out\")\n\n # DECODER\n # ================================================================\n if network_proto.ctc == NetworkParams.CTC_DEFAULT:\n decoded, log_prob = ctc_ops.ctc_greedy_decoder(time_major_logits, lstm_seq_len, merge_repeated=network_proto.ctc_merge_repeated)\n elif network_proto.ctc == NetworkParams.CTC_FUZZY:\n decoded, log_prob = self.fuzzy_module['decoder_op'](softmax, lstm_seq_len)\n else:\n raise Exception(\"Unknown ctc model: '%s'. 
Supported are Default and Fuzzy\" % network_proto.ctc)\n\n decoded = decoded[0]\n sparse_decoded = (\n tf.identity(decoded.indices, name=\"decoded_indices\"),\n tf.identity(decoded.values, name=\"decoded_values\"),\n tf.identity(decoded.dense_shape, name=\"decoded_shape\"),\n )\n\n return lstm_seq_len, time_major_logits, time_major_softmax, logits, softmax, decoded, sparse_decoded, factor\n\n def create_placeholders(self):\n with tf.variable_scope(\"cnn_lstm\", reuse=False) as scope:\n inputs = tf.placeholder(tf.float32, shape=(None, None, self.network_proto.features), name=\"inputs\")\n seq_len = tf.placeholder(tf.int32, shape=(None,), name=\"seq_len\")\n targets = tf.sparse_placeholder(tf.int32, shape=(None, None), name=\"targets\")\n dropout_rate = tf.placeholder(tf.float32, shape=(), name=\"dropout_rate\")\n serialized_params = tf.placeholder(tf.string, shape=(None,), name='serialized_params')\n\n return inputs, seq_len, targets, dropout_rate, None, serialized_params\n\n def create_dataset_inputs(self, batch_size, line_height, max_buffer_size=1000):\n buffer_size = len(self.input_dataset) if self.input_dataset else 10\n buffer_size = min(max_buffer_size, buffer_size) if max_buffer_size > 0 else buffer_size\n input_channels = self.input_channels\n\n with tf.variable_scope(\"cnn_lstm\", reuse=False):\n def gen():\n epochs = 1\n for i, l, d in self.input_dataset.generator(epochs):\n if self.graph_type == \"train\" and len(l) == 0:\n continue\n\n l = self.codec.encode(l) if l else []\n\n # gray or binary input, add missing axis\n if len(i.shape) == 2:\n i = np.expand_dims(i, axis=-1)\n\n if i.shape[-1] != input_channels:\n raise ValueError(\"Expected {} channels but got {}. Shape of input {}\".format(\n input_channels, i.shape[-1], i.shape))\n\n yield i, l, [len(i)], [len(l)], [json.dumps(d)]\n\n def convert_to_sparse(data, labels, len_data, len_labels, ser_data):\n indices = tf.where(tf.not_equal(labels, -1))\n values = tf.gather_nd(labels, indices) - 1\n shape = tf.shape(labels, out_type=tf.int64)\n return data / 255, tf.SparseTensor(indices, values, shape), len_data, len_labels, ser_data\n\n dataset = tf.data.Dataset.from_generator(gen, (tf.float32, tf.int32, tf.int32, tf.int32, tf.string))\n if self.graph_type == \"train\":\n dataset = dataset.repeat().shuffle(buffer_size, seed=self.network_proto.backend.random_seed)\n else:\n pass\n\n dataset = dataset.padded_batch(batch_size, ([None, line_height, input_channels], [None], [1], [1], [1]),\n padding_values=(np.float32(0), np.int32(-1), np.int32(0), np.int32(0), ''))\n dataset = dataset.map(convert_to_sparse)\n\n data_initializer = dataset.prefetch(5).make_initializable_iterator()\n inputs = data_initializer.get_next()\n dropout_rate = tf.placeholder(tf.float32, shape=(), name=\"dropout_rate\")\n return inputs[0], tf.reshape(inputs[2], [-1]), inputs[1], dropout_rate, data_initializer, inputs[4]\n\n def create_cer(self, decoded, targets):\n # character error rate\n cer = tf.reduce_mean(tf.edit_distance(tf.cast(decoded, tf.int32), targets), name='ler')\n return cer\n\n def create_solver(self, targets, time_major_logits, batch_major_logits, seq_len, decoded):\n # ctc predictions\n cer = self.create_cer(decoded, targets)\n\n # Note for codec change: the codec size is derived upon creation, therefore the ctc ops must be created\n # using the true codec size (the W/B-Matrix may change its shape however during loading/codec change\n # to match the true codec size\n if self.network_proto.ctc == NetworkParams.CTC_DEFAULT:\n loss = 
ctc_ops.ctc_loss(targets,\n time_major_logits,\n seq_len,\n time_major=True,\n ctc_merge_repeated=self.network_proto.ctc_merge_repeated,\n ignore_longer_outputs_than_inputs=True)\n elif self.network_proto.ctc == NetworkParams.CTC_FUZZY:\n loss, deltas = self.fuzzy_module['module'].fuzzy_ctc_loss(\n batch_major_logits, targets.indices,\n targets.values,\n seq_len,\n ignore_longer_outputs_than_inputs=True)\n else:\n raise Exception(\"Unknown ctc model: '%s'. Supported are Default and Fuzzy\" % self.network_proto.ctc)\n\n cost = tf.reduce_mean(loss, name='cost')\n if self.network_proto.solver == NetworkParams.MOMENTUM_SOLVER:\n optimizer = tf.train.MomentumOptimizer(self.network_proto.learning_rate, self.network_proto.momentum)\n elif self.network_proto.solver == NetworkParams.ADAM_SOLVER:\n optimizer = tf.train.AdamOptimizer(self.network_proto.learning_rate)\n else:\n raise Exception(\"Unknown solver of type '%s'\" % self.network_proto.solver)\n\n gvs = optimizer.compute_gradients(cost)\n\n training_ops = []\n if self.network_proto.clipping_mode == NetworkParams.CLIP_NONE:\n pass\n elif self.network_proto.clipping_mode == NetworkParams.CLIP_AUTO:\n # exponentially follow the global average of gradients to set clipping\n ema = tf.train.ExponentialMovingAverage(decay=0.999)\n\n max_l2 = 1000\n max_grads = 1000\n\n grads = [grad for grad, _ in gvs]\n l2 = tf.minimum(tf.global_norm([grad for grad in grads]), max_l2)\n l2_ema_op, l2_ema = ema.apply([l2]), ema.average(l2)\n grads, _ = tf.clip_by_global_norm(grads,\n clip_norm=tf.minimum(l2_ema / max_l2 * max_grads, max_grads))\n gvs = zip(grads, [var for _, var in gvs])\n training_ops.append(l2_ema_op)\n elif self.network_proto.clipping_mode == NetworkParams.CLIP_CONSTANT:\n clip = self.network_proto.clipping_constant\n if clip <= 0:\n raise Exception(\"Invalid clipping constant. 
Must be greater than 0, but got {}\".format(clip))\n\n grads = [grad for grad, _ in gvs]\n grads, _ = tf.clip_by_global_norm(grads, clip_norm=clip)\n gvs = zip(grads, [var for _, var in gvs])\n else:\n raise Exception(\"Unsupported clipping mode {}\".format(self.network_proto.clipping_mode))\n\n training_ops.append(optimizer.apply_gradients(gvs, name='grad_update_op'))\n train_op = tf.group(training_ops, name=\"train_op\")\n\n return train_op, cost, cer\n\n def uninitialized_variables(self):\n with self.graph.as_default():\n global_vars = tf.global_variables()\n is_not_initialized = self.session.run([tf.is_variable_initialized(var) for var in global_vars])\n not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]\n\n return not_initialized_vars\n\n def reset_data(self):\n with self.graph.as_default():\n if self.data_iterator:\n self.session.run([self.data_iterator.initializer])\n\n def prepare(self, uninitialized_variables_only=True, reset_queues=True):\n super().prepare()\n if reset_queues:\n self.reset_data()\n with self.graph.as_default():\n # only create the initializers once, else the graph is growing...\n if not self.uninitialized_variable_initializer:\n self.uninitialized_variable_initializer = tf.variables_initializer(self.uninitialized_variables())\n if not self.all_variable_initializer:\n self.all_variable_initializer = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n # run the desired initializer\n if uninitialized_variables_only:\n self.session.run(self.uninitialized_variable_initializer)\n else:\n self.session.run(self.all_variable_initializer)\n\n def load_weights(self, filepath, restore_only_trainable=True):\n with self.graph.as_default() as g:\n # reload trainable variables only (e. g. 
omitting solver specific variables)\n if restore_only_trainable:\n saver = tf.train.Saver(tf.trainable_variables())\n else:\n saver = tf.train.Saver()\n\n # Restore variables from disk.\n # This will possible load a weight matrix with wrong shape, thus a codec resize is necessary\n saver.restore(self.session, filepath)\n\n def realign_model_labels(self, indices_to_delete, indices_to_add):\n W = self.graph.get_tensor_by_name(\"cnn_lstm/W:0\")\n B = self.graph.get_tensor_by_name(\"cnn_lstm/B:0\")\n\n # removed desired entries from the data\n # IMPORTANT: Blank index is last in tensorflow but 0 in indices!\n W_val, B_val = self.session.run((W, B))\n W_val = np.delete(W_val, [i - 1 for i in indices_to_delete], axis=1)\n B_val = np.delete(B_val, [i - 1 for i in indices_to_delete], axis=0)\n\n # add new indices at the end\n if list(range(W_val.shape[1], W_val.shape[1] + len(indices_to_add))) != list(sorted(indices_to_add)):\n raise Exception(\"Additional labels must be added at the end, but got label indices {} != {}\".format(\n range(W_val.shape[1], W_val.shape[1] + len(indices_to_add)), sorted(indices_to_add)))\n\n W_val = np.concatenate((W_val[:, :-1], np.random.uniform(-0.1, 0.1, (W_val.shape[0], len(indices_to_add))), W_val[:, -1:]), axis=1)\n B_val = np.concatenate((B_val[:-1], np.zeros((len(indices_to_add), )), B_val[-1:]), axis=0)\n\n # reassign values\n op_W = tf.assign(W, W_val, validate_shape=False)\n op_B = tf.assign(B, B_val, validate_shape=False)\n self.session.run((op_W, op_B))\n\n def save_checkpoint(self, output_file):\n with self.graph.as_default() as g:\n saver = tf.train.Saver()\n saver.save(self.session, output_file)\n\n def train_batch(self, x, len_x, y):\n out = self.session.run(\n [self.loss, self.train_op, self.logits, self.output_seq_len, self.cer, self.decoded],\n feed_dict={\n self.inputs: x,\n self.input_seq_len: len_x,\n self.targets: y,\n self.dropout_rate: self.network_proto.dropout,\n }\n )\n\n if np.isfinite(out[0]):\n # only update gradients if finite loss\n self.session.run(\n [self.train_op],\n feed_dict={\n self.inputs: x,\n self.input_seq_len: len_x,\n self.targets: y,\n self.dropout_rate: self.network_proto.dropout,\n }\n )\n else:\n print(\"WARNING: Infinite loss. Skipping batch.\", file=sys.stderr)\n\n return out\n\n def train_dataset(self):\n out = self.session.run(\n [self.loss, self.softmax, self.output_seq_len, self.cer, self.decoded, self.targets],\n feed_dict={\n self.dropout_rate: self.network_proto.dropout,\n }\n )\n\n if np.isfinite(out[0]):\n # only update gradients if finite loss\n self.session.run(\n [self.train_op],\n feed_dict={\n self.dropout_rate: self.network_proto.dropout,\n }\n )\n else:\n print(\"WARNING: Infinite loss. 
Skipping batch.\", file=sys.stderr)\n\n return out\n\n def predict_raw_batch(self, x: np.array, len_x: np.array) -> Generator[NetworkPredictionResult, None, None]:\n out = self.session.run(\n [self.softmax, self.output_seq_len, self.decoded],\n feed_dict={\n self.inputs: x / 255,\n self.input_seq_len: len_x,\n self.dropout_rate: 0,\n })\n out = out[0:2] + [TensorflowModel.__sparse_to_lists(out[2])]\n for sm, sl, dec in zip(*out):\n pred = NetworkPredictionResult(softmax=sm,\n output_length=sl,\n decoded=dec,\n )\n pred.softmax = np.roll(pred.softmax, 1, axis=1)\n l, s = pred.softmax, pred.output_length\n pred.decoded = self.ctc_decoder.decode(l[:s])\n yield pred\n\n def predict_dataset(self) -> Generator[NetworkPredictionResult, None, None]:\n out = self.session.run(\n [self.softmax, self.output_seq_len, self.serialized_params, self.decoded, self.targets],\n feed_dict={\n self.dropout_rate: 0,\n })\n out = out[0:3] + list(map(TensorflowModel.__sparse_to_lists, out[3:5]))\n for sm, length, param, dec, gt in zip(*out):\n # decode encoded params from json. On python<=3.5 this are bytes, else it already is a str\n enc_param = param[0]\n enc_param = json.loads(enc_param.decode(\"utf-8\") if isinstance(enc_param, bytes) else enc_param)\n # return prediction result\n yield NetworkPredictionResult(softmax=sm,\n output_length=length,\n decoded=dec,\n params=enc_param,\n ground_truth=self.codec.decode(gt) if gt is not None else None,\n )\n\n def train(self):\n cost, probs, seq_len, ler, decoded, gt = self.train_dataset()\n gt = TensorflowModel.__sparse_to_lists(gt)\n decoded = TensorflowModel.__sparse_to_lists(decoded)\n\n probs = np.roll(probs, 1, axis=2)\n return {\n \"loss\": cost,\n \"probabilities\": probs,\n \"ler\": ler,\n \"decoded\": decoded,\n \"gt\": gt,\n \"logits_lengths\": seq_len,\n }\n\n def predict(self) -> Generator[NetworkPredictionResult, None, None]:\n try:\n while True:\n for pred in self.predict_dataset():\n pred.softmax = np.roll(pred.softmax, 1, axis=1)\n l, s = pred.softmax, pred.output_length\n pred.decoded = self.ctc_decoder.decode(l[:s])\n yield pred\n\n except tf.errors.OutOfRangeError as e:\n # no more data available\n pass\n\n @staticmethod\n def __to_sparse_matrix(y, shift_values=-1):\n batch_size = len(y)\n indices = np.concatenate([np.concatenate(\n [\n np.full((len(y[i]), 1), i),\n np.reshape(range(len(y[i])), (-1, 1))\n ], 1) for i in range(batch_size)], 0)\n values = np.concatenate(y, 0) + shift_values\n dense_shape = np.asarray([batch_size, max([len(yi) for yi in y])])\n assert(len(indices) == len(values))\n\n return indices, values, dense_shape\n\n @staticmethod\n def __sparse_data_to_dense(x):\n batch_size = len(x)\n len_x = [xb.shape[0] for xb in x]\n max_line_length = max(len_x)\n\n # transform into batch (batch size, T, height)\n full_x = np.zeros((batch_size, max_line_length, x[0].shape[1]))\n for batch, xb in enumerate(x):\n full_x[batch, :len(xb)] = xb\n\n # return full_x, len_x\n return full_x, [l for l in len_x]\n\n @staticmethod\n def __sparse_to_lists(sparse, shift_values=1):\n indices, values, dense_shape = sparse\n return TensorflowModel._convert_targets_to_lists(indices, values, dense_shape, shift_values=shift_values)\n\n @staticmethod\n def _convert_targets_to_lists(indices, values, dense_shape, shift_values=1):\n out = [[] for _ in range(dense_shape[0])]\n\n for index, value in zip(indices, values):\n x, y = tuple(index)\n assert(len(out[x]) == y) # consistency check\n out[x].append(value + shift_values)\n\n return [np.asarray(o, 
dtype=np.int64) for o in out]\n\n def output_to_input_position(self, x):\n return x * self.scale_factor\n", "id": "10162831", "language": "Python", "matching_score": 7.574606418609619, "max_stars_count": 0, "path": "calamari_ocr/ocr/backends/tensorflow_backend/tensorflow_model.py" }, { "content": "import numpy as np\nfrom abc import ABC, abstractmethod\n\nfrom calamari_ocr.proto import NetworkParams\nfrom .ctc_decoder.default_ctc_decoder import DefaultCTCDecoder\nfrom .ctc_decoder.fuzzy_ctc_decoder import FuzzyCTCDecoder\nfrom calamari_ocr.ocr.datasets import InputDataset\nfrom calamari_ocr.ocr import Codec\n\nfrom typing import Any, Generator, List\n\n\nclass NetworkPredictionResult:\n def __init__(self,\n softmax: np.array,\n output_length: int,\n decoded: np.array,\n params: Any = None,\n ground_truth: np.array = None):\n self.softmax = softmax\n self.output_length = output_length\n self.decoded = decoded\n self.params = params\n self.ground_truth = ground_truth\n\n\nclass ModelInterface(ABC):\n def __init__(self, network_proto, graph_type, batch_size, input_dataset: InputDataset = None, codec: Codec = None,\n processes=1):\n \"\"\" Interface for a neural net\n\n Interface above the actual DNN implementation to abstract training and prediction.\n\n Parameters\n ----------\n network_proto : NetworkParams\n Parameters that define the network\n graph_type : {\"train\", \"test\", \"deploy\"}\n Type of the graph, depending on the type different parts must be added (e.g. the solver)\n batch_size : int\n Number of examples to train/predict in parallel\n \"\"\"\n self.network_proto = network_proto\n self.input_channels = network_proto.channels if network_proto.channels > 0 else 1\n self.graph_type = graph_type\n self.batch_size = batch_size\n self.input_dataset = input_dataset\n self.codec = codec\n self.processes = processes\n\n self.ctc_decoder = {\n NetworkParams.CTC_FUZZY: FuzzyCTCDecoder(),\n NetworkParams.CTC_DEFAULT: DefaultCTCDecoder(),\n }[network_proto.ctc]\n\n def output_to_input_position(self, x):\n return x\n\n def set_input_dataset(self, input_dataset: InputDataset, codec: Codec):\n \"\"\" Set the networks data generator\n\n Parameters\n ----------\n data_generator : Generator[Tuple[np.array, np.array, Any], None, None]\n List of all raw labels to be used for training\n Returns\n -------\n None\n \"\"\"\n self.input_dataset = input_dataset\n self.codec = codec\n\n def train_step(self):\n \"\"\" Performs a training step of the model.\n Returns\n -------\n None\n \"\"\"\n\n return self.train()\n\n def iters_per_epoch(self, batch_size):\n size = len(self.input_dataset)\n r = size % batch_size\n n = size // batch_size\n return n if r == 0 else n + 1\n\n def predict_raw(self, x: List[np.array]) -> Generator[NetworkPredictionResult, None, None]:\n for r in self.predict_raw_batch(*self.zero_padding(x)):\n yield r\n\n def prediction_step(self) -> Generator[NetworkPredictionResult, None, None]:\n return self.predict()\n\n def reset_data(self):\n \"\"\" Called if the data changed\n \"\"\"\n pass\n\n def prepare(self, uninitialized_variables_only=True, reset_queues=True):\n pass\n\n @abstractmethod\n def predict_raw_batch(self, x: np.array, len_x: np.array) -> Generator[NetworkPredictionResult, None, None]:\n pass\n\n @abstractmethod\n def train(self):\n return []\n\n @abstractmethod\n def predict(self) -> Generator[NetworkPredictionResult, None, None]:\n \"\"\" Predict the current data\n\n Parameters\n ----------\n with_gt : bool\n Also output the gt if available in the dataset\n\n 
Returns\n -------\n list of Prediction\n\n See Also\n --------\n set_data\n \"\"\"\n return []\n\n @abstractmethod\n def save_checkpoint(self, filepath):\n \"\"\" Save the current network state to `filepath`\n\n Parameters\n ----------\n filepath : str\n Where to store the checkpoint\n \"\"\"\n pass\n\n @abstractmethod\n def load_weights(self, filepath, restore_only_trainable=True):\n \"\"\" Load the weights stored a the given `filepath`\n\n Parameters\n ----------\n filepath : str\n File to load\n restore_only_trainable : bool\n If False e.g. the solver state is loaded, which might not be desired.\n \"\"\"\n pass\n\n @abstractmethod\n def realign_model_labels(self, indices_to_delete, indices_to_add):\n \"\"\" Realign the output matrix to the given labels\n\n On a codec resize some output labels can be added or deleted.\n Thus, the corresponding vectors in the output matrix of the DNN must be adapted accordingly.\n\n Parameters\n ----------\n indices_to_delete : list of int\n labels to be deleted\n indices_to_add : list of int\n labels to be added (usually at the end)\n\n \"\"\"\n pass\n\n def zero_padding(self, data):\n len_x = [len(x) for x in data]\n out = np.zeros((len(data), max(len_x), self.network_proto.features), dtype=np.uint8)\n for i, x in enumerate(data):\n out[i, 0:len(x)] = x\n\n return np.expand_dims(out, axis=-1), np.array(len_x, dtype=np.int32)\n\n", "id": "5301049", "language": "Python", "matching_score": 2.7521584033966064, "max_stars_count": 0, "path": "calamari_ocr/ocr/backends/model_interface.py" }, { "content": "from abc import ABC, abstractmethod\nimport random\nimport numpy as np\nfrom .model_interface import ModelInterface\n\n\nclass BackendInterface(ABC):\n def __init__(self,\n network_proto,\n ):\n self.network_proto = network_proto\n self.implementation_handles_batching = False\n seed = network_proto.backend.random_seed\n if seed >= 0:\n random.seed(seed)\n np.random.seed(seed)\n\n super().__init__()\n\n @abstractmethod\n def create_net(self, dataset, codec, restore, weights, graph_type, batch_size=-1) -> ModelInterface:\n pass\n", "id": "10111901", "language": "Python", "matching_score": 3.1024606227874756, "max_stars_count": 0, "path": "calamari_ocr/ocr/backends/backend_interface.py" }, { "content": "import tensorflow as tf\n\nfrom calamari_ocr.ocr.backends.backend_interface import BackendInterface\nfrom calamari_ocr.ocr.backends.tensorflow_backend.tensorflow_model import TensorflowModel\n\n\nclass TensorflowBackend(BackendInterface):\n def __init__(self,\n network_proto,\n restore,\n weights,\n processes=-1):\n super().__init__(network_proto)\n self.graph = tf.Graph()\n self.session = tf.Session(graph=self.graph,\n config=tf.ConfigProto(\n intra_op_parallelism_threads=network_proto.backend.num_intra_threads,\n inter_op_parallelism_threads=network_proto.backend.num_inter_threads,\n ))\n self.restore = restore\n self.weights = weights\n self.first_model = True\n self.processes = processes if processes > 0 else 1\n\n def create_net(self, dataset, codec, restore, weights, graph_type, batch_size=-1, stream_input=True):\n model = TensorflowModel(self.network_proto, self.graph, self.session, graph_type, batch_size,\n reuse_weights=not self.first_model,\n input_dataset=dataset,\n codec=codec,\n processes=self.processes,\n )\n self.first_model = False\n if weights:\n model.load_weights(weights, restore_only_trainable=True)\n\n if restore:\n try:\n model.load_weights(restore, restore_only_trainable=False)\n except tf.errors.NotFoundError as e:\n if \"opaque_kernel\" 
in e.message:\n                    print(e)\n                    raise Exception(\"This exception probably occurred when loading a CPU model on the GPU. This is currently not supported by TensorFlow\")\n\n                # this might be cudnn related, try again, but skip non trainable and opaque kernel\n                with self.graph.as_default():\n                    saver = tf.train.Saver(tf.trainable_variables())\n                    saver.restore(self.session, restore)\n\n        return model\n\n\n", "id": "8273324", "language": "Python", "matching_score": 2.496469020843506, "max_stars_count": 0, "path": "calamari_ocr/ocr/backends/tensorflow_backend/tensorflow_backend.py" }, { "content": "from calamari_ocr.proto import BackendParams\n\n\ndef create_backend_from_proto(network_params, restore=None, weights=None, processes=-1):\n    \"\"\"\n    Create a Backend implementation object based on NetworkParameters.\n\n    Parameters\n    ----------\n    network_params : NetworkParameters\n        the network parameters that define the new Backend\n    restore : str\n        path to a file to restore if a network is created\n    weights : str\n        path to a file to copy weights\n    processes : int\n        number of processes to use for all nets created by this backend.\n        A negative number uses the default params as suggested by the backend.\n\n    Returns\n    -------\n    A net backend implementation object\n\n    \"\"\"\n    # TODO: Change parameter to backend_params?\n    # TODO: remove restore and weights\n    if network_params.backend.type == BackendParams.TENSORFLOW:\n        from calamari_ocr.ocr.backends.tensorflow_backend.tensorflow_backend import TensorflowBackend\n        return TensorflowBackend(network_params, restore, weights, processes=processes)\n    else:\n        raise Exception(\"Unknown backend type '{}'\".format(network_params.backend.type))\n\n", "id": "5054470", "language": "Python", "matching_score": 2.140571355819702, "max_stars_count": 0, "path": "calamari_ocr/ocr/backends/factory.py" }, { "content": "from calamari_ocr.ocr.backends.factory import create_backend_from_proto\n", "id": "10020508", "language": "Python", "matching_score": 0.9890739917755127, "max_stars_count": 0, "path": "calamari_ocr/ocr/backends/__init__.py" } ]
1.919979
ionutcatalinsandu
[ { "content": "#users list\nusers = [\n {\"id\": 0, \"name\": \"Hero\"},\n {\"id\": 1, \"name\": \"Dunn\"},\n {\"id\": 2, \"name\": \"Sue\"},\n {\"id\": 3, \"name\": \"Chi\"},\n {\"id\": 4, \"name\": \"Thor\"},\n {\"id\": 5, \"name\": \"Clive\"},\n {\"id\": 6, \"name\": \"Hicks\"},\n {\"id\": 7, \"name\": \"Devin\"},\n {\"id\": 8, \"name\": \"Kate\"},\n {\"id\": 9, \"name\": \"Klein\"}\n]\n\nfriendship_pairs = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),\n (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]\n\nfriendships = { user['id']: [] for user in users }\nfor i,j in friendship_pairs:\n friendships[i].append(j)\n friendships[j].append(i)\n\nprint(friendships)\n", "id": "5077615", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "chapter1/users.py" } ]
0
mgoldenisc
[ { "content": "from gensim.models import fasttext as ft\nfrom gensim.models.word2vec import Word2Vec\nfrom gensim.models.keyedvectors import KeyedVectors as KV\nimport os\n\n\nclass IKSimilarityTools(object):\n \n def __init__(self, pmodel_name, wv):\n if pmodel_name[-4:] != '.bin':\n pmodel_name = pmodel_name + '.bin'\n self.model_name = pmodel_name\n self.wordvectors = wv\n\n \n def most_similar(self, term, num_similar=5):\n \"\"\" Finds the num_similar most similar words to the supplied word argument. If a \n term is multiple words, input as a string with each word separated by spaces. \n For example \"acute pulmonary hypertension\"\n\n This method will check if a tokenized version of the input term is in the dictionary.\n If it is, it will use that to evaluate synonyms, otherwise it will provide a list of\n the words in the term and find words that correspond with that list.\n\n Parameters\n -----------\n term (str) - The term (word, entity, etc) for which we want to find the most similar matches.\n \n num_similar (int, optional) - The number of top similar words to be found (e.g. 5 returns the 5 most similar words \n to the specified word based on the current model). Default 5\n\n\n Returns\n -----------\n A list of the top words\n\n Throws\n -----------\n KeyError - if term is not found in the vocabulary\n \"\"\"\n if term.replace(\" \", \"_\") in self.wordvectors.key_to_index:\n gensimpairs = self.wordvectors.most_similar(term.replace(\" \", \"_\"), topn=num_similar)\n else:\n try:\n positives = term.split()\n gensimpairs = self.wordvectors.most_similar(positive=positives, topn=num_similar)\n except KeyError as err:\n # KeyError raised when term (fastText: and all ngrams) are NOT found in the vector space\n raise KeyError(\"{} is not in the vocabulary and a vector could not be found/computed\".format(term)) from err\n\n return [pair[0].replace(\"_\", \" \") for pair in gensimpairs]\n \n\n def get_similarity(self, term1, term2):\n \"\"\" Gets cosine similarity of word1 and word2\n\n Parameters\n -----------\n term1, term2 (str) - The two terms for which one wishes to calculate cosine similarity.\n If using a tokenized model, these can be multi-word phrases (which will be tokenized by \n this method), otherwise must be a word.\n\n Returns\n -----------\n Float [0,1] where 0 = not similar and 1 = identical\n\n Throws\n -----------\n KeyError: if term1 and/or term2 not in the model's vocabulary\n \"\"\"\n try:\n term1 = term1.replace(\" \", \"_\")\n term2 = term2.replace(\" \", \"_\")\n sim = self.wordvectors.similarity(term1, term2)\n return sim\n except KeyError as err:\n raise KeyError(\"{} is not in the vocabulary, cosine similarity could not be computed\".format(err.args[0])) from err\n \n\n def evaluate_word_pairs(self, word_pairs, delimiter, case_insensitive):\n \"\"\" Calls gensim's evaluate_word_pairs method and returns the Spearman coefficient \n and out-of-vocabulary (OOV) ratio.\n\n Parameters\n -----------\n word_pairs (str) - Path to file where each line has three values: the first two \n are the words in the word pair and the third is the human-assigned similarity rating\n\n delimiter (str, optional) - The character that delimits the three values in each line (default = \\t)\n\n case_insensitive (bool, optional) - If True, convert all tokens to uppercase to before evaluation\n\n\n Returns\n -----------\n Spearman coefficient (float) - The Spearman coefficient between human similarity judgments of the word pairs\n and the model assigned scores\n \n OOV ratio (float) - The 
ratio of words that were out of vocabulary\n\n NOTE: This is to give some quantitative sense of the efficacy of the model. \n \"\"\"\n score = self.wordvectors.evaluate_word_pairs(pairs=word_pairs, delimiter=delimiter, case_insensitive=case_insensitive)\n return (score[1][0], score[2])\n \n\n def synonym_dict_from_string(self, source_text, use_iknow_entities=True, num_similar=5):\n \"\"\" Uses currently loaded model to determine a dictionary of synonyms for each word or\n entity in a provided string.\n\n Parameters\n --------------\n source_text (str) - A free string text source\n \n use_iknow_entities (bool) - whether to find synonyms for iKnow entities in the string (as opposed to words)\n\n num_similar (int) - Number of similar words that will be returned for each term in the source text (if exist).\n Higher num_similar ~ less strict similarity, lower num_similar ~ more strict similarity\n\n\n Returns\n --------------\n A dictionary of synonyms for each entity or word in the source\n\n NOTE: Right now, using iKnow entities will only check for synoyms of the iKnow entities, not for \n their individual components. So it is one or the other\n \"\"\"\n dictionary = {}\n if use_iknow_entities:\n iknowpy = __import__(\"iknowpy\") # not a global import to avoid making user install iknowpy unless they need to use functionality\n # index the source with iknow entities\n engine = iknowpy.iKnowEngine()\n engine.index(source_text, 'en')\n # Populate dictionary with keys for each term, all with empty list for value\n for s in engine.m_index['sentences']:\n for e in s['entities']:\n if (e['type'] in ('PathRelevant', 'NonRelevant')) or (e['index'] in dictionary):\n continue\n else:\n try:\n dictionary[e['index']] = [self.most_similar(e['index'], num_similar=num_similar)] \\\n if num_similar == 1 else self.most_similar(e['index'], num_similar=num_similar)\n except KeyError:\n continue\n else:\n # use words instead of entities\n words = source_text.split(' ')\n for word in words:\n if word in dictionary: continue\n else:\n try:\n dictionary[word] = [self.most_similar(word, num_similar=num_similar)] \\\n if num_similar == 1 else self.most_similar(word, num_similar=num_similar)\n except KeyError:\n continue\n\n return dictionary\n\n\n def synonym_dict_from_file(self, source_text, use_iknow_entities=True, num_similar=5):\n \"\"\" Uses currently loaded model to determine a dictionary of synonyms for each word or\n entity in a provided text file.\n\n Parameters\n --------------\n source_text (str) - The path to a file containing the source text\n \n use_iknow_entities (bool) - whether to find synonyms for iKnow entities (as opposed to words)\n\n num_similar (int) - Number of similar words that will be returned for each term in the source text (if exist).\n Higher num_similar ~ less strict similarity, lower num_similar ~ more strict similarity\n\n\n Returns\n --------------\n a dictionary of synonyms for each entity or word in the source\n\n NOTE: Right now, using iKnow entities will only check for synoyms of the iKnow entities, not for \n their individual components. 
So it is one or the other.\n \"\"\"\n dictionary = {}\n if use_iknow_entities:\n iknowpy = __import__(\"iknowpy\") # not a global import to avoid making user install iknowpy unless they need to use functionality\n # index the source with iknow entities\n engine = iknowpy.iKnowEngine()\n for line in open(source_text , 'r'):\n engine.index(line, 'en')\n # Populate dictionary with keys for each term, all with empty list for value\n for s in engine.m_index['sentences']:\n for e in s['entities']:\n if (e['type'] in ('PathRelevant', 'NonRelevant')) or (e['index'] in dictionary):\n continue\n else:\n try:\n dictionary[e['index']] = [self.most_similar(e['index'], num_similar=num_similar)] \\\n if num_similar == 1 else self.most_similar(e['index'], num_similar=num_similar)\n except KeyError:\n continue\n else:\n # use words instead of entities\n for line in open(source_text, 'r'):\n words = line.split(' ')\n for word in words:\n if word in dictionary: continue\n else:\n try:\n dictionary[word] = [self.most_similar(word, num_similar=num_similar)] \\\n if num_similar == 1 else self.most_similar(word, num_similar=num_similar)\n except KeyError:\n continue\n return dictionary\n\n\n\nclass IKFastTextTools(IKSimilarityTools):\n \"\"\" Subclass of IKSimilarityTools\n\n Contains methods to access exisiting FastText models and evaluate similarity between terms.\n\n Methods:\n load_vectors(self, pmodel_name): Load into memory the vectors of a previously trained model.\n\n Inherited methods:\n most_similar(self, term, num_similar=5): Return num_similar most similar terms to term in the model\n\n get_similarity(self, term1, term2): Compute cosine similarity of term1 and term2\n\n evaluate_word_pairs(self, word_pairs, delimiter, case_insensitive): Compute Spearman coeff. and \n out-of-vocab ratio for a model\n\n synonym_dict_from_string(self, source_text, use_iknow_entities=True, num_similar=5):\n Get a dictionary of synonyms, where each key is a term in the source_text string and each\n entry is the top num_similar most similar words to that key in the model. \n\n synonym_dict_from_file(self, source_text, use_iknow_entities=True, num_similar=5)\n Get a dictionary of synonyms, where each key is a term in the source_text FILE and each\n entry is the top num_similar most similar words to that key in the model. \n \"\"\"\n\n __PATH_PREFIX__ = os.path.join(os.path.dirname(__file__),'models', 'fasttext')\n\n def __init__(self, pmodel_name): \n\n self.load_vectors(pmodel_name)\n\n self.model_name = pmodel_name\n\n\n def load_vectors(self, pmodel_name):\n \"\"\" Loads the VECTORS of an already trained model. 
It is much quicker and \n less cumbersome to use just vectors than to use the model itself, but\n still comes with the various important syntactic/semantic tools.\n\n If the vectors of the specified model are not found but another model's vectors\n are already loaded, this instance will continue to use the already loaded vectors.\n\n Parameters\n -----------\n pmodel_name (str) - Name of the model to load vectors from\n\n Throws\n -----------\n FileNotFoundError - If specified model is not found.\n \"\"\"\n try:\n if pmodel_name[-4:] != '.bin':\n pmodel_name = pmodel_name + '.bin'\n self.wordvectors = ft.load_facebook_vectors(os.path.join(self.__PATH_PREFIX__, pmodel_name))\n except FileNotFoundError as err:\n raise FileNotFoundError(\"Model with name {} not found.\".format(pmodel_name[:-4])) from err\n\n\n\nclass IKWord2VecTools(IKSimilarityTools):\n \"\"\" Subclass of IKSimilarityTools\n\n Contains methods to access existing Word2Vec models and evaluate similarity between terms.\n\n Methods:\n load_vectors(self, pmodel_name): Load into memory the vectors of a previously trained model.\n\n Inherited methods:\n most_similar(self, term, num_similar=5): Return num_similar most similar terms to term in the model\n\n get_similarity(self, term1, term2): Compute cosine similarity of term1 and term2\n\n evaluate_word_pairs(self, word_pairs, delimiter, case_insensitive): Compute Spearman coeff. and \n out-of-vocab ratio for a model\n\n synonym_dict_from_string(self, source_text, use_iknow_entities=True, num_similar=5):\n Get a dictionary of synonyms, where each key is a term in the source_text string and each\n entry is the top num_similar most similar words to that key in the model. \n\n synonym_dict_from_file(self, source_text, use_iknow_entities=True, num_similar=5)\n Get a dictionary of synonyms, where each key is a term in the source_text FILE and each\n entry is the top num_similar most similar words to that key in the model. \n \"\"\"\n __PATH_PREFIX__ = os.path.join(os.path.dirname(__file__),'models', 'word2vec', 'vectors')\n\n def __init__(self, pmodel_name):\n \n self.load_vectors(pmodel_name)\n\n self.model_name = pmodel_name # Keeps track of what model is at use\n\n\n def load_vectors(self, pmodel_name):\n \"\"\" Loads the VECTORS of an already trained model. 
It is much quicker and \n less cumbersome to use just vectors than to use the model itself, but\n still comes with the various important syntactic/semantic tools.\n\n If the vectors of the specified model are not found but another model's vectors\n are already loaded, this instance will continue to use the already loaded vectors.\n \n Parameters\n -----------\n pmodel_name (str) - Name of the model to load vectors from\n\n Throws\n -----------\n FileNotFoundError - if specified model (pmodel_name) is not found\n \"\"\"\n\n try:\n if pmodel_name[-4:] != '.bin':\n pmodel_name = pmodel_name + '.bin'\n self.wordvectors = KV.load_word2vec_format(os.path.join(self.__PATH_PREFIX__, pmodel_name), binary=True)\n except FileNotFoundError as err:\n raise FileNotFoundError(\"Model with name {} not found.\".format(pmodel_name[:-4])) from err\n\n\n\n\nclass IKSimilarityModeling():\n\n @classmethod\n def create_new_model(cls, corpus_path, model, epochs=5, use_iknow_entities=True, tokenize_concepts=True):\n print('Building vocabulary...\\n')\n # build vocabulary\n try:\n corpus_path = SentenceIterator(corpus_path=corpus_path, use_iknow_entities=use_iknow_entities, tokenize_concepts=tokenize_concepts)\n model.build_vocab(corpus_iterable=corpus_path)\n except FileNotFoundError as err:\n raise FileNotFoundError('No corpus found at %s' % corpus_path) from err\n except RuntimeError as err:\n raise RuntimeError(\"Model could not be trained. If you are attempting to continue training an exisiting model, call update_model()\") from err\n \n print('Finished building vocabulary.\\n')\n print('Training model...\\n')\n # train the model\n model.train(\n corpus_iterable=corpus_path, epochs=epochs, total_examples=model.corpus_count\n )\n\n print('Finished training model.\\n')\n\n @classmethod\n def update_model(cls, corpus_path, model, use_iknow_entities, tokenize_concepts):\n # Must update the vocabulary of unique words in the corpus prior to training\n # Note that you MUST pass in update=True to not destroy existing form of the model\n sentences = SentenceIterator(corpus_path, use_iknow_entities=use_iknow_entities, tokenize_concepts=tokenize_concepts)\n\n model.build_vocab(corpus_iterable=sentences, update=True)\n \n model.train(\n corpus_iterable=sentences, total_examples=model.corpus_count, \n epochs=model.epochs\n )\n\n \nclass IKFastTextModeling(IKSimilarityModeling):\n \"\"\" Subclass of IKSimilarityModeling\n\n Contains methods to train (create new) and retrain (update existing) models.\n\n Methods (class methods):\n create_new_model(cls, corpus_path, pmodel_name, epochs=5, pmin_count=10, psize=150):\n Creates a new model pmodel_name, trained on corpus found at corpus_path. Makes epochs number of passes\n over the corpus. Words in the corpus must appear pmin_count times to be considered. Vectors will be of \n dimension psize.\n\n update_model(cls, corpus_path, pmodel_name, use_iknow_entities=True, tokenize_concepts=True):\n Updates the model pmodel_name, training on corpus found at corpus_path. If use_iknow_entities, will use \n iKnow entities in training. 
If tokenize_concepts, will turn iKnow entities into singular tokens, joined by underscores\n (_).\n\n NOTE: Update is currently non-functional.\n \"\"\"\n\n __PATH_PREFIX__ = os.path.join(os.path.dirname(__file__),'models', 'fasttext')\n\n\n @classmethod\n def create_new_model(cls, corpus_path, pmodel_name, epochs=5, pmin_count=10, psize=150):\n \"\"\" Creates and trains (and optionally saves) a model using gensim's implementation \n of the fastText algorithm, and then loads the KeyedVectors associated with that model.\n \n For CREATION/first time training only. To continue training an already existing\n model, use update_model().\n\n Parameters\n -----------\n corpus_path (str) - path to the corpus you wish to train the model with\n \n pmodel_name (str) - the name to be assigned to the model when saved. Must be unique\n or error will be raised to avoid overwriting an existing model\n\n epochs (int, optional) - Number of times to iterate over training corpus during training\n\n pmin_count (int, optional) - Minimum frequency for a word to be used in training\n\n psize (int, optional) - Size of vectors for training\n\n Returns:\n -----------\n True if model created/trained, False if could not be created\n\n Throws\n -----------\n FileNotFoundError - If corpus_path not found\n RuntimeError - If training an already existing model that makes it past first if statement. This\n is because build_vocab raises RuntimeError if building existing vocab without update=True (see update_model)\n \"\"\"\n\n if pmodel_name[-4:] != '.bin':\n pmodel_name = pmodel_name + '.bin'\n\n if os.path.exists(os.path.join(IKFastTextModeling.__PATH_PREFIX__, pmodel_name)):\n raise FileExistsError(\"Model named {} already exists, model could not be created\".format(pmodel_name[:-4]))\n \n model = ft.FastText(vector_size=psize, sg=1, min_count=pmin_count)\n\n super().create_new_model(corpus_path, model, epochs)\n\n ft.save_facebook_model(model, path=os.path.join(IKFastTextModeling.__PATH_PREFIX__, pmodel_name))\n return True\n\n\n @classmethod\n def update_model(cls, corpus_path, pmodel_name, use_iknow_entities=True, tokenize_concepts=True):\n \"\"\" Updates an already existing model by continuing its training\n on a new corpus.\n\n Parameters\n -----------\n corpus_path (str) - path to the corpus being used to update the model\n \n pmodel_name (str, optional) - The name of the model to be updated, defaults to the\n model currently in use\n\n Return\n -----------\n True if model was updated, else False\n\n Throws\n -----------\n FileNotFoundError - if corpus or model not found\n \"\"\"\n\n try:\n if pmodel_name[-4:] != '.bin':\n pmodel_name = pmodel_name + '.bin'\n path = os.path.join(IKFastTextModeling.__PATH_PREFIX__, pmodel_name)\n model = ft.load_facebook_model(path)\n\n super().update_model(corpus_path, model, use_iknow_entities, tokenize_concepts)\n\n # Clear current contents of folders storing model and KeyedVectors files as gensim doesn't do it\n os.remove(path)\n \n ft.save_facebook_model(model, path=path)\n \n except FileNotFoundError as err:\n raise FileNotFoundError(\"Model could not be updated, check specified corpus and model names\") from err\n\n\n\nclass IKWord2VecModeling(IKSimilarityModeling):\n \"\"\" Subclass of IKSimilarityModeling\n\n Contains methods to train (create new) and retrain (update existing) models.\n\n Methods (class methods):\n create_new_model(cls, corpus_path, pmodel_name, updateable=True, epochs=5, pmin_count=5, psize=300):\n Creates a new model pmodel_name, trained on corpus found at 
corpus_path. If updateable, will save the entire model\n        to be reloaded for continued training (updating) at a later time, otherwise only save vectors. Makes epochs number of passes\n        over the corpus. Words in the corpus must appear pmin_count times to be considered. Vectors will be of \n        dimension psize.\n\n    update_model(cls, corpus_path, pmodel_name, use_iknow_entities=True, tokenize_concepts=True):\n        Updates the model pmodel_name, training on corpus found at corpus_path. If use_iknow_entities, will use \n        iKnow entities in training. If tokenize_concepts, will turn iKnow entities into singular tokens, joined by underscores\n        (_).\n    \"\"\"\n    \n    __MODEL_PATH_PREFIX__ = os.path.join(os.path.dirname(__file__),'models', 'word2vec', 'trained_models')\n    __VECTOR_PATH_PREFIX__ = os.path.join(os.path.dirname(__file__),'models', 'word2vec', 'vectors')\n\n    \n    @classmethod\n    def create_new_model(cls, corpus_path, pmodel_name, updateable=True, epochs=5, pmin_count=5, psize=150):\n        \"\"\" Creates and trains (and optionally saves) a model using gensim's implementation \n        of the Word2Vec algorithm, and then loads the KeyedVectors associated with that model.\n        \n        For CREATION/first time training only. If updateable is True, the full model is saved as well so training\n        can be continued later via update_model(); otherwise only the vectors are stored. \n\n        Parameters\n        -----------\n        corpus_path (str) - path to the corpus you wish to train the model with\n        \n        pmodel_name (str) - the name to be assigned to the model when saved. Must be unique\n        or an error will be raised to avoid overwriting an existing model\n\n        Returns:\n        -----------\n        True if model created/trained, False if could not be created\n\n        Throws\n        -----------\n        FileNotFoundError - If corpus_path not found\n        RuntimeError - If training an already existing model that makes it past first if statement. 
This\n is because build_vocab raises RuntimeError if building existing vocab without update=True (see update_model)\n \"\"\"\n \n if pmodel_name[-4:] != '.bin':\n pmodel_name = pmodel_name + '.bin'\n\n # check if same name model exists by checking for vectors because vectors are always saved\n if os.path.exists(os.path.join(IKWord2VecModeling.__VECTOR_PATH_PREFIX__, pmodel_name)):\n raise FileExistsError(\"Model named {} already exists, model could not be created\".format(pmodel_name[:-4]))\n \n model = Word2Vec(vector_size=psize, sg=1, min_count=pmin_count)\n\n super().create_new_model(corpus_path, model, epochs)\n\n if updateable:\n model.save(os.path.join(IKWord2VecModeling.__MODEL_PATH_PREFIX__, pmodel_name[:-4]))\n\n model.wv.save_word2vec_format(os.path.join(IKWord2VecModeling.__VECTOR_PATH_PREFIX__, pmodel_name), binary=True)\n return True\n\n\n @classmethod\n def update_model(cls, corpus_path, pmodel_name, use_iknow_entities=True, tokenize_concepts=True):\n \"\"\" Updates an already existing model by continuing its training\n on a new corpus.\n\n Parameters\n -----------\n corpus_path (str) - path to the corpus being used to update the model\n \n pmodel_name (str, optional) - The name of the model to be updated, defaults to the\n model currently in use\n\n Return\n -----------\n True if model was updated, else False\n\n Throws\n -----------\n FileNotFoundError - if corpus or model not found\n \"\"\"\n\n try:\n if pmodel_name[-4:] != '.bin':\n pmodel_name = pmodel_name + '.bin'\n model = Word2Vec.load(os.path.join(IKWord2VecModeling.__MODEL_PATH_PREFIX__, pmodel_name[:-4]))\n \n super().update_model(corpus_path, model, use_iknow_entities, tokenize_concepts)\n\n # Clear current contents of folders storing model and KeyedVectors files as gensim doesn't do it\n os.remove(os.path.join(IKWord2VecModeling.__MODEL_PATH_PREFIX__, pmodel_name[:-4]))\n os.remove(os.path.join(IKWord2VecModeling.__VECTOR_PATH_PREFIX__, pmodel_name))\n \n model.save(fname_or_handle=os.path.join(IKWord2VecModeling.__MODEL_PATH_PREFIX__, pmodel_name[:-4]))\n model.wv.save_word2vec_format(os.path.join(IKWord2VecModeling.__VECTOR_PATH_PREFIX__, pmodel_name), binary=True)\n \n except FileNotFoundError as err:\n raise FileNotFoundError(\"Model could not be updated, check specified corpus and model names\") from err\n\n\n\nclass SentenceIterator(object):\n \"\"\" An iterator to handle a training corpus that is on multiple files with a given\n directory. \n\n This is necessary to interface with gensim when your corpus is split across files.\n\n Parameters\n -----------\n corpus_path (str) - A path to a file or a directory containing multiple corpus files\n\n use_iknow_entities (bool) - If True, preprocess lines of the corpus using iKnow enginge so that\n only path relevant entities will be included for training\n\n tokenize_concepts (bool) - If True, when preprocessing lines from the corpus with iKnow engine, \n replace concepts with singular tokens (e.g. 
heart attack -> heart_attack)\n    \"\"\"\n    def __init__(self, corpus_path, use_iknow_entities, tokenize_concepts):\n        self.corpus_path = corpus_path\n        self.is_dir = os.path.isdir(corpus_path)\n        self.use_iknow_entities = use_iknow_entities\n        self.tokenize_concepts = tokenize_concepts\n        if use_iknow_entities:\n            iknowpy = __import__(\"iknowpy\") # not a global import to avoid making user install iknowpy unless they need to use functionality\n            # the engine is only needed (and iknowpy only imported) when iKnow entities are used\n            self.engine = iknowpy.iKnowEngine()\n        \n    def __iter__(self):\n        if self.is_dir:\n            for fname in os.listdir(self.corpus_path):\n                for line in open(os.path.join(self.corpus_path, fname)):\n                    if self.use_iknow_entities:\n                        self.engine.index(line, 'en')\n                        for s in self.engine.m_index['sentences']:\n                            sent = []\n                            for p in s['path']:\n                                if s['entities'][p]['type'] == 'Concept' and self.tokenize_concepts:\n                                    sent.append(s['entities'][p]['index'].replace(\" \", \"_\")) \n                                else:\n                                    sent.append(s['entities'][p]['index'])\n                            yield sent\n                    else:\n                        yield line.split()\n        else:\n            for line in open(self.corpus_path):\n                if self.use_iknow_entities:\n                    self.engine.index(line, 'en')\n                    for s in self.engine.m_index['sentences']:\n                        sent = []\n                        for p in s['path']:\n                            if s['entities'][p]['type'] == 'Concept' and self.tokenize_concepts:\n                                sent.append(s['entities'][p]['index'].replace(\" \", \"_\")) \n                            else:\n                                sent.append(s['entities'][p]['index'])\n                        # yield one token list per sentence, mirroring the directory branch above\n                        yield sent\n                else:\n                    yield line.split()", "id": "5228509", "language": "Python", "matching_score": 3.751188278198242, "max_stars_count": 0, "path": "synonymdetection/iksimilarity.py" }, { "content": "import csv\nfrom iksimilarity import IKFastTextTools as IKST, IKWord2VecTools as IKW2V\nimport os\n\n\"\"\" This script will run through 1 or more saved models and evaluate their performance.\n    To do so, the code makes use of a few publicly available datasets that provide multiple word pairs \n    along with average rankings of similarity/relatedness from human subjects. For each such dataset, the \n    Spearman correlation coefficient is computed by comparing the human scores for each pair to the scores\n    given by the model for each pair. The datasets contain different pairs.\n\n    To evaluate model(s), simply run this script. When run, it will list out the names of all models saved \n    on disk. It then prompts you, where you can input one model name or a few of the given model names\n    separated by commas. \n    e.g:\n        my_model\n        my_first_model, my_second_model\n    \n    Since this script was written to test performance of a number of different models, it is capable of testing both\n    fastText and Word2Vec models. As such, it makes some assumptions: if a model name contains 'w2v' (in some form), \n    it will treat the model as a Word2Vec model, otherwise it will treat it as fastText. \n\n    To be tested, the model must have vectors saved in the models/word2vec/keyed_vectors directory (for Word2Vec models) or\n    in the models/fasttext directory (for fastText models). If the models being tested were created through the iksimilarity\n    module, this is where models of each type are stored.\n\n    The datasets included are cited below.\n\n    SimLex-999 (SimLex-999: Evaluating Semantic Models with (Genuine) Similarity Estimation. 2014. <NAME>, <NAME> and <NAME>.)\n    MTURK-771 (<NAME>, <NAME>, <NAME>, <NAME>: Large-scale learning of word relatedness with constraints. 
KDD 2012: 1406-1414)\n wordsim353 (<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>oroa, A Study on Similarity and Relatedness Using Distributional and WordNet-based Approaches, In Proceedings of NAACL-HLT 2009.)\n Stanford Rare Words Dataset (Luong, Minh-Thang and Socher, Richard and Manning, <NAME>. Better Word Representations with Recursive Neural Networks for Morphology. 2013)\n MEN Test Collection (Multimodal Distributional Semantics <NAME>, <NAME> and <NAME>. Journal of Artificial Intelligence Research 49: 1-47.)\n\n Output\n -----------\n A csv file (for each model) with the correlation coeff. for each dataset used for comparison, saved in the\n datasets directory.\n\n Note that Spearman coefficients are only one way to evaluate such models, they are not certain measures. \n\"\"\"\n\ndef main():\n print(\"Current models available for evaluation:\\n\")\n for folder in os.listdir('models/word2vec/keyed_vectors'):\n if not folder.startswith('.'):\n print(folder)\n for folder in os.listdir('models/fasttext'):\n if not folder.startswith('.'):\n print(folder)\n models = input(\"\\nInput one of the above model names, or multiple names separated by commas. For all, enter 'a': \").split(',')\n if models[0] == 'a':\n models = os.listdir('models/word2vec/keyed_vectors')\n for model_name in models:\n if model_name.startswith('.') or model_name.endswith('.txt'): # If using all models, ignore hidden files in the dir and ignore txt files\n continue\n model_name = model_name.strip()\n if 'w2v' in model_name.lower():\n test_tool = IKW2V(pmodel_name=model_name)\n else:\n test_tool = IKST(pmodel_name=model_name)\n ws353 = test_dataset(test_tool, 'ws353')\n ws353_sim = test_dataset(test_tool, 'ws353_s')\n ws353_rel = test_dataset(test_tool, 'ws353_r')\n rw = test_dataset(test_tool, 'rw')\n men = test_dataset(test_tool, 'men')\n mturk771 = test_dataset(test_tool, 'mturk771')\n simlex999 = test_dataset(test_tool, 'simlex999')\n outputfile = 'datasets/testoutput{}.csv'.format(model_name)\n with open(outputfile, 'a+', newline='') as output:\n writer = csv.writer(output, delimiter=',')\n rows = [['Dataset', 'Spearman', 'OOV Ratio'],['WordSim 353', ws353[0], ws353[1]],['WordSim 353 Similarity', ws353_sim[0], ws353_sim[1]],\n ['WordSim 353 Relatedness', ws353_rel[0], ws353_rel[1]], ['Rare Words', rw[0], rw[1]], ['MEN', men[0], men[1]], ['MTURK-771', mturk771[0], mturk771[1]], \n ['SimLex-999', simlex999[0], simlex999[1]]]\n writer.writerows(rows)\n print(\"Evaluation of model %s complete, see datasets folder for output.\" % model_name)\n\ndef test_dataset(test_tool, dataset):\n if dataset == 'ws353_s':\n word_pairs = 'datasets/wordsim353_sim_rel/wordsim_similarity_goldstandard.txt'\n delim = '\\t'\n elif dataset == 'ws353_r': \n word_pairs = 'datasets/wordsim353_sim_rel/wordsim_relatedness_goldstandard.txt'\n delim = '\\t'\n elif dataset == 'ws353':\n word_pairs = 'datasets/wordsim353_sim_rel/wordsim353_agreed.txt'\n delim = '\\t'\n elif dataset == 'rw':\n word_pairs = 'datasets/rw/rw.txt'\n delim = '\\t'\n elif dataset == 'men':\n word_pairs = 'datasets/MEN/MEN_dataset_natural_form_full'\n delim = ' '\n elif dataset == 'mturk771':\n word_pairs = 'datasets/MTURK-771.csv'\n delim = ','\n elif dataset == 'simlex999':\n word_pairs = 'datasets/SimLex-999/SimLex-999.txt'\n delim = '\\t'\n\n result = test_tool.evaluate_word_pairs(word_pairs=word_pairs, delimiter=delim, case_insensitive=True)\n\n return (result[0], result[1])\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "1444594", "language": "Python", 
"matching_score": 0.3511701822280884, "max_stars_count": 0, "path": "synonymdetection/modelevaluation.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 8 16:48:22 2021\n@author: sdebergh\n\n# This Python file uses the following encoding: utf-8\n\n find_examples_for_rule.py is a tool to find sentences in which a given rule is applied. \n Usage: \"python find_examples_for_rule.py <text files directory> <output file> <language> <rule number>\"\n Example (on Windows): \"python find_examples_for_rule.py C:\\TextCorpus\\English\\Financial\\ C:\\output\\ en 531\"\n -> find examples for rule 531 of the English language model\n\"\"\"\n\nimport sys, os\n\n# do \"pip install iknowpy\" if iknowpy is not installed\nimport iknowpy\n\n# read command line\nin_path_par = sys.argv[1]\nout_path_par = sys.argv[2]\nlanguage_par = sys.argv[3]\nrule_number = sys.argv[4]\n\n\n# initiate variables \nmapping_file = language_par + \"_compiler_report.log\" # detect applicable xx_compiler_report.log based on language code\nmapping_table = {} \nf_rec = []\nengine = iknowpy.iKnowEngine()\n \n\nprint('Looking for examples for rule ' + rule_number + ' of the ' + language_par + ' language model in ' + in_path_par)\n\n\n# functions\n# add a line in the output file\ndef write_ln(file_,text_):\n file_.write((text_+\"\\r\\n\").encode('utf8'))\n\n# create a mapping table for rule numbers, based on xx_compiler_report.log\ndef create_mapping_table(mapping_file):\n read_mapping_file = open(mapping_file, encoding='utf-8')\n for line in read_mapping_file:\n if line != '\\n':\n mapping = line.split()[0]\n if ':' in mapping:\n mapping_table[mapping.split(':')[0]] = mapping.split(':')[1]\n\n# find the matching number in the mapping table\ndef extract_rule_id(rule_order):\n rule_id = mapping_table[rule_order]\n return rule_id\n\n\n\n# make a list of input file (recursive list of files, .txt only) - copied from https://stackoverflow.com/questions/18394147/recursive-sub-folder-search-and-return-files-in-a-list-python\nf_rec = [os.path.join(dp, f) for dp, dn, filenames in os.walk(in_path_par) for f in filenames if\n os.path.splitext(f)[1].lower() == '.txt']\n\n\n# create mapping table for rule numbers\ncreate_mapping_table(mapping_file)\n\n\n# open output file and add UTF-8 BOM and information about the content of the file\nif os.path.exists(out_path_par):\n os.remove(out_path_par)\nf_output = open(out_path_par, \"ab\")\nf_output.write(b'\\xef\\xbb\\xbf') # Utf8 BOM\nwrite_ln(f_output, 'Examples for rule ' + rule_number + ' of the ' + language_par + ' language model in ' + in_path_par + '\\n')\n\n# read input files one by one\nfor text_file in f_rec:\n print('processing ' + text_file)\n f_text = open(text_file, \"rb\")\n header = f_text.read(3)\n if (header == b'\\xef\\xbb\\xbf'): # check for Utf8 BOM\n header = b'' # remove BOM\n text = header + f_text.read() # read text, must be utf8 encoded\n text = text.decode('utf8') # decode text to Unicode\n f_text.close()\n\n # index input file\n engine.index(text, language_par, traces=True)\n\n # read trace output\n for trace in engine.m_traces:\n# print(trace)\n key, value = trace.split(':', 1)[0],trace.split(':', 1)[1]\n # store the sentence\n if (key == \"SentenceFound\"):\n Sentence = value.split('\"')[7]\n # check if the demanded rule is applied to process the sentence \n elif (key == \"RuleApplication\"):\n # rule_id in trace refers actually to rule order -> retrieve rule order value\n rule_order = value.split(';')[0].split('=')[1]\n # extract the number that 
corresponds to the rule id in rules.csv from compiler_report.log\n rule_id = extract_rule_id(rule_order)\n # if the rule id corresponds to the demanded rule number, look for the concerned lexreps \n if rule_id == rule_number:\n lexreps = value.split(';')[3:]\n lexreps = str(lexreps)\n lexreps_indexes = ''\n while 'index=' in lexreps:\n lexreps_indexes = lexreps_indexes + ' ' + lexreps[lexreps.find('index=\\\"')+7:lexreps.find('labels=')-2]\n lexreps = lexreps[lexreps.find('labels=')+7:] # cut off left part of lexreps information in order to julp to the next lexrep\n # add the concerned lexrep(s) and the sentence to the output\n #print(lexreps_indexes.lstrip())\n write_ln(f_output, lexreps_indexes.lstrip() + ';' + Sentence)\n\n\nf_output.close()", "id": "7495452", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "language_development/find_examples_for_rule.py" }, { "content": "import setuptools\n\n\n# For compatibility with build front-ends that don't support PEP 517\nsetuptools.setup()\n", "id": "7343168", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "setup.py" } ]
0.175585
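The test_dataset() helper in modelevaluation.py above selects a word-pair file and delimiter through a long if/elif chain; the same dispatch reads more clearly as a lookup table. A minimal sketch, reusing the relative paths and the evaluate_word_pairs(...) call shown in that file (an illustration, not a tested drop-in replacement):

# Lookup-table form of the dataset dispatch; paths and delimiters are copied from modelevaluation.py.
WORD_PAIR_FILES = {
    'ws353':     ('datasets/wordsim353_sim_rel/wordsim353_agreed.txt', '\t'),
    'ws353_s':   ('datasets/wordsim353_sim_rel/wordsim_similarity_goldstandard.txt', '\t'),
    'ws353_r':   ('datasets/wordsim353_sim_rel/wordsim_relatedness_goldstandard.txt', '\t'),
    'rw':        ('datasets/rw/rw.txt', '\t'),
    'men':       ('datasets/MEN/MEN_dataset_natural_form_full', ' '),
    'mturk771':  ('datasets/MTURK-771.csv', ','),
    'simlex999': ('datasets/SimLex-999/SimLex-999.txt', '\t'),
}

def test_dataset(test_tool, dataset):
    word_pairs, delim = WORD_PAIR_FILES[dataset]      # KeyError for unknown dataset names
    result = test_tool.evaluate_word_pairs(
        word_pairs=word_pairs, delimiter=delim, case_insensitive=True)
    return result[0], result[1]                       # (Spearman coefficient, OOV ratio)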
7a6163
[ { "content": "import os\n\nimport click\nfrom tabulate import tabulate\n\nfrom kobodl import actions, cli\nfrom kobodl.globals import Globals\n\n\ndef decorators(book):\n append = ''\n if book.Audiobook:\n append += ' (🎧 Audiobook)'\n if book.Archived:\n append += ' (🗄️ Archived)'\n return append\n\n\[email protected](name='book', short_help='list and download books')\ndef book():\n pass\n\n\[email protected](name='get', short_help='download book')\[email protected](\n '-u',\n '--user',\n type=click.STRING,\n help='Required when multiple accounts exist. Use either Email or UserKey',\n)\[email protected](\n '-o',\n '--output-dir',\n type=click.Path(file_okay=False, dir_okay=True, writable=True),\n default='kobo_downloads',\n)\[email protected]('-a', '--get-all', is_flag=True)\[email protected]('product-id', nargs=-1, type=click.STRING)\[email protected]_obj\ndef get(ctx, user, output_dir, get_all, product_id):\n if len(Globals.Settings.UserList.users) == 0:\n click.echo('error: no users found. Did you `kobodl user add`?', err=True)\n exit(1)\n\n if not user:\n if len(Globals.Settings.UserList.users) > 1:\n click.echo('error: must provide --user option when more than 1 user exists.')\n exit(1)\n # Exactly 1 user account exists\n usercls = Globals.Settings.UserList.users[0]\n else:\n # A user was passed\n usercls = Globals.Settings.UserList.getUser(user)\n if not usercls:\n click.echo(f'error: could not find user with name or id {user}')\n exit(1)\n\n if get_all and len(product_id):\n click.echo(\n 'error: cannot pass product IDs when --get-all is used. Use one or the other.',\n err=True,\n )\n exit(1)\n if not get_all and len(product_id) == 0:\n click.echo('error: must pass at least one Product ID, or use --get-all', err=True)\n exit(1)\n\n os.makedirs(output_dir, exist_ok=True)\n if get_all:\n actions.GetBookOrBooks(usercls, output_dir)\n else:\n for pid in product_id:\n output = actions.GetBookOrBooks(usercls, output_dir, productId=pid)\n\n\[email protected](name='list', help='list books')\[email protected](\n '-u',\n '--user',\n type=click.STRING,\n required=False,\n help='Limit list to a single user. 
Use either Email or UserKey',\n)\[email protected]('--read', is_flag=True, help='include books marked as read')\[email protected](\n '--export-library',\n type=click.File(mode='w'),\n help='filepath to write raw JSON library data to.',\n)\[email protected]_obj\ndef list(ctx, user, read, export_library):\n userlist = Globals.Settings.UserList.users\n if user:\n userlist = [Globals.Settings.UserList.getUser(user)]\n books = actions.ListBooks(userlist, read, export_library)\n headers = ['Title', 'Author', 'RevisionId', 'Owner']\n data = sorted(\n [\n (book.Title + decorators(book), book.Author, book.RevisionId, book.Owner.Email,)\n for book in books\n ]\n )\n click.echo(tabulate(data, headers, tablefmt=ctx['fmt']))\n\n\ncli.add_command(book)\n", "id": "8170829", "language": "Python", "matching_score": 3.9755208492279053, "max_stars_count": 0, "path": "kobodl/commands/book.py" }, { "content": "import click\nfrom tabulate import tabulate\n\nfrom kobodl import actions, cli\nfrom kobodl.globals import Globals\nfrom kobodl.kobo import Kobo\nfrom kobodl.settings import User\n\n\[email protected](name='user', short_help='show and create users')\ndef user():\n pass\n\n\[email protected](name='list', help='list all users')\[email protected]_obj\ndef list(ctx):\n userlist = Globals.Settings.UserList.users\n headers = ['Email', 'UserKey', 'DeviceId']\n data = sorted([(user.Email, user.UserKey, user.DeviceId,) for user in userlist])\n click.echo(tabulate(data, headers, tablefmt=ctx['fmt']))\n\n\[email protected](name='rm', help='remove user by Email, UserKey, or DeviceID')\[email protected]('identifier', type=click.STRING)\[email protected]_obj\ndef list(ctx, identifier):\n removed = Globals.Settings.UserList.removeUser(identifier)\n if removed:\n Globals.Settings.Save()\n click.echo(f'Removed {removed.Email}')\n else:\n click.echo(f'No user with email, key, or device id that matches \"{identifier}\"')\n\n\[email protected](name='add', help='add new user')\[email protected]('--email', prompt=True, hide_input=False, type=click.STRING, help=\"kobo.com email.\")\[email protected]_option(help=\"kobo.com password (not stored)\")\[email protected]_obj\ndef add(ctx, email, password):\n user = User(Email=email)\n click.echo(\n \"\"\"\n Open https://authorize.kobo.com/signin in a private/incognito window in your browser, wait till the page\n loads (do not login!) then open the developer tools (use F12 in Firefox/Chrome), select the console tab,\n and paste the following code there and then press Enter there in the browser.\n\n var newCaptchaDiv = document.createElement( \"div\" );\n newCaptchaDiv.id = \"new-grecaptcha-container\";\n document.getElementById( \"grecaptcha-container\" ).insertAdjacentElement( \"afterend\", newCaptchaDiv );\n grecaptcha.render( newCaptchaDiv.id, {\n sitekey: \"<KEY>\",\n callback: function( response ) { console.log( \"Captcha response:\" ); console.log( response ); }\n } );\n\n A captcha should show up below the Sign-in form. Once you solve the captcha its response will be written\n below the pasted code in the browser's console. Copy the response (the line below \"Captcha response:\")\n and paste it here.\n \"\"\"\n )\n captcha = input('Captcha response: ').strip()\n actions.Login(user, password, captcha)\n Globals.Settings.UserList.users.append(user)\n Globals.Settings.Save()\n click.echo('Login Success. 
Try to list your books with `kobodl book list`')\n\n\ncli.add_command(user)\n", "id": "11232985", "language": "Python", "matching_score": 0.8041431307792664, "max_stars_count": 0, "path": "kobodl/commands/user.py" }, { "content": "import os\n\nfrom setuptools import find_packages, setup\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='kobodl',\n author='<NAME>',\n version='0.5.0',\n author_email='<EMAIL>',\n url=\"https://github.com/subdavis/kobo-book-downloader\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'bs4',\n 'click',\n 'dataclasses',\n 'dataclasses-json',\n 'flask',\n 'pycryptodome',\n 'requests',\n 'tabulate',\n ],\n license='MIT',\n entry_points={'console_scripts': ['kobodl = kobodl:cli'],},\n python_requires='>=3.6',\n setup_requires=['setuptools-git'],\n)\n", "id": "9340356", "language": "Python", "matching_score": 1.0601379871368408, "max_stars_count": 0, "path": "setup.py" }, { "content": "import click\nfrom plex_trakt_sync.factory import factory\nfrom plex_trakt_sync.version import git_version_info\n\n\[email protected]()\[email protected]('input')\ndef inspect(input):\n \"\"\"\n Inspect details of an object\n \"\"\"\n\n git_version = git_version_info() or 'Unknown version'\n print(f\"PlexTraktSync inspect [{git_version}]\")\n\n plex = factory.plex_api()\n trakt = factory.trakt_api()\n\n if input.isnumeric():\n input = int(input)\n\n m = plex.fetch_item(input)\n print(f\"Inspecting: {m}\")\n\n url = plex.media_url(m)\n print(f\"URL: {url}\")\n\n media = m.item\n print(f\"Media.Guid: '{media.guid}'\")\n print(f\"Media.Guids: {media.guids}\")\n\n if media.type in [\"episode\", \"movie\"]:\n audio = media.media[0].parts[0].audioStreams()[0]\n print(f\"Audio: '{audio.audioChannelLayout}', '{audio.displayTitle}'\")\n\n video = media.media[0].parts[0].videoStreams()[0]\n print(f\"Video: '{video.codec}'\")\n\n print(\"Guids:\")\n for guid in m.guids:\n print(f\" Guid: {guid}, Id: {guid.id}, Provider: {guid.provider}\")\n\n print(f\"Metadata: {m.to_json()}\")\n\n try:\n tm = trakt.find_by_media(m)\n print(f\"Trakt match: {tm}\")\n except Exception as e:\n print(f\"Error: {e}\")\n", "id": "2267832", "language": "Python", "matching_score": 0.7118090987205505, "max_stars_count": 0, "path": "plex_trakt_sync/commands/inspect.py" }, { "content": "import dataclasses\nimport os\nfrom typing import List, Union\n\nfrom dataclasses_json import dataclass_json\n\n\n@dataclass_json\[email protected]\nclass User:\n Email: str\n DeviceId: str = \"\"\n AccessToken: str = \"\"\n RefreshToken: str = \"\"\n UserId: str = \"\"\n UserKey: str = \"\"\n\n def AreAuthenticationSettingsSet(self) -> bool:\n return len(self.DeviceId) > 0 and len(self.AccessToken) > 0 and len(self.RefreshToken) > 0\n\n def IsLoggedIn(self) -> bool:\n return len(self.UserId) > 0 and len(self.UserKey) > 0\n\n\n@dataclass_json\[email protected]\nclass UserList:\n users: List[User] = dataclasses.field(default_factory=list)\n\n def getUser(self, identifier: str) -> Union[User, None]:\n for user in self.users:\n if (\n user.Email == identifier\n or user.UserKey == identifier\n or user.DeviceId == identifier\n ):\n return user\n return None\n\n def removeUser(self, identifier: str) -> Union[User, None]:\n \"\"\"returns the removed user\"\"\"\n user = self.getUser(identifier)\n 
if user:\n i = self.users.index(user)\n return self.users.pop(i)\n return None\n\n\nclass Settings:\n def __init__(self, configpath=None):\n self.SettingsFilePath = configpath or Settings.__GetCacheFilePath()\n self.UserList = self.Load()\n\n def Load(self) -> UserList:\n if not os.path.isfile(self.SettingsFilePath):\n return UserList()\n with open(self.SettingsFilePath, \"r\") as f:\n jsonText = f.read()\n return UserList.from_json(jsonText)\n\n def Save(self) -> None:\n with open(self.SettingsFilePath, \"w\") as f:\n f.write(self.UserList.to_json(indent=4))\n\n @staticmethod\n def __GetCacheFilePath() -> str:\n cacheHome = os.environ.get(\"XDG_CONFIG_HOME\")\n if (cacheHome is None) or (not os.path.isdir(cacheHome)):\n home = os.path.expanduser(\"~\")\n cacheHome = os.path.join(home, \".config\")\n if not os.path.isdir(cacheHome):\n cacheHome = home\n\n return os.path.join(cacheHome, \"kobodl.json\")\n", "id": "12274932", "language": "Python", "matching_score": 1.5518443584442139, "max_stars_count": 1, "path": "kobodl/settings.py" }, { "content": "import os\n\nfrom flask import Flask, abort, redirect, render_template, request, send_from_directory\n\nfrom kobodl import actions\nfrom kobodl.globals import Globals\nfrom kobodl.settings import User\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return redirect('/user')\n\n\[email protected]('/user', methods=['GET', 'POST'])\ndef users():\n error = None\n if request.method == 'POST':\n print(request.form)\n email = request.form.get('email')\n password = request.form.get('password')\n captcha = request.form.get('captcha')\n print(email, password, captcha)\n if email and password and captcha:\n user = User(Email=email)\n try:\n actions.Login(user, password, captcha)\n Globals.Settings.UserList.users.append(user)\n Globals.Settings.Save()\n except Exception as err:\n error = str(err)\n else:\n error = 'email, password, or captcha missing'\n users = Globals.Settings.UserList.users\n return render_template('users.j2', users=users, error=error)\n\n\[email protected]('/user/<userid>/book', methods=['GET'])\ndef getUserBooks(userid):\n user = Globals.Settings.UserList.getUser(userid)\n if not user:\n abort(404)\n books = actions.ListBooks([user], False, None)\n return render_template('books.j2', books=books)\n\n\[email protected]('/user/<userid>/book/<productid>', methods=['GET'])\ndef downloadBook(userid, productid):\n user = Globals.Settings.UserList.getUser(userid)\n if not user:\n abort(404)\n outputDir = app.config.get('output_dir')\n os.makedirs(outputDir, exist_ok=True)\n # GetBookOrBooks always returns an absolute path\n outputFileName = actions.GetBookOrBooks(user, outputDir, productId=productid)\n absOutputDir, tail = os.path.split(outputFileName)\n # send_from_directory must be given an absolute path to avoid confusion\n # (relative paths are relative to root_path, not working dir)\n return send_from_directory(absOutputDir, tail, as_attachment=True, attachment_filename=tail)\n\n\[email protected]('/book', methods=['GET'])\ndef books():\n userlist = Globals.Settings.UserList.users\n books = actions.ListBooks(userlist, False, None)\n return render_template('books.j2', books=books)\n", "id": "2937650", "language": "Python", "matching_score": 1.7207893133163452, "max_stars_count": 1, "path": "kobodl/app.py" }, { "content": "from kobodl.settings import Settings\n\n\nclass Globals:\n Settings = None\n", "id": "4936628", "language": "Python", "matching_score": 0.31405413150787354, "max_stars_count": 1, "path": 
"kobodl/globals.py" } ]
1.060138
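The kobodl/settings.py record above defines the User and UserList dataclasses that back the `kobodl user add|list|rm` commands. A short sketch of how that layer behaves, assuming the kobodl package is importable; the email and device values below are placeholders, not real accounts:

from kobodl.settings import User, UserList

users = UserList(users=[User(Email='[email protected]', DeviceId='dev-1')])
assert users.getUser('[email protected]') is users.users[0]      # lookup by Email
assert users.getUser('dev-1') is users.users[0]              # ...or by DeviceId
restored = UserList.from_json(users.to_json(indent=4))       # dataclasses-json round trip
assert restored.users[0].Email == '[email protected]'
assert users.removeUser('dev-1') is not None and users.users == []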
theonedemon
[ { "content": "from ..base_notifier import BaseNotifier\nfrom ..utils import HttpClient\n\n\nclass TelegramNotifier(BaseNotifier):\n \"\"\"Telegram bot notifier. See instructions how to create bot at https://core.telegram.org/bots/api\"\"\"\n\n alias: str = 'telegram'\n url: str = 'https://api.telegram.org/bot'\n\n def __init__(self, token: str, chat_id: str):\n \"\"\"\n :param token: Telegram's bot token\n :param chat_id: Telegram's chat ID\n\n \"\"\"\n self.token = token\n self.chat_id = chat_id\n self.client = HttpClient(\n silence_exceptions=True,\n dump_fname_tpl=f'%(ts)s_{self.__class__.__name__}.json',\n json=True,\n )\n super().__init__()\n\n def make_message(self, torrent_data: dict) -> str:\n return (\n 'The following torrents were updated:\\n%s' %\n '\\n'.join(map(lambda t: t['name'], torrent_data.values())))\n\n def test_configuration(self) -> bool:\n response = self.client.request(f'{self.url}{self.token}/getMe')\n return response.get('ok', False)\n\n def send_message(self, msg: str):\n\n url = f'{self.url}{self.token}/sendMessage'\n\n client = self.client\n json_data = client.request(url, data={'chat_id': self.chat_id, 'text': msg})\n\n if json_data is None:\n self.log_error(f'Failed to send Telegram message: {client.last_error}')\n return\n\n response = client.last_response\n\n if response.ok:\n\n if json_data['ok']:\n self.log_debug(f'Telegram message was sent to user {self.chat_id}')\n\n else:\n self.log_error(f\"Telegram notification not send: {json_data['description']}\")\n\n return\n\n self.log_error(\n 'Telegram notification not sent. '\n f'Response code: {response.status_code} ({response.reason})')\n", "id": "8482017", "language": "Python", "matching_score": 2.893599033355713, "max_stars_count": 82, "path": "torrt/notifiers/telegram.py" }, { "content": "import socket\nfrom email.mime.text import MIMEText\nfrom smtplib import SMTP, SMTPAuthenticationError\nfrom typing import Union\n\nfrom ..base_notifier import BaseNotifier\n\n\nclass EmailNotifier(BaseNotifier):\n\n alias: str = 'email'\n\n def __init__(\n self,\n email: str,\n host: str = 'localhost',\n port: Union[str, int] = 25,\n user: str = None,\n password: str = None,\n use_tls: Union[str, bool] = False,\n sender: str = None\n ):\n\n self.email = email\n self.sender = sender\n\n self.host = host\n self.port = int(port)\n self.user = user\n self.password = password\n self.use_tls = str(use_tls) == 'True'\n\n self.connection = self.get_connection()\n\n super().__init__()\n\n def get_connection(self):\n\n try:\n connection = SMTP(self.host, self.port)\n connection.ehlo()\n\n except socket.error as e:\n self.log_error(f'Could not connect to SMTP server: {e}')\n return\n\n if self.use_tls:\n\n try:\n connection.starttls()\n connection.ehlo()\n\n except Exception as e:\n\n self.log_error(f'{e}')\n return\n\n if self.user and self.password:\n\n try:\n connection.login(self.user, self.password)\n\n except SMTPAuthenticationError as e:\n self.log_error(f'{e}')\n return\n\n return connection\n\n def send_message(self, msg: str):\n self.connection.sendmail(self.sender, [self.email], msg)\n\n def test_configuration(self) -> bool:\n return bool(self.connection)\n\n def make_message(self, torrent_data: dict) -> str:\n\n text = (\n 'The following torrents were updated:\\n'\n '%s\\n\\n'\n 'Best regards,\\n'\n 'torrt.' 
%\n '\\n'.join(map(lambda torrent: torrent['name'], torrent_data.values()))\n )\n\n msg = MIMEText(text)\n\n msg['Subject'] = 'New torrents were added to download queue.'\n msg['From'] = self.sender\n msg['To'] = self.email\n\n self.log_info(f'Notification message was sent to user {self.email}')\n\n return msg.as_string()\n", "id": "11394286", "language": "Python", "matching_score": 1.5253187417984009, "max_stars_count": 82, "path": "torrt/notifiers/mail.py" }, { "content": "from typing import List, Any\nfrom urllib.parse import urljoin\n\nfrom ..base_rpc import BaseRPC\nfrom ..exceptions import TorrtRPCException\nfrom ..utils import make_soup, TorrentData\n\n\nclass UTorrentRPC(BaseRPC):\n \"\"\"See http://www.utorrent.com/community/developers/webapi for protocol spec details.\n\n idle sign: What a shame - uTorrent API is a load of mess.\n\n \"\"\"\n alias: str = 'utorrent'\n\n token_page_path: str = 'token.html'\n\n def __init__(\n self,\n url: str = None,\n host: str = 'localhost',\n port: int = 8080,\n user: str = None,\n password: str = None,\n enabled: bool = False\n ):\n self.user = user\n self.password = password\n self.enabled = enabled\n self.host = host\n self.port = port\n self.csrf_token = ''\n\n if url is not None:\n self.url = url\n\n else:\n self.url = f'http://{host}:{port}/gui/'\n\n super().__init__()\n\n def login(self):\n\n try:\n response = self.client.request(\n urljoin(self.url, self.token_page_path),\n auth=(self.user, self.password),\n json=False,\n silence_exceptions=False,\n )\n\n self.csrf_token = make_soup(response.text).find(id='token').text\n\n if not self.csrf_token:\n raise UTorrentRPCException('Unable to fetch CSRF token.')\n\n self.logged_in = True\n\n except Exception as e:\n\n self.log_error(f'Failed to login using `{self.url}` RPC: {e}')\n raise UTorrentRPCException(str(e))\n\n def build_params(self, action: str = None, params: dict = None) -> dict:\n\n document = {'action': action}\n\n if params is not None:\n document.update(params)\n\n return document\n\n def get_request_url(self, params: dict) -> str:\n\n rest = []\n join = lambda l: '&'.join(l)\n\n for param_name, param_val in params.items():\n\n if param_val is None:\n continue\n\n val = param_val\n\n if isinstance(param_val, list):\n val = join(param_val)\n\n rest.append(f'{param_name}={val}')\n\n return f'{self.url}?token={self.csrf_token}&{join(rest)}'\n\n def query(self, data: dict, files: dict = None) -> dict:\n\n action = data['action'] or 'list'\n self.log_debug(f'RPC action `{action}` ...', )\n\n if not self.logged_in:\n self.login()\n\n url = self.get_request_url(data)\n\n request_kwargs = {}\n\n if files is not None:\n request_kwargs['files'] = files\n\n try:\n response = self.client.request(\n url=url, auth=(self.user, self.password), **request_kwargs)\n\n if self.client.last_response.status_code != 200:\n raise UTorrentRPCException(response.text.strip())\n\n except Exception as e:\n\n self.log_error(f'Failed to query RPC `{url}`: {e}')\n raise UTorrentRPCException(str(e))\n\n return response\n\n def method_get_torrents(self, hashes: List[str] = None) -> List[dict]:\n\n result = self.query(self.build_params(params={'list': 1}))\n\n torrents_info = []\n\n for torrent_data in result['torrents']:\n hash_ = torrent_data[0]\n\n if hashes is None or hash_ in hashes:\n\n torrents_info.append({\n 'hash': hash_,\n 'name': torrent_data[2],\n 'download_to': torrent_data[26]\n })\n\n return torrents_info\n\n def method_add_torrent(self, torrent: TorrentData, download_to: str = None, params: dict = 
None) -> Any:\n\n # NB: `download_to` is ignored, as existing API approach to it is crippled.\n file_data = {'torrent_file': ('from_torrt.torrent', torrent.raw)}\n\n return self.query(self.build_params(action='add-file'), file_data)\n\n def method_remove_torrent(self, hash_str: str, with_data: bool = False) -> Any:\n\n action = 'remove'\n\n if with_data:\n action = 'removedata'\n\n return self.query(self.build_params(action=action, params={'hash': hash_str}))\n\n def method_get_version(self) -> str:\n result = self.query(self.build_params(action='getversion'))\n return result['version']['ui_version']\n\n\nclass UTorrentRPCException(TorrtRPCException):\n \"\"\"\"\"\"\n", "id": "1492004", "language": "Python", "matching_score": 5.898915767669678, "max_stars_count": 0, "path": "torrt/rpc/utorrent.py" }, { "content": "from typing import Dict, Any, List\n\nfrom ..base_rpc import BaseRPC\nfrom ..exceptions import TorrtRPCException\nfrom ..utils import base64encode, TorrentData\n\n\nclass DelugeRPC(BaseRPC):\n \"\"\"Requires deluge-webapi plugin to function.\n https://github.com/idlesign/deluge-webapi\n\n \"\"\"\n alias: str = 'deluge'\n\n torrent_fields_map: Dict[str, str] = {\n 'save_path': 'download_to',\n }\n\n def __init__(\n self,\n url: str = None,\n host: str = 'localhost',\n port: int = 8112,\n user: str = None,\n password: str = None,\n enabled: bool = False\n ):\n self.user = user\n self.password = password\n self.enabled = enabled\n self.host = host\n self.port = port\n\n if url is not None:\n self.url = url\n\n else:\n self.url = f'http://{host}:{port}/json'\n\n super().__init__()\n\n def method_login(self) -> bool:\n\n self.log_debug('Logging in ...')\n\n data = self.build_request_payload('auth.login', [self.password])\n\n response = self.query_(data)\n\n if response['result']:\n self.logged_in = True\n return self.method_is_connected()\n\n self.log_error('Login failed')\n\n return False\n\n def method_is_connected(self):\n\n result = self.query(self.build_request_payload('auth.check_session'))\n\n if not result:\n raise DelugeRPCException('Deluge WebUI is not connected to a daemon')\n\n return result\n\n def query_(self, data: dict) -> dict:\n\n response = self.client.request(url=self.url, data=data)\n\n if response is None:\n raise DelugeRPCException(self.client.last_error)\n\n return response\n\n def query(self, data: dict) -> Any:\n\n if not self.logged_in:\n self.method_login()\n\n self.log_debug(f\"RPC method `{data['method']}` ...\")\n\n response = self.query_(data)\n\n if response['error'] is not None:\n raise DelugeRPCException(response['error'])\n\n return response['result']\n\n @staticmethod\n def build_request_payload(method: str, params: list = None) -> dict:\n\n document = {\n 'id': 1,\n 'method': method,\n }\n\n if params is None:\n params = []\n\n document.update({'params': params})\n\n return document\n\n def method_get_torrents(self, hashes: List[str] = None) -> List[dict]:\n\n fields = ['name', 'comment', 'hash', 'save_path']\n\n result = self.query(self.build_request_payload(\n 'webapi.get_torrents', [hashes, fields]))\n\n for torrent_info in result['torrents']:\n self.normalize_field_names(torrent_info)\n\n return result['torrents']\n\n def method_add_torrent(self, torrent: TorrentData, download_to: str = None, params: dict = None) -> Any:\n\n torrent_dump = base64encode(torrent.raw).decode()\n\n return self.query(\n self.build_request_payload(\n 'webapi.add_torrent', [torrent_dump, {'download_location': download_to}]\n )\n )\n\n def method_remove_torrent(self, 
hash_str: str, with_data: bool = False) -> Any:\n return self.query(self.build_request_payload('webapi.remove_torrent', [hash_str, with_data]))\n\n def method_get_version(self) -> str:\n return self.query(self.build_request_payload('webapi.get_api_version'))\n\n\nclass DelugeRPCException(TorrtRPCException):\n \"\"\"\"\"\"\n", "id": "4079263", "language": "Python", "matching_score": 2.552215337753296, "max_stars_count": 82, "path": "torrt/rpc/deluge.py" }, { "content": "import logging\nfrom time import time\nfrom typing import Optional, List, Dict\n\nfrom .base_tracker import GenericPrivateTracker\nfrom .exceptions import TorrtException, TorrtRPCException\nfrom .utils import (\n RPCClassesRegistry, TrackerClassesRegistry, config, get_url_from_string,\n get_iso_from_timestamp, import_classes, structure_torrent_data, get_torrent_from_url, iter_rpc,\n NotifierClassesRegistry, iter_notifiers, BotClassesRegistry, iter_bots, configure_entity,\n TorrentData\n)\n\ntry:\n from envbox import get_environment\n # Allow env vars from .env files.\n environ = get_environment()\n\nexcept ImportError:\n from os import environ\n\nif False: # pragma: nocover\n from .base_rpc import BaseRPC # noqa\n from .base_tracker import BaseTracker # noqa\n from .base_notifier import BaseNotifier # noqa\n from .base_bot import BaseBot # noqa\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef tunnel():\n \"\"\"Try to setup a tunnel for requests.\"\"\"\n tunnel_through = environ.get('TORRT_TUNNEL')\n\n if tunnel_through:\n\n if tunnel_through == 'local':\n # pip install requests[socks]\n tunnel_through = 'socks5://127.0.0.1:9150'\n\n # Instruct `requests` https://requests.readthedocs.io/en/master/user/advanced/#socks\n environ['HTTP_PROXY'] = tunnel_through\n environ['HTTPS_PROXY'] = tunnel_through\n\n\ntunnel()\n\n\ndef configure_logging(log_level: int = logging.INFO, show_logger_names: bool = False):\n \"\"\"Performs basic logging configuration.\n\n :param log_level: logging level, e.g. 
logging.DEBUG\n :param show_logger_names: flag to show logger names in output\n\n \"\"\"\n format_str = '%(levelname)s: %(message)s'\n\n if show_logger_names:\n format_str = '%(name)s\\t\\t ' + format_str\n\n logging.basicConfig(format=format_str, level=log_level)\n requests_logger = logging.getLogger('requests')\n requests_logger.setLevel(logging.ERROR)\n\n\ndef configure_rpc(rpc_alias: str, settings_dict: dict) -> Optional['BaseRPC']:\n \"\"\"Configures RPC using given settings.\n Saves successful configuration.\n\n :param rpc_alias: RPC alias\n :param settings_dict: settings dictionary to configure RPC with\n\n \"\"\"\n def enable(rpc: 'BaseRPC'):\n rpc.enabled = True\n\n return configure_entity('RPC', RPCClassesRegistry, rpc_alias, settings_dict, before_save=enable)\n\n\ndef configure_tracker(tracker_alias: str, settings_dict: dict) -> Optional['BaseTracker']:\n \"\"\"Configures tracker using given settings.\n Saves successful configuration.\n\n :param tracker_alias: tracker alias\n :param settings_dict: settings dictionary to configure tracker with\n\n \"\"\"\n return configure_entity('Tracker', TrackerClassesRegistry, tracker_alias, settings_dict)\n\n\ndef configure_notifier(notifier_alias: str, settings_dict: dict) -> Optional['BaseNotifier']:\n \"\"\"Configures notifier using given settings.\n Saves successful configuration.\n\n :param notifier_alias: notifier alias\n :param settings_dict: settings dictionary to configure notifier with\n\n \"\"\"\n return configure_entity('Notifier', NotifierClassesRegistry, notifier_alias, settings_dict)\n\n\ndef configure_bot(bot_alias: str, settings_dict: dict) -> Optional['BaseBot']:\n \"\"\"Configures bot using given settings.\n Saves successful configuration.\n\n :param bot_alias: bot alias\n :param settings_dict: settings dictionary to configure bot with\n\n \"\"\"\n return configure_entity('Bot', BotClassesRegistry, bot_alias, settings_dict)\n\n\ndef remove_notifier(alias: str):\n \"\"\"Removes notifier by alias\n\n :param alias: Notifier alias to remove.\n\n \"\"\"\n LOGGER.info(f'Removing `{alias}` notifier ...')\n\n config.drop_section('notifiers', alias)\n\n\ndef remove_bot(alias: str):\n \"\"\"Removes bot by alias\n\n :param alias: Bot alias to remove.\n\n \"\"\"\n LOGGER.info(f'Removing `{alias}` bot ...')\n\n config.drop_section('bots', alias)\n\n\ndef init_object_registries():\n \"\"\"Initializes RPC and tracker objects registries with settings from configuration file.\"\"\"\n\n LOGGER.debug('Initializing objects registries from configuration file ...')\n\n cfg = config.load()\n\n settings_to_registry_map = {\n 'rpc': RPCClassesRegistry,\n 'notifiers': NotifierClassesRegistry,\n 'bots': BotClassesRegistry,\n }\n\n for settings_entry, registry_cls in settings_to_registry_map.items():\n\n for alias, settings in cfg[settings_entry].items():\n registry_obj = registry_cls.get(alias)\n registry_obj and registry_obj.spawn_with_settings(settings).register()\n\n # Special case for trackers to initialize public trackers automatically.\n for alias, tracker_cls in TrackerClassesRegistry.get().items():\n\n settings = cfg['trackers'].get(alias)\n\n if settings is None:\n\n if issubclass(tracker_cls, GenericPrivateTracker):\n # No use in registering a private tracker without credentials.\n continue\n\n # Considered public tracker. 
Use default settings.\n\n tracker_cls.spawn_with_settings(settings or {}).register()\n\n\ndef get_registered_torrents() -> dict:\n \"\"\"Returns hash-indexed dictionary with information on torrents\n registered for updates.\n\n \"\"\"\n return config.load()['torrents']\n\n\ndef bootstrap():\n \"\"\"Bootstraps torrt environment,\n Populates RPC and Trackers registries with objects instantiated with settings from config.\n\n \"\"\"\n LOGGER.debug('Bootstrapping torrt environment ...')\n\n import_classes()\n init_object_registries()\n\n\ndef register_torrent(hash_str: str, torrent_data: TorrentData = None, url: str = None):\n \"\"\"Registers torrent within torrt. Used to register torrents that already exists\n in torrent clients.\n\n :param hash_str: torrent identifying hash\n :param torrent_data:\n :param url: fallback url that will be used in case torrent comment doesn't contain url\n\n \"\"\"\n LOGGER.debug(f'Registering `{hash_str}` torrent ...')\n\n if torrent_data is None:\n torrent_data = TorrentData()\n\n if url:\n torrent_data.url = url\n\n cfg = {'torrents': {}}\n structure_torrent_data(cfg['torrents'], hash_str, torrent_data)\n config.update(cfg)\n\n\ndef unregister_torrent(hash_str: str):\n \"\"\"Unregisters torrent from torrt. That doesn't remove torrent\n from torrent clients.\n\n :param hash_str: torrent identifying hash\n\n \"\"\"\n LOGGER.debug(f'Unregistering `{hash_str}` torrent ...')\n\n config.drop_section('torrents', hash_str)\n\n\ndef add_torrent_from_url(url: str, download_to: str = None):\n \"\"\"Adds torrent from a given URL to torrt and torrent clients,\n\n :param url: torrent URL\n :param download_to: path to download files from torrent into (in terms of torrent client filesystem)\n\n \"\"\"\n LOGGER.debug(f'Adding torrent from `{url}` ...')\n\n torrent_data = get_torrent_from_url(url)\n\n if torrent_data is None:\n LOGGER.error(f'Unable to add torrent from `{url}`')\n return\n\n for rpc_alias, rpc_object in iter_rpc():\n rpc_object.method_add_torrent(torrent_data, download_to=download_to)\n register_torrent(torrent_data.hash, torrent_data)\n\n LOGGER.info(f'Torrent from `{url}` is added within `{rpc_alias}`')\n\n\ndef remove_torrent(hash_str: str, with_data: bool = False):\n \"\"\"Removes torrent by its hash from torrt and torrent clients,\n\n :param hash_str: torrent identifying hash\n :param with_data: flag to also remove files from torrent\n\n \"\"\"\n LOGGER.info(f'Removing torrent `{hash_str}` (with data = {with_data}) ...')\n\n for _, rpc_object in iter_rpc():\n LOGGER.info(f'Removing torrent using `{rpc_object.alias}` RPC ...')\n rpc_object.method_remove_torrent(hash_str, with_data=with_data)\n\n unregister_torrent(hash_str)\n\n\ndef set_walk_interval(interval_hours: int):\n \"\"\"Sets torrent updates checks interval (in hours).\n\n :param interval_hours: hours interval\n\n \"\"\"\n config.update({'walk_interval_hours': int(interval_hours)})\n\n\ndef toggle_rpc(alias: str, enabled: bool = True):\n \"\"\"Enables or disables a given RPC.\n\n :param alias: PRC alias\n :param enabled: flag to enable or disable\n\n \"\"\"\n rpc = RPCClassesRegistry.get(alias)\n\n if rpc is not None:\n config.update({'rpc': {alias: {'enabled': enabled}}})\n\n LOGGER.info(f'RPC `{alias}` enabled = {enabled}')\n\n else:\n LOGGER.info(f'RPC `{alias}` class is not registered')\n\n\ndef walk(forced: bool = False, silent: bool = False, remove_outdated: bool = True):\n \"\"\"Performs updates check for the registered torrents.\n\n :param forced: flag to not to count walk interval 
setting\n :param silent: flag to suppress possible torrt exceptions\n :param remove_outdated: flag to remove torrents that are superseded by a new ones\n\n \"\"\"\n LOGGER.info('Torrent walk is triggered')\n\n now = int(time())\n cfg = config.load()\n\n next_time = cfg['time_last_check'] + (cfg['walk_interval_hours'] * 3600)\n\n if forced or now >= next_time:\n LOGGER.info('Torrent walk is started')\n\n updated = {}\n\n try:\n updated = update_torrents(cfg['torrents'], remove_outdated=remove_outdated)\n\n except TorrtException as e:\n if not silent:\n raise\n\n LOGGER.error(f'Walk failed. Reason: {e}')\n\n new_cfg = {\n 'time_last_check': now\n }\n\n if updated:\n\n for old_hash, new_data in updated.items():\n\n try:\n cfg['torrents'].pop(old_hash)\n\n except KeyError:\n # May be already deleted by `update_torrents` if `remove_outdated` is used.\n pass\n\n cfg['torrents'][new_data['hash']] = new_data\n\n new_cfg['torrents'] = cfg['torrents']\n\n for _, notifier in iter_notifiers():\n notifier.send(updated)\n\n # Save updated torrents data into config.\n config.update(new_cfg)\n\n LOGGER.info('Torrent walk is finished')\n\n else:\n LOGGER.info(\n 'Torrent walk postponed '\n f'till {get_iso_from_timestamp(next_time)} '\n f'(now {get_iso_from_timestamp(now)})'\n )\n\n\ndef update_torrents(torrents: Dict[str, dict], remove_outdated: bool = True) -> Dict[str, dict]:\n \"\"\"Performs torrent updates.\n Returns hash-indexed dictionary with information on updated torrents\n\n :param torrents: torrents data indexed with hashes\n :param remove_outdated: flag to remove outdated torrents from torrent clients\n\n \"\"\"\n updated_by_hashes = {}\n download_cache: Dict[str, TorrentData] = {}\n hashes = list(torrents)\n\n for _, rpc_object in iter_rpc():\n\n LOGGER.info(f'Getting torrents from `{rpc_object.alias}` ...')\n rpc_torrents = rpc_object.method_get_torrents(hashes)\n\n if not rpc_torrents:\n LOGGER.info(' No relevant torrents found')\n\n for rpc_torrent in rpc_torrents:\n LOGGER.info(f\" Processing `{rpc_torrent['name']}`...\")\n\n page_url = get_url_from_string(rpc_torrent['comment'])\n if not page_url:\n page_url = torrents[rpc_torrent['hash']].get('url', None) if torrents else None\n\n if not page_url:\n LOGGER.warning(f\" Torrent `{rpc_torrent['name']}` has no link in comment. 
Skipped\")\n continue\n\n if page_url in download_cache:\n tracker_torrent = download_cache[page_url]\n\n else:\n tracker_torrent = get_torrent_from_url(page_url)\n download_cache[page_url] = tracker_torrent\n\n if tracker_torrent is None:\n LOGGER.error(f' Unable to get torrent from `{page_url}`')\n continue\n\n if rpc_torrent['hash'] == tracker_torrent.hash:\n LOGGER.info(' No updates')\n continue\n\n LOGGER.debug(' Update is available')\n\n try:\n rpc_object.method_add_torrent(\n tracker_torrent,\n rpc_torrent['download_to'],\n params=rpc_torrent.get('params', None)\n )\n tracker_torrent.url = page_url\n\n LOGGER.info(' Torrent is updated')\n\n structure_torrent_data(updated_by_hashes, rpc_torrent['hash'], tracker_torrent)\n\n except TorrtRPCException as e:\n LOGGER.error(f' Unable to replace torrent: {e}')\n\n else:\n unregister_torrent(rpc_torrent['hash'])\n\n if remove_outdated:\n rpc_object.method_remove_torrent(rpc_torrent['hash'])\n\n return updated_by_hashes\n\n\ndef run_bots(aliases: List[str] = None):\n \"\"\"Run aliased bots one after another.\n\n :param aliases:\n\n \"\"\"\n aliases = aliases or []\n\n for alias, bot_object in iter_bots():\n\n if aliases and alias not in aliases:\n continue\n\n bot_object.run()\n", "id": "9991141", "language": "Python", "matching_score": 3.2603352069854736, "max_stars_count": 82, "path": "torrt/toolbox.py" }, { "content": "from .utils import WithSettings, NotifierObjectsRegistry, NotifierClassesRegistry\n\n\nclass BaseNotifier(WithSettings):\n \"\"\"Base Notifier class. All Notifier classes should inherit from this.\"\"\"\n\n config_entry_name: str = 'notifiers'\n\n def __init_subclass__(cls, **kwargs):\n if cls.alias:\n NotifierClassesRegistry.add(cls)\n\n def register(self):\n \"\"\"Adds this object into NotificationObjectsRegistry.\"\"\"\n\n NotifierObjectsRegistry.add(self)\n\n def send_message(self, msg: str): # pragma: nocover\n \"\"\"Send prepared message\n\n :param msg: Prepared by notifier backend message\n\n \"\"\"\n raise NotImplementedError\n\n def make_message(self, torrent_data: dict) -> str: # pragma: nocover\n \"\"\"Creates message in format suitable for notifier backend\n\n :param: torrent_data: dictionary with updated torrents data during the walk operation\n\n \"\"\"\n raise NotImplementedError\n\n def test_configuration(self) -> bool:\n \"\"\"This should implement a configuration test, for example check given credentials.\"\"\"\n\n return False\n\n def send(self, updated_data):\n \"\"\"Send message to user\n\n :param: updated_data: dict - dictionary with updated torrents data during the walk operation\n\n \"\"\"\n msg = self.make_message(updated_data)\n self.send_message(msg)\n", "id": "12363325", "language": "Python", "matching_score": 4.130136489868164, "max_stars_count": 82, "path": "torrt/base_notifier.py" }, { "content": "from .utils import WithSettings, BotObjectsRegistry, BotClassesRegistry\n\n\nclass BaseBot(WithSettings):\n\n config_entry_name: str = 'bots'\n\n def __init_subclass__(cls, **kwargs):\n if cls.alias:\n BotClassesRegistry.add(cls)\n\n def register(self):\n \"\"\"Adds this object intoBotObjectsRegistry.\"\"\"\n\n BotObjectsRegistry.add(self)\n\n def test_configuration(self) -> bool:\n \"\"\"This should implement a configuration test, for example check given credentials.\"\"\"\n return False\n\n def run(self):\n \"\"\"Run bot to receive incoming commands.\"\"\"\n", "id": "6760344", "language": "Python", "matching_score": 3.6388742923736572, "max_stars_count": 82, "path": "torrt/base_bot.py" } ]
3.260335
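The torrt files above (base_notifier.py, base_bot.py, toolbox.py) rely on a plug-in pattern: any BaseNotifier subclass that defines a non-empty alias is added to NotifierClassesRegistry via __init_subclass__ and can then be configured by that alias. A minimal sketch of a custom backend, assuming torrt is importable; the console backend and its alias are invented here for illustration:

from torrt.base_notifier import BaseNotifier
from torrt.utils import NotifierClassesRegistry


class ConsoleNotifier(BaseNotifier):
    """Toy notifier: prints the update summary instead of sending it anywhere."""

    alias = 'console'   # a non-empty alias triggers registration in __init_subclass__

    def make_message(self, torrent_data: dict) -> str:
        return 'Updated: %s' % ', '.join(t['name'] for t in torrent_data.values())

    def send_message(self, msg: str):
        print(msg)

    def test_configuration(self) -> bool:
        return True


# The class registry should now resolve the new backend by its alias.
assert NotifierClassesRegistry.get('console') is ConsoleNotifier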
kktsubota
[ { "content": "import argparse\nimport functools\n\nimport numpy as np\nfrom PIL import Image\nimport torch\nfrom torch import nn\nfrom torchvision import transforms\n\nfrom utils.screentone import ToneLabel\n\n\nclass UnetGenerator(nn.Module):\n \"\"\"Create a Unet-based generator\n\n we modify the output layer from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix\n Copyright (c) 2016, <NAME> and <NAME>\n \"\"\"\n\n def __init__(\n self,\n input_nc,\n output_nc,\n num_downs,\n ngf=64,\n norm_layer=nn.BatchNorm2d,\n use_dropout=False,\n last_act=\"tanh\",\n ):\n \"\"\"Construct a Unet generator\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7,\n image of size 128x128 will become of size 1x1 # at the bottleneck\n ngf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n\n We construct the U-Net from the innermost layer to the outermost layer.\n It is a recursive process.\n \"\"\"\n super(UnetGenerator, self).__init__()\n # construct unet structure\n unet_block = UnetSkipConnectionBlock(\n ngf * 8,\n ngf * 8,\n input_nc=None,\n submodule=None,\n norm_layer=norm_layer,\n innermost=True,\n ) # add the innermost layer\n for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters\n unet_block = UnetSkipConnectionBlock(\n ngf * 8,\n ngf * 8,\n input_nc=None,\n submodule=unet_block,\n norm_layer=norm_layer,\n use_dropout=use_dropout,\n )\n # gradually reduce the number of filters from ngf * 8 to ngf\n unet_block = UnetSkipConnectionBlock(\n ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer\n )\n unet_block = UnetSkipConnectionBlock(\n ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer\n )\n unet_block = UnetSkipConnectionBlock(\n ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer\n )\n self.model = UnetSkipConnectionBlock(\n output_nc,\n ngf,\n input_nc=input_nc,\n submodule=unet_block,\n outermost=True,\n norm_layer=norm_layer,\n last_act=last_act,\n ) # add the outermost layer\n\n def forward(self, input):\n \"\"\"Standard forward\"\"\"\n return self.model(input)\n\n\nclass UnetSkipConnectionBlock(nn.Module):\n \"\"\"Defines the Unet submodule with skip connection.\n X -------------------identity----------------------\n |-- downsampling -- |submodule| -- upsampling --|\n\n we modify the output layer from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix\n Copyright (c) 2016, <NAME> and <NAME>\n \"\"\"\n\n def __init__(\n self,\n outer_nc,\n inner_nc,\n input_nc=None,\n submodule=None,\n outermost=False,\n innermost=False,\n norm_layer=nn.BatchNorm2d,\n use_dropout=False,\n last_act=\"tanh\",\n ):\n \"\"\"Construct a Unet submodule with skip connections.\n\n Parameters:\n outer_nc (int) -- the number of filters in the outer conv layer\n inner_nc (int) -- the number of filters in the inner conv layer\n input_nc (int) -- the number of channels in input images/features\n submodule (UnetSkipConnectionBlock) -- previously defined submodules\n outermost (bool) -- if this module is the outermost module\n innermost (bool) -- if this module is the innermost module\n norm_layer -- normalization layer\n user_dropout (bool) -- if use dropout layers.\n \"\"\"\n super(UnetSkipConnectionBlock, self).__init__()\n self.outermost = outermost\n if type(norm_layer) == functools.partial:\n use_bias = 
norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n if input_nc is None:\n input_nc = outer_nc\n downconv = nn.Conv2d(\n input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias\n )\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n down = [downconv]\n\n # original code\n # if last_act == 'tanh':\n # upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n # kernel_size=4, stride=2,\n # padding=1)\n # up = [uprelu, upconv, nn.Tanh()]\n\n # 64 * 2 => 32\n upconv = nn.ConvTranspose2d(\n inner_nc * 2,\n inner_nc // 2,\n kernel_size=4,\n stride=2,\n padding=1,\n bias=use_bias,\n )\n upnorm = norm_layer(inner_nc // 2)\n lastconv = nn.Conv2d(inner_nc // 2, outer_nc, kernel_size=1)\n up = [uprelu, upconv, upnorm, uprelu, lastconv]\n if last_act == \"tanh\":\n up += [nn.Tanh()]\n elif last_act == \"logSoftmax\":\n up += [nn.LogSoftmax(dim=1)]\n else:\n raise NotImplementedError\n\n model = down + [submodule] + up\n elif innermost:\n upconv = nn.ConvTranspose2d(\n inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias\n )\n down = [downrelu, downconv]\n up = [uprelu, upconv, upnorm]\n model = down + up\n else:\n upconv = nn.ConvTranspose2d(\n inner_nc * 2,\n outer_nc,\n kernel_size=4,\n stride=2,\n padding=1,\n bias=use_bias,\n )\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, upnorm]\n\n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n if self.outermost:\n return self.model(x)\n else: # add skip connections\n return torch.cat([x, self.model(x)], 1)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"line\", help=\"line drawing\")\n parser.add_argument(\"--model_path\")\n parser.add_argument(\n \"--out\", default=\"label.png\", help=\"output path of a screentone label\"\n )\n args = parser.parse_args()\n\n with Image.open(args.line) as f:\n img = f.convert(\"L\")\n\n transform = transforms.Compose(\n [\n transforms.Resize((256, 256), transforms.InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,)),\n ]\n )\n img_t = transform(img)\n\n norm_layer = functools.partial(\n nn.BatchNorm2d, affine=True, track_running_stats=True\n )\n net = UnetGenerator(\n 1, 120, 8, 64, norm_layer=norm_layer, use_dropout=True, last_act=\"logSoftmax\"\n )\n if args.model_path is not None:\n state_dict = torch.load(args.model_path)\n net.load_state_dict(state_dict)\n\n # We do not use eval mode to generate dirverse output.\n # So, the output can differ for each run.\n # net.eval()\n\n with torch.no_grad():\n out = net(img_t[None])[0]\n label_data = out.argmax(dim=0)\n\n label = ToneLabel(label_data.numpy().astype(np.uint8))\n label.save(args.out)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "11424183", "language": "Python", "matching_score": 3.8725228309631348, "max_stars_count": 2, "path": "apply_gen.py" }, { "content": "import argparse\n\nimport numpy as np\nfrom PIL import Image\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torchvision import transforms\n\nfrom utils.screentone import ToneLabel\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_size: int, out_size: int, mid_size=None) -> None:\n super(ResBlock, self).__init__()\n if mid_size is None:\n mid_size = out_size\n\n self.layer_1 = nn.Sequential(\n 
nn.Conv2d(in_size, mid_size, 3, padding=1),\n nn.BatchNorm2d(mid_size),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_size, out_size, 3, padding=1),\n nn.BatchNorm2d(out_size),\n )\n self.layer_2 = nn.Sequential(\n nn.Conv2d(in_size, out_size, 3, padding=1), nn.BatchNorm2d(out_size)\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return F.relu(self.layer_1(x) + self.layer_2(x), inplace=True)\n\n\nclass DownResBlock(nn.Module):\n def __init__(self, in_size: int, out_size: int, mid_size=None) -> None:\n super(DownResBlock, self).__init__()\n if mid_size is None:\n mid_size = out_size\n\n self.layer_1 = nn.Sequential(\n nn.Conv2d(in_size, mid_size, 3, padding=1, stride=2),\n nn.BatchNorm2d(mid_size),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_size, out_size, 3, padding=1),\n nn.BatchNorm2d(out_size),\n )\n self.layer_2 = nn.Sequential(\n nn.Conv2d(in_size, out_size, 3, padding=1, stride=2),\n nn.BatchNorm2d(out_size),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return F.relu(self.layer_1(x) + self.layer_2(x), inplace=True)\n\n\nclass UpResBlock(nn.Module):\n def __init__(self, in_size: int, out_size: int, mid_size=None) -> None:\n super(UpResBlock, self).__init__()\n if mid_size is None:\n mid_size = out_size\n\n self.layer_1 = nn.Sequential(\n nn.ConvTranspose2d(\n in_size, mid_size, 3, padding=1, stride=2, output_padding=1\n ),\n nn.BatchNorm2d(mid_size),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_size, out_size, 3, padding=1),\n nn.BatchNorm2d(out_size),\n )\n self.layer_2 = nn.Sequential(\n nn.ConvTranspose2d(\n in_size, out_size, 3, padding=1, stride=2, output_padding=1\n ),\n nn.BatchNorm2d(out_size),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return F.relu(self.layer_1(x) + self.layer_2(x), inplace=True)\n\n\ndef init_weight(layer):\n if isinstance(layer, nn.Linear) or isinstance(layer, nn.Conv2d):\n nn.init.kaiming_normal_(layer.weight, nonlinearity=\"relu\")\n\n\nclass ResidualUNet(nn.Module):\n \"\"\"Residual U-Net\"\"\"\n\n def __init__(\n self,\n in_size: int = 1,\n n_class: int = 11,\n pretrained_model=None,\n feature_scale=1,\n ) -> None:\n super(ResidualUNet, self).__init__()\n self.n_class = n_class\n self.feature_scale = feature_scale\n\n filters = [64, 128, 256, 512, 1024]\n self.filters = [int(x / self.feature_scale) for x in filters]\n\n # downsampling\n self.cb1 = nn.Sequential(\n ResBlock(in_size, self.filters[0]),\n ResBlock(self.filters[0], self.filters[0]),\n )\n self.cb2 = nn.Sequential(\n DownResBlock(self.filters[0], self.filters[1]),\n ResBlock(self.filters[1], self.filters[1]),\n )\n self.cb3 = nn.Sequential(\n DownResBlock(self.filters[1], self.filters[2]),\n ResBlock(self.filters[2], self.filters[2]),\n )\n self.cb4 = nn.Sequential(\n DownResBlock(self.filters[2], self.filters[3]),\n ResBlock(self.filters[3], self.filters[3]),\n )\n self.cb5 = nn.Sequential(\n DownResBlock(self.filters[3], self.filters[4]),\n ResBlock(self.filters[4], self.filters[4]),\n ResBlock(self.filters[4], self.filters[4]),\n )\n\n # upsampling\n self.cb6 = nn.Sequential(\n UpResBlock(self.filters[4], self.filters[3]),\n ResBlock(self.filters[3], self.filters[3]),\n )\n self.cb7 = nn.Sequential(\n UpResBlock(self.filters[3], self.filters[2]),\n ResBlock(self.filters[2], self.filters[2]),\n )\n self.cb8 = nn.Sequential(\n UpResBlock(self.filters[2], self.filters[1]),\n ResBlock(self.filters[1], self.filters[1]),\n )\n self.cb9 = nn.Sequential(\n UpResBlock(self.filters[1], self.filters[0]),\n ResBlock(self.filters[0], self.filters[0]),\n )\n\n 
self.cb10 = nn.Sequential(\n ResBlock(self.filters[0], self.filters[0]),\n )\n\n # final conv (without any concat)\n self.conv_classifier = nn.Conv2d(self.filters[0], n_class, 1)\n\n if pretrained_model:\n self.load_state_dict(torch.load(pretrained_model))\n\n else:\n self.apply(init_weight)\n\n def __call__(self, x: torch.Tensor, with_feat=False, output_label=False):\n\n # (1, 256, 256) -> (64, 256, 256)\n cb1 = self.cb1(x)\n # (64, 256, 256) -> (128, 128, 128)\n cb2 = self.cb2(cb1)\n # (128, 128, 128) -> (256, 64, 64)\n cb3 = self.cb3(cb2)\n # (256, 64, 64) -> (512, 32, 32)\n cb4 = self.cb4(cb3)\n # (512, 32, 32) -> (1024, 16, 16)\n cb5 = self.cb5(cb4)\n cb4 = cb4 + self.cb6(cb5)\n cb3 = cb3 + self.cb7(cb4)\n cb2 = cb2 + self.cb8(cb3)\n cb1 = cb1 + self.cb9(cb2)\n h = self.cb10(cb1)\n y = self.conv_classifier(h)\n\n if output_label:\n y = torch.argmax(y, dim=1)\n\n if with_feat:\n return y, h\n else:\n return y\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"path\", help=\"path for a manga image\")\n parser.add_argument(\n \"--out\", default=\"label-c.png\", help=\"output path of a screentone label\"\n )\n parser.add_argument(\"--n_class\", default=120)\n parser.add_argument(\"--model_path\", default=\"unet.pth\")\n args = parser.parse_args()\n\n model = ResidualUNet(n_class=args.n_class, pretrained_model=args.model_path)\n model = model.eval()\n\n with Image.open(args.path) as img_pil:\n W, H = img_pil.size\n\n H_pad: int = (H + 15) // 16 * 16 - H\n W_pad: int = (W + 15) // 16 * 16 - W\n transform = transforms.Compose(\n [\n transforms.Grayscale(num_output_channels=1),\n transforms.Pad((0, 0, H_pad, W_pad), fill=255),\n transforms.ToTensor(),\n transforms.Normalize((0.0,), (1 / 255.0,)),\n ]\n )\n img_t: torch.Tensor = transform(img_pil)\n\n # (1, n_class, H_pad, W_pad) -> (n_class, H_pad, W_pad) -> (H_pad, W_pad) -> (H, W)\n with torch.no_grad():\n label: torch.Tensor = model(img_t[None])[0].argmax(dim=0)[0:H, 0:W]\n tone_label: ToneLabel = ToneLabel(label.numpy().astype(np.uint8))\n tone_label.save(args.out)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "5775646", "language": "Python", "matching_score": 3.4044840335845947, "max_stars_count": 2, "path": "classify_tone.py" }, { "content": "import argparse\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\nfrom utils.post_proc import uniform_label\nfrom utils.screentone import ToneLabel, ToneImageGenerator\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"line\", help=\"line drawing\")\n parser.add_argument(\"label\", help=\"screentone label\")\n parser.add_argument(\"--out\", default=\"out.png\")\n parser.add_argument(\n \"--out-label\",\n default=\"label-vis.png\",\n help=\"visualization of the screentone label\",\n )\n args: argparse.Namespace = parser.parse_args()\n\n tone_gen: ToneImageGenerator = ToneImageGenerator()\n\n # read a tone label\n label: ToneLabel = ToneLabel.load(args.label)\n label.visualize().save(args.out_label)\n # width, height\n size: tuple = (label.shape[1], label.shape[0])\n\n # read a line drawing\n if args.line is None:\n line = None\n else:\n with Image.open(args.line) as f:\n line = f.convert(\"L\")\n line = line.resize(size, Image.BICUBIC)\n line = np.asarray(line, dtype=np.uint8)\n\n # post-process\n if args.line is not None:\n label.data = uniform_label(label.data, line, thresh=144)\n # render a manga image\n img_rec: np.ndarray = tone_gen.render(label, line)\n # save the manga image\n cv2.imwrite(args.out, img_rec)\n\n\nif 
__name__ == \"__main__\":\n main()\n", "id": "11163909", "language": "Python", "matching_score": 1.9389371871948242, "max_stars_count": 2, "path": "render.py" }, { "content": "from pathlib import Path\nimport warnings\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport torch\n\nfrom .colormap import voc_colormap\n\n\nclass ToneLabel:\n def __init__(self, label: np.ndarray, ignore: set = {0}) -> None:\n assert label.dtype == np.uint8\n self.data = label\n self.ignore = ignore\n\n @classmethod\n def load(cls, path: str, dtype=np.uint8):\n data = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n return cls(data.astype(dtype))\n\n @property\n def shape(self) -> tuple:\n return self.data.shape\n\n def save(self, path: str):\n return cv2.imwrite(path, self.data)\n\n def get_tensor(self) -> torch.Tensor:\n return torch.from_numpy(self.data.astype(np.int32))\n\n def visualize(self) -> Image.Image:\n colors = voc_colormap(range(200)).astype(np.uint8)\n return Image.fromarray(colors[self.data])\n\n\nclass ToneParameter:\n def __init__(self, tone_index: int, tone_type: str, mask, param: dict) -> None:\n self.tone_index = tone_index\n self.tone_type = tone_type\n self.mask = mask\n self.param = param\n\n def __repr__(self) -> str:\n return (\n self.__class__.__name__\n + \"(tone_index={}, tone_type={}, param={})\".format(\n self.tone_index, self.tone_type, self.param\n )\n )\n\n\nclass ToneImageGenerator:\n def __init__(self, data_root: str = \"./data/\") -> None:\n self.tone_dataset = ToneDataset(data_root)\n\n def render(self, label: ToneLabel, line=None) -> np.ndarray:\n params = self.label_to_params(label)\n if line is None:\n # prepare a white image\n shape = params[0]\n img = np.ones(shape, dtype=np.uint8) * 255\n else:\n img = line.copy()\n\n for tone_param in params[1:]:\n if tone_param.tone_type == \"unlabeled\":\n continue\n tone = self.generate(tone_param)\n img = np.minimum(tone, img)\n\n return img.astype(np.uint8)\n\n def label_index_to_param(self, lb: int) -> ToneParameter:\n tone_index = lb - 1\n for tone_type in ToneDataset.tone_types:\n if 0 <= tone_index < len(self.tone_dataset.data[tone_type]):\n break\n tone_index -= len(self.tone_dataset.data[tone_type])\n\n else:\n tone_type = \"unlabeled\"\n assert tone_index == 0\n\n param = dict()\n if tone_type in {\"gradation\", \"dark\"}:\n pass\n elif tone_type == \"effect\":\n param[\"scale\"] = 1.0\n elif tone_type == \"light\":\n param[\"scale_inv\"] = 1.0\n param[\"angle\"] = 0\n\n param[\"value_scale\"] = 1.0\n tone_param = ToneParameter(tone_index, tone_type, None, param)\n\n return tone_param\n\n def label_to_params(self, label: ToneLabel) -> list:\n params = list()\n params.append(label.shape)\n\n # prepare label_set that renders\n label_set = set(np.unique(label.data))\n label_set -= label.ignore\n\n for lb in label_set:\n mask = label.data == lb\n tone_param = self.label_index_to_param(lb)\n tone_param.mask = mask * 255.0\n params.append(tone_param)\n\n return params\n\n def generate(self, tone_param: ToneParameter) -> np.ndarray:\n \"\"\"generate screentones from tone_param\n\n modified from the code by <NAME>.\n \"\"\"\n tile = self.tone_dataset.get(tone_param.tone_index, tone_param.tone_type)\n mask = tone_param.mask\n\n if tone_param.tone_type == \"gradation\":\n result = np.ones(mask.shape, np.float32) * 255.0\n h_tile = tile.shape[0]\n\n mask_bin = mask == 255.0\n xmin, xmax = np.where(np.any(mask_bin, axis=0))[0][[0, -1]]\n ymin, ymax = np.where(np.any(mask_bin, axis=1))[0][[0, -1]]\n\n h_box, w_box = ymax - ymin, 
xmax - xmin\n # NOTE: (h_box: height of a contour rectangular) + 1 <= (h_tile: height of a tone image)\n if h_tile >= h_box:\n crop = tile[0:h_box, 0:w_box]\n result[ymin:ymax, xmin:xmax] = crop\n else:\n warnings.warn(\n \"Unexpected label. Unable to paste gradation.\", RuntimeWarning\n )\n\n elif tone_param.tone_type == \"effect\":\n # height, width for resize\n height_t, width_t = tile.shape\n height, width = mask.shape\n\n scaler = height / float(height_t)\n scalec = width / float(width_t)\n\n scale = (max(scaler, scalec) + 1) / 2\n\n height_t = max(height, int(height_t * scale) + 1)\n width_t = max(width, int(width_t * scale) + 1)\n\n newtile = np.ones((height_t + 1, width_t + 1), np.float32) * 255.0\n effect = cv2.resize(tile, (width_t, height_t), interpolation=cv2.INTER_AREA)\n newtile[0:height_t, 0:width_t] = effect\n\n # center crop\n rr = (0 + height_t - height + 1) // 2\n rc = (0 + width_t - width + 1) // 2\n result = newtile[rr : rr + height, rc : rc + width]\n\n elif tone_param.tone_type == \"dark\":\n height, width = mask.shape\n result = cv2.resize(tile, (width, height), interpolation=cv2.INTER_AREA)\n\n elif tone_param.tone_type == \"light\":\n scale_inv = tone_param.param[\"scale_inv\"]\n angle = tone_param.param[\"angle\"]\n\n height, width = mask.shape\n\n assert scale_inv == 1.0 and (not angle)\n rowtiles = height // tile.shape[0] + 1\n coltiles = width // tile.shape[1] + 1\n tile_dest = np.tile(tile, (rowtiles, coltiles))\n result = tile_dest[:height, :width]\n\n else:\n raise NotImplementedError\n\n # edit\n value_scale = tone_param.param[\"value_scale\"]\n tile = result * value_scale\n tile[tile > 255] = 255\n\n # apply mask\n tile = mask * tile / 255.0 + (255 - mask)\n return tile\n\n\nclass ToneDataset:\n\n tone_types = (\"gradation\", \"effect\", \"light\", \"dark\")\n\n def __init__(self, root: str, grayscale: bool = True) -> None:\n self.root = Path(root)\n data_root = {\n \"light\": \"screenPatterns/light/\",\n \"effect\": \"secretsanta2011/effects/\",\n \"gradation\": \"secretsanta2011/gradations/\",\n }\n self.data = dict()\n for tone_type in self.tone_types:\n if tone_type == \"dark\":\n self.data[tone_type] = [np.zeros((10, 10), dtype=np.uint8)]\n else:\n if tone_type in {\"light\", \"effect\"}:\n paths = sorted((self.root / data_root[tone_type]).glob(\"*\"))\n else:\n paths = sorted((self.root / data_root[tone_type]).glob(\"*/*/*\"))\n color = cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_UNCHANGED\n self.data[tone_type] = [\n cv2.imread(path.as_posix(), color) for path in paths\n ]\n\n def __len__(self) -> int:\n return sum(len(self.data[tone_type]) for tone_type in self.tone_types)\n\n def get(self, index: int, tone_type) -> np.ndarray:\n return self.data[tone_type][index]\n", "id": "1856492", "language": "Python", "matching_score": 2.164641857147217, "max_stars_count": 2, "path": "utils/screentone.py" }, { "content": "import cv2\nimport numpy as np\nimport scipy.stats\n\n\ndef uniform_label(label: np.ndarray, img_line: np.ndarray, thresh: int = 144) -> np.ndarray:\n \"\"\"uniform label in each closed region\n \n Arguments:\n label {np.ndarray} -- shape [H, W]\n img_line {np.ndarray} -- shape [H, W]\n \n Keyword Arguments:\n thresh {int} -- threshold value for binarization (default: {144})\n \n Returns:\n label -- shape [H, W]\n \"\"\"\n ret, img_bin = cv2.threshold(img_line, thresh, 255, 0)\n label = label.copy()\n contours, hierarchy = cv2.findContours(img_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n for i in range(len(contours)):\n mask = 
np.zeros(img_line.shape, dtype=np.uint8)\n\n cv2.drawContours(mask, contours, i, (255,), -1)\n j = hierarchy[0, i, 2]\n while(j != -1):\n cv2.drawContours(mask, contours, j,(0,), -1)\n j = hierarchy[0, j, 0] # next\n\n mask = mask.astype(np.bool)\n result = scipy.stats.mode(label[mask])\n label[mask] = result.mode\n \n return label\n", "id": "9140407", "language": "Python", "matching_score": 0.02410399727523327, "max_stars_count": 2, "path": "utils/post_proc.py" }, { "content": "# Copyright 2020 InterDigital Communications, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom compressai.ans import BufferedRansEncoder, RansDecoder\nfrom compressai.entropy_models import EntropyBottleneck, GaussianConditional\nfrom compressai.layers import GDN, MaskedConv2d\n\nfrom .utils import conv, deconv, update_registered_buffers\n\n__all__ = [\n \"CompressionModel\",\n \"FactorizedPrior\",\n \"ScaleHyperprior\",\n \"MeanScaleHyperprior\",\n \"JointAutoregressiveHierarchicalPriors\",\n]\n\n\nclass CompressionModel(nn.Module):\n \"\"\"Base class for constructing an auto-encoder with at least one entropy\n bottleneck module.\n\n Args:\n entropy_bottleneck_channels (int): Number of channels of the entropy\n bottleneck\n \"\"\"\n\n def __init__(self, entropy_bottleneck_channels, init_weights=True):\n super().__init__()\n self.entropy_bottleneck = EntropyBottleneck(entropy_bottleneck_channels)\n\n if init_weights:\n self._initialize_weights()\n\n def aux_loss(self):\n \"\"\"Return the aggregated loss over the auxiliary entropy bottleneck\n module(s).\n \"\"\"\n aux_loss = sum(\n m.loss() for m in self.modules() if isinstance(m, EntropyBottleneck)\n )\n return aux_loss\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):\n nn.init.kaiming_normal_(m.weight)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n\n def forward(self, *args):\n raise NotImplementedError()\n\n def update(self, force=False):\n \"\"\"Updates the entropy bottleneck(s) CDF values.\n\n Needs to be called once after training to be able to later perform the\n evaluation with an actual entropy coder.\n\n Args:\n force (bool): overwrite previous values (default: False)\n\n Returns:\n updated (bool): True if one of the EntropyBottlenecks was updated.\n\n \"\"\"\n updated = False\n for m in self.children():\n if not isinstance(m, EntropyBottleneck):\n continue\n rv = m.update(force=force)\n updated |= rv\n return updated\n\n def load_state_dict(self, state_dict):\n # Dynamically update the entropy bottleneck buffers related to the CDFs\n update_registered_buffers(\n self.entropy_bottleneck,\n \"entropy_bottleneck\",\n [\"_quantized_cdf\", \"_offset\", \"_cdf_length\"],\n state_dict,\n )\n super().load_state_dict(state_dict)\n\n\nclass FactorizedPrior(CompressionModel):\n r\"\"\"Factorized Prior model from <NAME>, <NAME>, <NAME>, <NAME>,\n <NAME>: `\"Variational Image 
Compression with a Scale Hyperprior\"\n <https://arxiv.org/abs/1802.01436>`_, Int Conf. on Learning Representations\n (ICLR), 2018.\n\n Args:\n N (int): Number of channels\n M (int): Number of channels in the expansion layers (last layer of the\n encoder and last layer of the hyperprior decoder)\n \"\"\"\n\n def __init__(self, N, M, **kwargs):\n super().__init__(entropy_bottleneck_channels=M, **kwargs)\n\n self.g_a = nn.Sequential(\n conv(3, N),\n GDN(N),\n conv(N, N),\n GDN(N),\n conv(N, N),\n GDN(N),\n conv(N, M),\n )\n\n self.g_s = nn.Sequential(\n deconv(M, N),\n GDN(N, inverse=True),\n deconv(N, N),\n GDN(N, inverse=True),\n deconv(N, N),\n GDN(N, inverse=True),\n deconv(N, 3),\n )\n\n self.N = N\n self.M = M\n\n @property\n def downsampling_factor(self) -> int:\n return 2 ** 4\n\n def forward(self, x):\n y = self.g_a(x)\n y_hat, y_likelihoods = self.entropy_bottleneck(y)\n x_hat = self.g_s(y_hat)\n\n return {\n \"x_hat\": x_hat,\n \"likelihoods\": {\n \"y\": y_likelihoods,\n },\n }\n\n @classmethod\n def from_state_dict(cls, state_dict):\n \"\"\"Return a new model instance from `state_dict`.\"\"\"\n N = state_dict[\"g_a.0.weight\"].size(0)\n M = state_dict[\"g_a.6.weight\"].size(0)\n net = cls(N, M)\n net.load_state_dict(state_dict)\n return net\n\n def compress(self, x):\n y = self.g_a(x)\n y_strings = self.entropy_bottleneck.compress(y)\n return {\"strings\": [y_strings], \"shape\": y.size()[-2:]}\n\n def decompress(self, strings, shape):\n assert isinstance(strings, list) and len(strings) == 1\n y_hat = self.entropy_bottleneck.decompress(strings[0], shape)\n x_hat = self.g_s(y_hat).clamp_(0, 1)\n return {\"x_hat\": x_hat}\n\n\n# From Balle's tensorflow compression examples\nSCALES_MIN = 0.11\nSCALES_MAX = 256\nSCALES_LEVELS = 64\n\n\ndef get_scale_table(min=SCALES_MIN, max=SCALES_MAX, levels=SCALES_LEVELS):\n return torch.exp(torch.linspace(math.log(min), math.log(max), levels))\n\n\nclass ScaleHyperprior(CompressionModel):\n r\"\"\"Scale Hyperprior model from <NAME>, <NAME>, <NAME>, <NAME>,\n <NAME>: `\"Variational Image Compression with a Scale Hyperprior\"\n <https://arxiv.org/abs/1802.01436>`_ Int. Conf. 
on Learning Representations\n (ICLR), 2018.\n\n Args:\n N (int): Number of channels\n M (int): Number of channels in the expansion layers (last layer of the\n encoder and last layer of the hyperprior decoder)\n \"\"\"\n\n def __init__(self, N, M, **kwargs):\n super().__init__(entropy_bottleneck_channels=N, **kwargs)\n\n self.g_a = nn.Sequential(\n conv(3, N),\n GDN(N),\n conv(N, N),\n GDN(N),\n conv(N, N),\n GDN(N),\n conv(N, M),\n )\n\n self.g_s = nn.Sequential(\n deconv(M, N),\n GDN(N, inverse=True),\n deconv(N, N),\n GDN(N, inverse=True),\n deconv(N, N),\n GDN(N, inverse=True),\n deconv(N, 3),\n )\n\n self.h_a = nn.Sequential(\n conv(M, N, stride=1, kernel_size=3),\n nn.ReLU(inplace=True),\n conv(N, N),\n nn.ReLU(inplace=True),\n conv(N, N),\n )\n\n self.h_s = nn.Sequential(\n deconv(N, N),\n nn.ReLU(inplace=True),\n deconv(N, N),\n nn.ReLU(inplace=True),\n conv(N, M, stride=1, kernel_size=3),\n nn.ReLU(inplace=True),\n )\n\n self.gaussian_conditional = GaussianConditional(None)\n self.N = int(N)\n self.M = int(M)\n\n @property\n def downsampling_factor(self) -> int:\n return 2 ** (4 + 2)\n\n def forward(self, x):\n y = self.g_a(x)\n z = self.h_a(torch.abs(y))\n z_hat, z_likelihoods = self.entropy_bottleneck(z)\n scales_hat = self.h_s(z_hat)\n y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat)\n x_hat = self.g_s(y_hat)\n\n return {\n \"x_hat\": x_hat,\n \"likelihoods\": {\"y\": y_likelihoods, \"z\": z_likelihoods},\n }\n\n def load_state_dict(self, state_dict):\n update_registered_buffers(\n self.gaussian_conditional,\n \"gaussian_conditional\",\n [\"_quantized_cdf\", \"_offset\", \"_cdf_length\", \"scale_table\"],\n state_dict,\n )\n super().load_state_dict(state_dict)\n\n @classmethod\n def from_state_dict(cls, state_dict):\n \"\"\"Return a new model instance from `state_dict`.\"\"\"\n N = state_dict[\"g_a.0.weight\"].size(0)\n M = state_dict[\"g_a.6.weight\"].size(0)\n net = cls(N, M)\n net.load_state_dict(state_dict)\n return net\n\n def update(self, scale_table=None, force=False):\n if scale_table is None:\n scale_table = get_scale_table()\n updated = self.gaussian_conditional.update_scale_table(scale_table, force=force)\n updated |= super().update(force=force)\n return updated\n\n def compress(self, x):\n y = self.g_a(x)\n z = self.h_a(torch.abs(y))\n\n z_strings = self.entropy_bottleneck.compress(z)\n z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])\n\n scales_hat = self.h_s(z_hat)\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n y_strings = self.gaussian_conditional.compress(y, indexes)\n return {\"strings\": [y_strings, z_strings], \"shape\": z.size()[-2:]}\n\n def decompress(self, strings, shape):\n assert isinstance(strings, list) and len(strings) == 2\n z_hat = self.entropy_bottleneck.decompress(strings[1], shape)\n scales_hat = self.h_s(z_hat)\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n y_hat = self.gaussian_conditional.decompress(strings[0], indexes, z_hat.dtype)\n x_hat = self.g_s(y_hat).clamp_(0, 1)\n return {\"x_hat\": x_hat}\n\n\nclass MeanScaleHyperprior(ScaleHyperprior):\n r\"\"\"Scale Hyperprior with non zero-mean Gaussian conditionals from D.\n Minnen, <NAME>, <NAME>: `\"Joint Autoregressive and Hierarchical\n Priors for Learned Image Compression\" <https://arxiv.org/abs/1809.02736>`_,\n Adv. 
in Neural Information Processing Systems 31 (NeurIPS 2018).\n\n Args:\n N (int): Number of channels\n M (int): Number of channels in the expansion layers (last layer of the\n encoder and last layer of the hyperprior decoder)\n \"\"\"\n\n def __init__(self, N, M, **kwargs):\n super().__init__(N, M, **kwargs)\n\n self.h_a = nn.Sequential(\n conv(M, N, stride=1, kernel_size=3),\n nn.LeakyReLU(inplace=True),\n conv(N, N),\n nn.LeakyReLU(inplace=True),\n conv(N, N),\n )\n\n self.h_s = nn.Sequential(\n deconv(N, M),\n nn.LeakyReLU(inplace=True),\n deconv(M, M * 3 // 2),\n nn.LeakyReLU(inplace=True),\n conv(M * 3 // 2, M * 2, stride=1, kernel_size=3),\n )\n\n def forward(self, x):\n y = self.g_a(x)\n z = self.h_a(y)\n z_hat, z_likelihoods = self.entropy_bottleneck(z)\n gaussian_params = self.h_s(z_hat)\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)\n x_hat = self.g_s(y_hat)\n\n return {\n \"x_hat\": x_hat,\n \"likelihoods\": {\"y\": y_likelihoods, \"z\": z_likelihoods},\n }\n\n def compress(self, x):\n y = self.g_a(x)\n z = self.h_a(y)\n\n z_strings = self.entropy_bottleneck.compress(z)\n z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])\n\n gaussian_params = self.h_s(z_hat)\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n y_strings = self.gaussian_conditional.compress(y, indexes, means=means_hat)\n return {\"strings\": [y_strings, z_strings], \"shape\": z.size()[-2:]}\n\n def decompress(self, strings, shape):\n assert isinstance(strings, list) and len(strings) == 2\n z_hat = self.entropy_bottleneck.decompress(strings[1], shape)\n gaussian_params = self.h_s(z_hat)\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n y_hat = self.gaussian_conditional.decompress(\n strings[0], indexes, means=means_hat\n )\n x_hat = self.g_s(y_hat).clamp_(0, 1)\n return {\"x_hat\": x_hat}\n\n\nclass JointAutoregressiveHierarchicalPriors(MeanScaleHyperprior):\n r\"\"\"Joint Autoregressive Hierarchical Priors model from <NAME>, <NAME>, <NAME>: `\"Joint Autoregressive and Hierarchical\n Priors for Learned Image Compression\" <https://arxiv.org/abs/1809.02736>`_,\n Adv. 
in Neural Information Processing Systems 31 (NeurIPS 2018).\n\n Args:\n N (int): Number of channels\n M (int): Number of channels in the expansion layers (last layer of the\n encoder and last layer of the hyperprior decoder)\n \"\"\"\n\n def __init__(self, N=192, M=192, **kwargs):\n super().__init__(N=N, M=M, **kwargs)\n\n self.g_a = nn.Sequential(\n conv(3, N, kernel_size=5, stride=2),\n GDN(N),\n conv(N, N, kernel_size=5, stride=2),\n GDN(N),\n conv(N, N, kernel_size=5, stride=2),\n GDN(N),\n conv(N, M, kernel_size=5, stride=2),\n )\n\n self.g_s = nn.Sequential(\n deconv(M, N, kernel_size=5, stride=2),\n GDN(N, inverse=True),\n deconv(N, N, kernel_size=5, stride=2),\n GDN(N, inverse=True),\n deconv(N, N, kernel_size=5, stride=2),\n GDN(N, inverse=True),\n deconv(N, 3, kernel_size=5, stride=2),\n )\n\n self.h_a = nn.Sequential(\n conv(M, N, stride=1, kernel_size=3),\n nn.LeakyReLU(inplace=True),\n conv(N, N, stride=2, kernel_size=5),\n nn.LeakyReLU(inplace=True),\n conv(N, N, stride=2, kernel_size=5),\n )\n\n self.h_s = nn.Sequential(\n deconv(N, M, stride=2, kernel_size=5),\n nn.LeakyReLU(inplace=True),\n deconv(M, M * 3 // 2, stride=2, kernel_size=5),\n nn.LeakyReLU(inplace=True),\n conv(M * 3 // 2, M * 2, stride=1, kernel_size=3),\n )\n\n self.entropy_parameters = nn.Sequential(\n nn.Conv2d(M * 12 // 3, M * 10 // 3, 1),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(M * 10 // 3, M * 8 // 3, 1),\n nn.LeakyReLU(inplace=True),\n nn.Conv2d(M * 8 // 3, M * 6 // 3, 1),\n )\n\n self.context_prediction = MaskedConv2d(\n M, 2 * M, kernel_size=5, padding=2, stride=1\n )\n\n self.gaussian_conditional = GaussianConditional(None)\n self.N = int(N)\n self.M = int(M)\n\n @property\n def downsampling_factor(self) -> int:\n return 2 ** (4 + 2)\n\n def forward(self, x):\n y = self.g_a(x)\n z = self.h_a(y)\n z_hat, z_likelihoods = self.entropy_bottleneck(z)\n params = self.h_s(z_hat)\n\n y_hat = self.gaussian_conditional.quantize(\n y, \"noise\" if self.training else \"dequantize\"\n )\n ctx_params = self.context_prediction(y_hat)\n gaussian_params = self.entropy_parameters(\n torch.cat((params, ctx_params), dim=1)\n )\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n _, y_likelihoods = self.gaussian_conditional(y, scales_hat, means=means_hat)\n x_hat = self.g_s(y_hat)\n\n return {\n \"x_hat\": x_hat,\n \"likelihoods\": {\"y\": y_likelihoods, \"z\": z_likelihoods},\n }\n\n @classmethod\n def from_state_dict(cls, state_dict):\n \"\"\"Return a new model instance from `state_dict`.\"\"\"\n N = state_dict[\"g_a.0.weight\"].size(0)\n M = state_dict[\"g_a.6.weight\"].size(0)\n net = cls(N, M)\n net.load_state_dict(state_dict)\n return net\n\n def compress(self, x):\n if next(self.parameters()).device != torch.device(\"cpu\"):\n warnings.warn(\n \"Inference on GPU is not recommended for the autoregressive \"\n \"models (the entropy coder is run sequentially on CPU).\"\n )\n\n y = self.g_a(x)\n z = self.h_a(y)\n\n z_strings = self.entropy_bottleneck.compress(z)\n z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])\n\n params = self.h_s(z_hat)\n\n s = 4 # scaling factor between z and y\n kernel_size = 5 # context prediction kernel size\n padding = (kernel_size - 1) // 2\n\n y_height = z_hat.size(2) * s\n y_width = z_hat.size(3) * s\n\n y_hat = F.pad(y, (padding, padding, padding, padding))\n\n y_strings = []\n for i in range(y.size(0)):\n string = self._compress_ar(\n y_hat[i : i + 1],\n params[i : i + 1],\n y_height,\n y_width,\n kernel_size,\n padding,\n )\n y_strings.append(string)\n\n return 
{\"strings\": [y_strings, z_strings], \"shape\": z.size()[-2:]}\n\n def _compress_ar(self, y_hat, params, height, width, kernel_size, padding):\n cdf = self.gaussian_conditional.quantized_cdf.tolist()\n cdf_lengths = self.gaussian_conditional.cdf_length.tolist()\n offsets = self.gaussian_conditional.offset.tolist()\n\n encoder = BufferedRansEncoder()\n symbols_list = []\n indexes_list = []\n\n # Warning, this is slow...\n # TODO: profile the calls to the bindings...\n masked_weight = self.context_prediction.weight * self.context_prediction.mask\n for h in range(height):\n for w in range(width):\n y_crop = y_hat[:, :, h : h + kernel_size, w : w + kernel_size]\n ctx_p = F.conv2d(\n y_crop,\n masked_weight,\n bias=self.context_prediction.bias,\n )\n\n # 1x1 conv for the entropy parameters prediction network, so\n # we only keep the elements in the \"center\"\n p = params[:, :, h : h + 1, w : w + 1]\n gaussian_params = self.entropy_parameters(torch.cat((p, ctx_p), dim=1))\n gaussian_params = gaussian_params.squeeze(3).squeeze(2)\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n\n y_crop = y_crop[:, :, padding, padding]\n y_q = self.gaussian_conditional.quantize(y_crop, \"symbols\", means_hat)\n y_hat[:, :, h + padding, w + padding] = y_q + means_hat\n\n symbols_list.extend(y_q.squeeze().tolist())\n indexes_list.extend(indexes.squeeze().tolist())\n\n encoder.encode_with_indexes(\n symbols_list, indexes_list, cdf, cdf_lengths, offsets\n )\n\n string = encoder.flush()\n return string\n\n def decompress(self, strings, shape):\n assert isinstance(strings, list) and len(strings) == 2\n\n if next(self.parameters()).device != torch.device(\"cpu\"):\n warnings.warn(\n \"Inference on GPU is not recommended for the autoregressive \"\n \"models (the entropy coder is run sequentially on CPU).\"\n )\n\n # FIXME: we don't respect the default entropy coder and directly call the\n # range ANS decoder\n\n z_hat = self.entropy_bottleneck.decompress(strings[1], shape)\n params = self.h_s(z_hat)\n\n s = 4 # scaling factor between z and y\n kernel_size = 5 # context prediction kernel size\n padding = (kernel_size - 1) // 2\n\n y_height = z_hat.size(2) * s\n y_width = z_hat.size(3) * s\n\n # initialize y_hat to zeros, and pad it so we can directly work with\n # sub-tensors of size (N, C, kernel size, kernel_size)\n y_hat = torch.zeros(\n (z_hat.size(0), self.M, y_height + 2 * padding, y_width + 2 * padding),\n device=z_hat.device,\n )\n\n for i, y_string in enumerate(strings[0]):\n self._decompress_ar(\n y_string,\n y_hat[i : i + 1],\n params[i : i + 1],\n y_height,\n y_width,\n kernel_size,\n padding,\n )\n\n y_hat = F.pad(y_hat, (-padding, -padding, -padding, -padding))\n x_hat = self.g_s(y_hat).clamp_(0, 1)\n return {\"x_hat\": x_hat}\n\n def _decompress_ar(\n self, y_string, y_hat, params, height, width, kernel_size, padding\n ):\n cdf = self.gaussian_conditional.quantized_cdf.tolist()\n cdf_lengths = self.gaussian_conditional.cdf_length.tolist()\n offsets = self.gaussian_conditional.offset.tolist()\n\n decoder = RansDecoder()\n decoder.set_stream(y_string)\n\n # Warning: this is slow due to the auto-regressive nature of the\n # decoding... 
See more recent publication where they use an\n # auto-regressive module on chunks of channels for faster decoding...\n for h in range(height):\n for w in range(width):\n # only perform the 5x5 convolution on a cropped tensor\n # centered in (h, w)\n y_crop = y_hat[:, :, h : h + kernel_size, w : w + kernel_size]\n ctx_p = F.conv2d(\n y_crop,\n self.context_prediction.weight,\n bias=self.context_prediction.bias,\n )\n # 1x1 conv for the entropy parameters prediction network, so\n # we only keep the elements in the \"center\"\n p = params[:, :, h : h + 1, w : w + 1]\n gaussian_params = self.entropy_parameters(torch.cat((p, ctx_p), dim=1))\n scales_hat, means_hat = gaussian_params.chunk(2, 1)\n\n indexes = self.gaussian_conditional.build_indexes(scales_hat)\n rv = decoder.decode_stream(\n indexes.squeeze().tolist(), cdf, cdf_lengths, offsets\n )\n rv = torch.Tensor(rv).reshape(1, -1, 1, 1)\n rv = self.gaussian_conditional.dequantize(rv, means_hat)\n\n hp = h + padding\n wp = w + padding\n y_hat[:, :, hp : hp + 1, wp : wp + 1] = rv\n", "id": "782157", "language": "Python", "matching_score": 4.445733070373535, "max_stars_count": 1, "path": "compressai/models/priors.py" }, { "content": "import warnings\n\nfrom typing import Any, Callable, List, Optional, Tuple, Union\n\nimport numpy as np\nimport scipy.stats\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch import Tensor\n\nfrom compressai._CXX import pmf_to_quantized_cdf as _pmf_to_quantized_cdf\nfrom compressai.ops import LowerBound\n\n\nclass _EntropyCoder:\n \"\"\"Proxy class to an actual entropy coder class.\"\"\"\n\n def __init__(self, method):\n if not isinstance(method, str):\n raise ValueError(f'Invalid method type \"{type(method)}\"')\n\n from compressai import available_entropy_coders\n\n if method not in available_entropy_coders():\n methods = \", \".join(available_entropy_coders())\n raise ValueError(\n f'Unknown entropy coder \"{method}\"' f\" (available: {methods})\"\n )\n\n if method == \"ans\":\n from compressai import ans\n\n encoder = ans.RansEncoder()\n decoder = ans.RansDecoder()\n elif method == \"rangecoder\":\n import range_coder\n\n encoder = range_coder.RangeEncoder()\n decoder = range_coder.RangeDecoder()\n\n self.name = method\n self._encoder = encoder\n self._decoder = decoder\n\n def encode_with_indexes(self, *args, **kwargs):\n return self._encoder.encode_with_indexes(*args, **kwargs)\n\n def decode_with_indexes(self, *args, **kwargs):\n return self._decoder.decode_with_indexes(*args, **kwargs)\n\n\ndef default_entropy_coder():\n from compressai import get_entropy_coder\n\n return get_entropy_coder()\n\n\ndef pmf_to_quantized_cdf(pmf: Tensor, precision: int = 16) -> Tensor:\n cdf = _pmf_to_quantized_cdf(pmf.tolist(), precision)\n cdf = torch.IntTensor(cdf)\n return cdf\n\n\ndef _forward(self, *args: Any) -> Any:\n raise NotImplementedError()\n\n\nclass EntropyModel(nn.Module):\n r\"\"\"Entropy model base class.\n\n Args:\n likelihood_bound (float): minimum likelihood bound\n entropy_coder (str, optional): set the entropy coder to use, use default\n one if None\n entropy_coder_precision (int): set the entropy coder precision\n \"\"\"\n\n def __init__(\n self,\n likelihood_bound: float = 1e-9,\n entropy_coder: Optional[str] = None,\n entropy_coder_precision: int = 16,\n ):\n super().__init__()\n\n if entropy_coder is None:\n entropy_coder = default_entropy_coder()\n self.entropy_coder = _EntropyCoder(entropy_coder)\n self.entropy_coder_precision = int(entropy_coder_precision)\n\n 
self.use_likelihood_bound = likelihood_bound > 0\n if self.use_likelihood_bound:\n self.likelihood_lower_bound = LowerBound(likelihood_bound)\n\n # to be filled on update()\n self.register_buffer(\"_offset\", torch.IntTensor())\n self.register_buffer(\"_quantized_cdf\", torch.IntTensor())\n self.register_buffer(\"_cdf_length\", torch.IntTensor())\n\n def __getstate__(self):\n attributes = self.__dict__.copy()\n attributes[\"entropy_coder\"] = self.entropy_coder.name\n return attributes\n\n def __setstate__(self, state):\n self.__dict__ = state\n self.entropy_coder = _EntropyCoder(self.__dict__.pop(\"entropy_coder\"))\n\n @property\n def offset(self):\n return self._offset\n\n @property\n def quantized_cdf(self):\n return self._quantized_cdf\n\n @property\n def cdf_length(self):\n return self._cdf_length\n\n # See: https://github.com/python/mypy/issues/8795\n forward: Callable[..., Any] = _forward\n\n def quantize(\n self, inputs: Tensor, mode: str, means: Optional[Tensor] = None\n ) -> Tensor:\n if mode not in (\"noise\", \"dequantize\", \"symbols\"):\n raise ValueError(f'Invalid quantization mode: \"{mode}\"')\n\n if mode == \"noise\":\n half = float(0.5)\n noise = torch.empty_like(inputs).uniform_(-half, half)\n inputs = inputs + noise\n return inputs\n\n outputs = inputs.clone()\n if means is not None:\n outputs -= means\n\n outputs = torch.round(outputs)\n\n if mode == \"dequantize\":\n if means is not None:\n outputs += means\n return outputs\n\n assert mode == \"symbols\", mode\n outputs = outputs.int()\n return outputs\n\n def _quantize(\n self, inputs: Tensor, mode: str, means: Optional[Tensor] = None\n ) -> Tensor:\n warnings.warn(\"_quantize is deprecated. Use quantize instead.\")\n return self.quantize(inputs, mode, means)\n\n @staticmethod\n def dequantize(\n inputs: Tensor, means: Optional[Tensor] = None, dtype: torch.dtype = torch.float\n ) -> Tensor:\n if means is not None:\n outputs = inputs.type_as(means)\n outputs += means\n else:\n outputs = inputs.type(dtype)\n return outputs\n\n @classmethod\n def _dequantize(cls, inputs: Tensor, means: Optional[Tensor] = None) -> Tensor:\n warnings.warn(\"_dequantize. Use dequantize instead.\")\n return cls.dequantize(inputs, means)\n\n def _pmf_to_cdf(self, pmf, tail_mass, pmf_length, max_length):\n cdf = torch.zeros(\n (len(pmf_length), max_length + 2), dtype=torch.int32, device=pmf.device\n )\n for i, p in enumerate(pmf):\n prob = torch.cat((p[: pmf_length[i]], tail_mass[i]), dim=0)\n _cdf = pmf_to_quantized_cdf(prob, self.entropy_coder_precision)\n cdf[i, : _cdf.size(0)] = _cdf\n return cdf\n\n def _check_cdf_size(self):\n if self._quantized_cdf.numel() == 0:\n raise ValueError(\"Uninitialized CDFs. Run update() first\")\n\n if len(self._quantized_cdf.size()) != 2:\n raise ValueError(f\"Invalid CDF size {self._quantized_cdf.size()}\")\n\n def _check_offsets_size(self):\n if self._offset.numel() == 0:\n raise ValueError(\"Uninitialized offsets. Run update() first\")\n\n if len(self._offset.size()) != 1:\n raise ValueError(f\"Invalid offsets size {self._offset.size()}\")\n\n def _check_cdf_length(self):\n if self._cdf_length.numel() == 0:\n raise ValueError(\"Uninitialized CDF lengths. 
Run update() first\")\n\n if len(self._cdf_length.size()) != 1:\n raise ValueError(f\"Invalid offsets size {self._cdf_length.size()}\")\n\n def compress(self, inputs, indexes, means=None):\n \"\"\"\n Compress input tensors to char strings.\n\n Args:\n inputs (torch.Tensor): input tensors\n indexes (torch.IntTensor): tensors CDF indexes\n means (torch.Tensor, optional): optional tensor means\n \"\"\"\n symbols = self.quantize(inputs, \"symbols\", means)\n\n if len(inputs.size()) < 2:\n raise ValueError(\n \"Invalid `inputs` size. Expected a tensor with at least 2 dimensions.\"\n )\n\n if inputs.size() != indexes.size():\n raise ValueError(\"`inputs` and `indexes` should have the same size.\")\n\n self._check_cdf_size()\n self._check_cdf_length()\n self._check_offsets_size()\n\n strings = []\n for i in range(symbols.size(0)):\n rv = self.entropy_coder.encode_with_indexes(\n symbols[i].reshape(-1).int().tolist(),\n indexes[i].reshape(-1).int().tolist(),\n self._quantized_cdf.tolist(),\n self._cdf_length.reshape(-1).int().tolist(),\n self._offset.reshape(-1).int().tolist(),\n )\n strings.append(rv)\n return strings\n\n def decompress(\n self,\n strings: str,\n indexes: torch.IntTensor,\n dtype: torch.dtype = torch.float,\n means: torch.Tensor = None,\n ):\n \"\"\"\n Decompress char strings to tensors.\n\n Args:\n strings (str): compressed tensors\n indexes (torch.IntTensor): tensors CDF indexes\n dtype (torch.dtype): type of dequantized output\n means (torch.Tensor, optional): optional tensor means\n \"\"\"\n\n if not isinstance(strings, (tuple, list)):\n raise ValueError(\"Invalid `strings` parameter type.\")\n\n if not len(strings) == indexes.size(0):\n raise ValueError(\"Invalid strings or indexes parameters\")\n\n if len(indexes.size()) < 2:\n raise ValueError(\n \"Invalid `indexes` size. Expected a tensor with at least 2 dimensions.\"\n )\n\n self._check_cdf_size()\n self._check_cdf_length()\n self._check_offsets_size()\n\n if means is not None:\n if means.size()[:2] != indexes.size()[:2]:\n raise ValueError(\"Invalid means or indexes parameters\")\n if means.size() != indexes.size():\n for i in range(2, len(indexes.size())):\n if means.size(i) != 1:\n raise ValueError(\"Invalid means parameters\")\n\n cdf = self._quantized_cdf\n outputs = cdf.new_empty(indexes.size())\n\n for i, s in enumerate(strings):\n values = self.entropy_coder.decode_with_indexes(\n s,\n indexes[i].reshape(-1).int().tolist(),\n cdf.tolist(),\n self._cdf_length.reshape(-1).int().tolist(),\n self._offset.reshape(-1).int().tolist(),\n )\n outputs[i] = torch.tensor(\n values, device=outputs.device, dtype=outputs.dtype\n ).reshape(outputs[i].size())\n outputs = self.dequantize(outputs, means, dtype)\n return outputs\n\n\nclass EntropyBottleneck(EntropyModel):\n r\"\"\"Entropy bottleneck layer, introduced by <NAME>, <NAME>, <NAME>,\n <NAME>, <NAME>, in `\"Variational image compression with a scale\n hyperprior\" <https://arxiv.org/abs/1802.01436>`_.\n\n This is a re-implementation of the entropy bottleneck layer in\n *tensorflow/compression*. See the original paper and the `tensorflow\n documentation\n <https://tensorflow.github.io/compression/docs/entropy_bottleneck.html>`__\n for an introduction.\n \"\"\"\n\n _offset: Tensor\n\n def __init__(\n self,\n channels: int,\n *args: Any,\n tail_mass: float = 1e-9,\n init_scale: float = 10,\n filters: Tuple[int, ...] 
= (3, 3, 3, 3),\n **kwargs: Any,\n ):\n super().__init__(*args, **kwargs)\n\n self.channels = int(channels)\n self.filters = tuple(int(f) for f in filters)\n self.init_scale = float(init_scale)\n self.tail_mass = float(tail_mass)\n\n # Create parameters\n filters = (1,) + self.filters + (1,)\n scale = self.init_scale ** (1 / (len(self.filters) + 1))\n channels = self.channels\n\n for i in range(len(self.filters) + 1):\n init = np.log(np.expm1(1 / scale / filters[i + 1]))\n matrix = torch.Tensor(channels, filters[i + 1], filters[i])\n matrix.data.fill_(init)\n self.register_parameter(f\"_matrix{i:d}\", nn.Parameter(matrix))\n\n bias = torch.Tensor(channels, filters[i + 1], 1)\n nn.init.uniform_(bias, -0.5, 0.5)\n self.register_parameter(f\"_bias{i:d}\", nn.Parameter(bias))\n\n if i < len(self.filters):\n factor = torch.Tensor(channels, filters[i + 1], 1)\n nn.init.zeros_(factor)\n self.register_parameter(f\"_factor{i:d}\", nn.Parameter(factor))\n\n self.quantiles = nn.Parameter(torch.Tensor(channels, 1, 3))\n init = torch.Tensor([-self.init_scale, 0, self.init_scale])\n self.quantiles.data = init.repeat(self.quantiles.size(0), 1, 1)\n\n target = np.log(2 / self.tail_mass - 1)\n self.register_buffer(\"target\", torch.Tensor([-target, 0, target]))\n\n def _get_medians(self) -> Tensor:\n medians = self.quantiles[:, :, 1:2]\n return medians\n\n def update(self, force: bool = False) -> bool:\n # Check if we need to update the bottleneck parameters, the offsets are\n # only computed and stored when the conditonal model is update()'d.\n if self._offset.numel() > 0 and not force:\n return False\n\n medians = self.quantiles[:, 0, 1]\n\n minima = medians - self.quantiles[:, 0, 0]\n minima = torch.ceil(minima).int()\n minima = torch.clamp(minima, min=0)\n\n maxima = self.quantiles[:, 0, 2] - medians\n maxima = torch.ceil(maxima).int()\n maxima = torch.clamp(maxima, min=0)\n\n self._offset = -minima\n\n pmf_start = medians - minima\n pmf_length = maxima + minima + 1\n\n max_length = pmf_length.max().item()\n device = pmf_start.device\n samples = torch.arange(max_length, device=device)\n\n samples = samples[None, :] + pmf_start[:, None, None]\n\n half = float(0.5)\n\n lower = self._logits_cumulative(samples - half, stop_gradient=True)\n upper = self._logits_cumulative(samples + half, stop_gradient=True)\n sign = -torch.sign(lower + upper)\n pmf = torch.abs(torch.sigmoid(sign * upper) - torch.sigmoid(sign * lower))\n\n pmf = pmf[:, 0, :]\n tail_mass = torch.sigmoid(lower[:, 0, :1]) + torch.sigmoid(-upper[:, 0, -1:])\n\n quantized_cdf = self._pmf_to_cdf(pmf, tail_mass, pmf_length, max_length)\n self._quantized_cdf = quantized_cdf\n self._cdf_length = pmf_length + 2\n return True\n\n def loss(self) -> Tensor:\n logits = self._logits_cumulative(self.quantiles, stop_gradient=True)\n loss = torch.abs(logits - self.target).sum()\n return loss\n\n def _logits_cumulative(self, inputs: Tensor, stop_gradient: bool) -> Tensor:\n # TorchScript not yet working (nn.Mmodule indexing not supported)\n logits = inputs\n for i in range(len(self.filters) + 1):\n matrix = getattr(self, f\"_matrix{i:d}\")\n if stop_gradient:\n matrix = matrix.detach()\n logits = torch.matmul(F.softplus(matrix), logits)\n\n bias = getattr(self, f\"_bias{i:d}\")\n if stop_gradient:\n bias = bias.detach()\n logits += bias\n\n if i < len(self.filters):\n factor = getattr(self, f\"_factor{i:d}\")\n if stop_gradient:\n factor = factor.detach()\n logits += torch.tanh(factor) * torch.tanh(logits)\n return logits\n\n @torch.jit.unused\n def 
_likelihood(self, inputs: Tensor) -> Tensor:\n half = float(0.5)\n v0 = inputs - half\n v1 = inputs + half\n lower = self._logits_cumulative(v0, stop_gradient=False)\n upper = self._logits_cumulative(v1, stop_gradient=False)\n sign = -torch.sign(lower + upper)\n sign = sign.detach()\n likelihood = torch.abs(\n torch.sigmoid(sign * upper) - torch.sigmoid(sign * lower)\n )\n return likelihood\n\n def forward(\n self, x: Tensor, training: Optional[bool] = None\n ) -> Tuple[Tensor, Tensor]:\n if training is None:\n training = self.training\n\n if not torch.jit.is_scripting():\n # x from B x C x ... to C x B x ...\n perm = np.arange(len(x.shape))\n perm[0], perm[1] = perm[1], perm[0]\n # Compute inverse permutation\n inv_perm = np.arange(len(x.shape))[np.argsort(perm)]\n else:\n # TorchScript in 2D for static inference\n # Convert to (channels, ... , batch) format\n perm = (1, 2, 3, 0)\n inv_perm = (3, 0, 1, 2)\n\n x = x.permute(*perm).contiguous()\n shape = x.size()\n values = x.reshape(x.size(0), 1, -1)\n\n # Add noise or quantize\n\n outputs = self.quantize(\n values, \"noise\" if training else \"dequantize\", self._get_medians()\n )\n\n if not torch.jit.is_scripting():\n likelihood = self._likelihood(outputs)\n if self.use_likelihood_bound:\n likelihood = self.likelihood_lower_bound(likelihood)\n else:\n # TorchScript not yet supported\n likelihood = torch.zeros_like(outputs)\n\n # Convert back to input tensor shape\n outputs = outputs.reshape(shape)\n outputs = outputs.permute(*inv_perm).contiguous()\n\n likelihood = likelihood.reshape(shape)\n likelihood = likelihood.permute(*inv_perm).contiguous()\n\n return outputs, likelihood\n\n @staticmethod\n def _build_indexes(size):\n dims = len(size)\n N = size[0]\n C = size[1]\n\n view_dims = np.ones((dims,), dtype=np.int64)\n view_dims[1] = -1\n indexes = torch.arange(C).view(*view_dims)\n indexes = indexes.int()\n\n return indexes.repeat(N, 1, *size[2:])\n\n @staticmethod\n def _extend_ndims(tensor, n):\n return tensor.reshape(-1, *([1] * n)) if n > 0 else tensor.reshape(-1)\n\n def compress(self, x):\n indexes = self._build_indexes(x.size())\n medians = self._get_medians().detach()\n spatial_dims = len(x.size()) - 2\n medians = self._extend_ndims(medians, spatial_dims)\n medians = medians.expand(x.size(0), *([-1] * (spatial_dims + 1)))\n return super().compress(x, indexes, medians)\n\n def decompress(self, strings, size):\n output_size = (len(strings), self._quantized_cdf.size(0), *size)\n indexes = self._build_indexes(output_size).to(self._quantized_cdf.device)\n medians = self._extend_ndims(self._get_medians().detach(), len(size))\n medians = medians.expand(len(strings), *([-1] * (len(size) + 1)))\n return super().decompress(strings, indexes, medians.dtype, medians)\n\n\nclass GaussianConditional(EntropyModel):\n r\"\"\"Gaussian conditional layer, introduced by <NAME>, <NAME>, <NAME>,\n <NAME>, <NAME>, in `\"Variational image compression with a scale\n hyperprior\" <https://arxiv.org/abs/1802.01436>`_.\n\n This is a re-implementation of the Gaussian conditional layer in\n *tensorflow/compression*. 
See the `tensorflow documentation\n <https://tensorflow.github.io/compression/docs/api_docs/python/tfc/GaussianConditional.html>`__\n for more information.\n \"\"\"\n\n def __init__(\n self,\n scale_table: Optional[Union[List, Tuple]],\n *args: Any,\n scale_bound: float = 0.11,\n tail_mass: float = 1e-9,\n **kwargs: Any,\n ):\n super().__init__(*args, **kwargs)\n\n if not isinstance(scale_table, (type(None), list, tuple)):\n raise ValueError(f'Invalid type for scale_table \"{type(scale_table)}\"')\n\n if isinstance(scale_table, (list, tuple)) and len(scale_table) < 1:\n raise ValueError(f'Invalid scale_table length \"{len(scale_table)}\"')\n\n if scale_table and (\n scale_table != sorted(scale_table) or any(s <= 0 for s in scale_table)\n ):\n raise ValueError(f'Invalid scale_table \"({scale_table})\"')\n\n self.tail_mass = float(tail_mass)\n if scale_bound is None and scale_table:\n scale_bound = self.scale_table[0]\n if scale_bound <= 0:\n raise ValueError(\"Invalid parameters\")\n self.lower_bound_scale = LowerBound(scale_bound)\n\n self.register_buffer(\n \"scale_table\",\n self._prepare_scale_table(scale_table) if scale_table else torch.Tensor(),\n )\n\n self.register_buffer(\n \"scale_bound\",\n torch.Tensor([float(scale_bound)]) if scale_bound is not None else None,\n )\n\n @staticmethod\n def _prepare_scale_table(scale_table):\n return torch.Tensor(tuple(float(s) for s in scale_table))\n\n def _standardized_cumulative(self, inputs: Tensor) -> Tensor:\n half = float(0.5)\n const = float(-(2 ** -0.5))\n # Using the complementary error function maximizes numerical precision.\n return half * torch.erfc(const * inputs)\n\n @staticmethod\n def _standardized_quantile(quantile):\n return scipy.stats.norm.ppf(quantile)\n\n def update_scale_table(self, scale_table, force=False):\n # Check if we need to update the gaussian conditional parameters, the\n # offsets are only computed and stored when the conditonal model is\n # updated.\n if self._offset.numel() > 0 and not force:\n return False\n device = self.scale_table.device\n self.scale_table = self._prepare_scale_table(scale_table).to(device)\n self.update()\n return True\n\n def update(self):\n multiplier = -self._standardized_quantile(self.tail_mass / 2)\n pmf_center = torch.ceil(self.scale_table * multiplier).int()\n pmf_length = 2 * pmf_center + 1\n max_length = torch.max(pmf_length).item()\n\n device = pmf_center.device\n samples = torch.abs(\n torch.arange(max_length, device=device).int() - pmf_center[:, None]\n )\n samples_scale = self.scale_table.unsqueeze(1)\n samples = samples.float()\n samples_scale = samples_scale.float()\n upper = self._standardized_cumulative((0.5 - samples) / samples_scale)\n lower = self._standardized_cumulative((-0.5 - samples) / samples_scale)\n pmf = upper - lower\n\n tail_mass = 2 * lower[:, :1]\n\n quantized_cdf = torch.Tensor(len(pmf_length), max_length + 2)\n quantized_cdf = self._pmf_to_cdf(pmf, tail_mass, pmf_length, max_length)\n self._quantized_cdf = quantized_cdf\n self._offset = -pmf_center\n self._cdf_length = pmf_length + 2\n\n def _likelihood(\n self, inputs: Tensor, scales: Tensor, means: Optional[Tensor] = None\n ) -> Tensor:\n half = float(0.5)\n\n if means is not None:\n values = inputs - means\n else:\n values = inputs\n\n scales = self.lower_bound_scale(scales)\n\n values = torch.abs(values)\n upper = self._standardized_cumulative((half - values) / scales)\n lower = self._standardized_cumulative((-half - values) / scales)\n likelihood = upper - lower\n\n return likelihood\n\n def 
forward(\n self,\n inputs: Tensor,\n scales: Tensor,\n means: Optional[Tensor] = None,\n training: Optional[bool] = None,\n ) -> Tuple[Tensor, Tensor]:\n if training is None:\n training = self.training\n outputs = self.quantize(inputs, \"noise\" if training else \"dequantize\", means)\n likelihood = self._likelihood(outputs, scales, means)\n if self.use_likelihood_bound:\n likelihood = self.likelihood_lower_bound(likelihood)\n return outputs, likelihood\n\n def build_indexes(self, scales: Tensor) -> Tensor:\n scales = self.lower_bound_scale(scales)\n indexes = scales.new_full(scales.size(), len(self.scale_table) - 1).int()\n for s in self.scale_table[:-1]:\n indexes -= (scales <= s).int()\n return indexes\n", "id": "8236536", "language": "Python", "matching_score": 4.027756214141846, "max_stars_count": 2, "path": "compressai/entropy_models/entropy_models.py" }, { "content": "import tensorflow.compat.v1 as tf\nimport tensorflow_compression as tfc\n\n\nclass RoundingEntropyBottleneck(tfc.EntropyBottleneck):\n def __init__(\n self,\n init_scale=10,\n filters=(3, 3, 3),\n data_format=\"channels_last\",\n approx=\"STE-Q\",\n **kwargs\n ):\n super(RoundingEntropyBottleneck, self).__init__(\n init_scale=init_scale, filters=filters, data_format=data_format, **kwargs\n )\n assert approx in {\"STE-Q\", \"St-Q\", \"SGA-Q\", \"U-Q\"}\n self.approx = approx\n self.tau = 0.5\n\n def _quantize(self, inputs, mode):\n # Add noise or quantize (and optionally dequantize in one step).\n half = tf.constant(0.5, dtype=self.dtype)\n _, _, _, input_slices = self._get_input_dims()\n\n medians = self._medians[input_slices]\n outputs = tf.math.floor(inputs + (half - medians))\n outputs = tf.cast(outputs, self.dtype)\n\n if mode == \"noise\":\n if self.approx == \"STE-Q\":\n return tf.stop_gradient(outputs + medians - inputs) + inputs\n elif self.approx in {\"St-Q\", \"SGA-Q\"}:\n diff = (inputs - medians) - tf.floor(inputs - medians)\n if self.approx == \"St-Q\":\n probability = diff\n else:\n likelihood_up = tf.exp(-tf.atanh(diff) / self.tau)\n likelihood_down = tf.exp(-tf.atanh(1 - diff) / self.tau)\n probability = likelihood_down / (likelihood_up + likelihood_down)\n delta = tf.cast(\n (probability >= tf.random.uniform(tf.shape(probability))),\n tf.float32,\n )\n outputs = tf.floor(inputs - medians) + delta\n return tf.stop_gradient(outputs + medians - inputs) + inputs\n elif self.approx == \"U-Q\":\n # random value, shape: (N, 1, 1, 1)\n noise = tf.random.uniform(tf.shape(inputs), -half, half)[\n :, 0:1, 0:1, 0:1\n ]\n outputs = tf.round(inputs + noise) - noise\n return tf.stop_gradient(outputs - inputs) + inputs\n else:\n raise NotImplementedError\n elif mode == \"dequantize\":\n return outputs + medians\n else:\n assert mode == \"symbols\", mode\n outputs = tf.cast(outputs, tf.int32)\n return outputs\n", "id": "6935501", "language": "Python", "matching_score": 0.2733365297317505, "max_stars_count": 0, "path": "module.py" }, { "content": "r\"\"\"Implemetation of Total Variation metric, based on article\n remi.flamary.com/demos/proxtv.html and www.wikiwand.com/en/Total_variation_denoising\n\"\"\"\n\nimport torch\nfrom torch.nn.modules.loss import _Loss\nfrom piq.utils import _validate_input, _reduce\n\n\ndef total_variation(x: torch.Tensor, reduction: str = 'mean', norm_type: str = 'l2') -> torch.Tensor:\n r\"\"\"Compute Total Variation metric\n\n Args:\n x: Tensor. Shape :math:`(N, C, H, W)`.\n reduction: Specifies the reduction type:\n ``'none'`` | ``'mean'`` | ``'sum'``. 
Default:``'mean'``\n norm_type: ``'l1'`` | ``'l2'`` | ``'l2_squared'``,\n defines which type of norm to implement, isotropic or anisotropic.\n\n Returns:\n Total variation of a given tensor\n\n References:\n https://www.wikiwand.com/en/Total_variation_denoising\n\n https://remi.flamary.com/demos/proxtv.html\n \"\"\"\n _validate_input([x, ], dim_range=(4, 4), data_range=(0, -1))\n\n if norm_type == 'l1':\n w_variance = torch.sum(torch.abs(x[:, :, :, 1:] - x[:, :, :, :-1]), dim=[1, 2, 3])\n h_variance = torch.sum(torch.abs(x[:, :, 1:, :] - x[:, :, :-1, :]), dim=[1, 2, 3])\n score = (h_variance + w_variance)\n elif norm_type == 'l2':\n w_variance = torch.sum(torch.pow(x[:, :, :, 1:] - x[:, :, :, :-1], 2), dim=[1, 2, 3])\n h_variance = torch.sum(torch.pow(x[:, :, 1:, :] - x[:, :, :-1, :], 2), dim=[1, 2, 3])\n score = torch.sqrt(h_variance + w_variance)\n elif norm_type == 'l2_squared':\n w_variance = torch.sum(torch.pow(x[:, :, :, 1:] - x[:, :, :, :-1], 2), dim=[1, 2, 3])\n h_variance = torch.sum(torch.pow(x[:, :, 1:, :] - x[:, :, :-1, :], 2), dim=[1, 2, 3])\n score = (h_variance + w_variance)\n else:\n raise ValueError(\"Incorrect norm type, should be one of {'l1', 'l2', 'l2_squared'}\")\n\n return _reduce(score, reduction)\n\n\nclass TVLoss(_Loss):\n r\"\"\"Creates a criterion that measures the total variation of the\n the given input :math:`x`.\n\n\n If :attr:`norm_type` set to ``'l2'`` the loss can be described as:\n\n .. math::\n TV(x) = \\sum_{N}\\sqrt{\\sum_{H, W, C}(|x_{:, :, i+1, j} - x_{:, :, i, j}|^2 +\n |x_{:, :, i, j+1} - x_{:, :, i, j}|^2)}\n\n Else if :attr:`norm_type` set to ``'l1'``:\n\n .. math::\n TV(x) = \\sum_{N}\\sum_{H, W, C}(|x_{:, :, i+1, j} - x_{:, :, i, j}| +\n |x_{:, :, i, j+1} - x_{:, :, i, j}|)\n\n where :math:`N` is the batch size, `C` is the channel size.\n\n Args:\n norm_type: one of ``'l1'`` | ``'l2'`` | ``'l2_squared'``\n reduction: Specifies the reduction type:\n ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'``\n\n Examples:\n\n >>> loss = TVLoss()\n >>> x = torch.rand(3, 3, 256, 256, requires_grad=True)\n >>> output = loss(x)\n >>> output.backward()\n\n References:\n https://www.wikiwand.com/en/Total_variation_denoising\n\n https://remi.flamary.com/demos/proxtv.html\n \"\"\"\n def __init__(self, norm_type: str = 'l2', reduction: str = 'mean'):\n super().__init__()\n\n self.norm_type = norm_type\n self.reduction = reduction\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n r\"\"\"Computation of Total Variation (TV) index as a loss function.\n\n Args:\n x: An input tensor. 
Shape :math:`(N, C, H, W)`.\n\n Returns:\n Value of TV loss to be minimized.\n \"\"\"\n score = total_variation(x, reduction=self.reduction, norm_type=self.norm_type)\n return score\n", "id": "2399787", "language": "Python", "matching_score": 2.644429922103882, "max_stars_count": 471, "path": "piq/tv.py" }, { "content": "import torch\nimport pytest\n\nfrom piq import TVLoss, total_variation\n\n\[email protected](scope='module')\ndef x() -> torch.Tensor:\n return torch.rand(4, 3, 256, 256)\n\n\n# ================== Test method: `total_variation` ==================\ndef test_tv_works(x) -> None:\n for mode in ['l2', 'l1', 'l2_squared']:\n measure = total_variation(x, norm_type=mode, reduction='none')\n assert (measure > 0).all()\n with pytest.raises(ValueError):\n wrong_mode = 'DEADBEEF'\n total_variation(x, norm_type=wrong_mode)\n\n\n# ================== Test class: `TVLoss` ==================\ndef test_tv_loss_init() -> None:\n TVLoss()\n\n\ndef test_tv_loss_greater_than_zero(x) -> None:\n for mode in ['l2', 'l1', 'l2_squared']:\n res = TVLoss(norm_type=mode)(x)\n assert res > 0\n\n\ndef test_tv_loss_raises_if_tensors_have_different_types() -> None:\n wrong_type_ = list(range(10))\n with pytest.raises(AssertionError):\n TVLoss()(wrong_type_)\n\n\ndef test_tv_loss_check_available_dimensions() -> None:\n custom_x = torch.rand(256, 256)\n for _ in range(10):\n if custom_x.dim() == 4:\n TVLoss()(custom_x)\n else:\n with pytest.raises(AssertionError):\n TVLoss()(custom_x)\n custom_x.unsqueeze_(0)\n\n\ndef test_tv_loss_for_known_answer():\n # Tensor with `l1` TV = (10 - 1) * 2 * 2 = 36\n x = torch.eye(10).reshape((1, 1, 10, 10))\n x.requires_grad_()\n loss = TVLoss(norm_type='l1')\n measure = loss(x)\n measure.backward()\n assert measure == 36., f'TV for this tensors must be 36., got {measure}'\n assert torch.isfinite(x.grad).all(), f'Expected finite gradient values, got {x.grad}'\n", "id": "6165611", "language": "Python", "matching_score": 1.1668556928634644, "max_stars_count": 471, "path": "tests/test_tv.py" }, { "content": "import torch\nimport pytest\nfrom skimage.io import imread\nimport numpy as np\nfrom typing import Any, Tuple\n\nfrom piq import gmsd, multi_scale_gmsd, GMSDLoss, MultiScaleGMSDLoss\n\nLEAF_VARIABLE_ERROR_MESSAGE = 'Expected non None gradient of leaf variable'\n\n\[email protected](scope='module')\ndef x() -> torch.Tensor:\n return torch.rand(2, 3, 96, 96)\n\n\[email protected](scope='module')\ndef y() -> torch.Tensor:\n return torch.rand(2, 3, 96, 96)\n\n\nx_image = [\n torch.tensor(imread('tests/assets/goldhill_jpeg.gif'), dtype=torch.float32).unsqueeze(0).unsqueeze(0),\n torch.tensor(imread('tests/assets/i01_01_5.bmp'), dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)\n]\n\ny_image = [\n torch.tensor(imread('tests/assets/goldhill.gif'), dtype=torch.float32).unsqueeze(0).unsqueeze(0),\n torch.tensor(imread('tests/assets/I01.BMP'), dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)\n]\n\ny_score = [\n torch.tensor(0.138012587141798),\n torch.tensor(0.094124655829098)\n]\n\n\[email protected](params=zip(x_image, y_image, y_score))\ndef input_images_score(request: Any) -> Any:\n return request.param\n\n\n# ================== Test function: `gmsd` ==================\ndef test_gmsd_forward(x, y, device: str) -> None:\n gmsd(x.to(device), y.to(device))\n\n\ndef test_gmsd_zero_for_equal_tensors(x, device: str) -> None:\n y = x.clone()\n measure = gmsd(x.to(device), y.to(device))\n assert measure.abs() <= 1e-6, f'GMSD for equal tensors must be 0, got {measure}'\n\n\ndef 
test_gmsd_raises_if_tensors_have_different_types(y, device: str) -> None:\n wrong_type_x = [list(range(10)), np.arange(10)]\n for wrong_type_x in wrong_type_x:\n with pytest.raises(AssertionError):\n gmsd(wrong_type_x, y.to(device))\n\n\[email protected](\n \"data_range\", [128, 255],\n)\ndef test_gmsd_supports_different_data_ranges(x, y, data_range, device: str) -> None:\n x_scaled = (x * data_range).type(torch.uint8)\n y_scaled = (y * data_range).type(torch.uint8)\n measure_scaled = gmsd(x_scaled.to(device), y_scaled.to(device), data_range=data_range)\n measure = gmsd(\n x_scaled.to(device) / float(data_range),\n y_scaled.to(device) / float(data_range),\n data_range=1.0\n )\n diff = torch.abs(measure_scaled - measure)\n assert diff <= 1e-6, f'Result for same tensor with different data_range should be the same, got {diff}'\n\n\ndef test_gmsd_fails_for_incorrect_data_range(x, y, device: str) -> None:\n # Scale to [0, 255]\n x_scaled = (x * 255).type(torch.uint8)\n y_scaled = (y * 255).type(torch.uint8)\n with pytest.raises(AssertionError):\n gmsd(x_scaled.to(device), y_scaled.to(device), data_range=1.0)\n\n\ndef test_gmsd_supports_greyscale_tensors(device: str) -> None:\n y = torch.ones(2, 1, 96, 96)\n x = torch.zeros(2, 1, 96, 96)\n gmsd(x.to(device), y.to(device))\n\n\ndef test_gmsd_modes(x, y, device: str) -> None:\n for reduction in ['mean', 'sum', 'none']:\n gmsd(x.to(device), y.to(device), reduction=reduction)\n\n for reduction in ['DEADBEEF', 'random']:\n with pytest.raises(ValueError):\n gmsd(x.to(device), y.to(device), reduction=reduction)\n\n\ndef test_gmsd_compare_with_matlab(input_images_score: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],\n device: str) -> None:\n x, y, y_value = input_images_score\n score = gmsd(x=x.to(device), y=y.to(device), data_range=255)\n assert torch.isclose(score, y_value.to(score)), f'The estimated value must be equal to MATLAB provided one, ' \\\n f'got {score.item():.8f}, while MATLAB equals {y_value}'\n\n\n# ================== Test class: `GMSDLoss` ==================\ndef test_gmsd_loss_forward_backward(x, y, device: str) -> None:\n x.requires_grad_()\n loss_value = GMSDLoss()(x.to(device), y.to(device))\n loss_value.backward()\n assert torch.isfinite(x.grad).all(), LEAF_VARIABLE_ERROR_MESSAGE\n\n\ndef test_gmsd_loss_zero_for_equal_tensors(x, device: str) -> None:\n loss = GMSDLoss()\n y = x.clone()\n measure = loss(x.to(device), y.to(device))\n assert measure.abs() <= 1e-6, f'GMSD for equal tensors must be 0, got {measure}'\n\n\ndef test_gmsd_loss_raises_if_tensors_have_different_types(y, device: str) -> None:\n wrong_type_x = [list(range(10)), np.arange(10)]\n for wrong_x in wrong_type_x:\n with pytest.raises(AssertionError):\n GMSDLoss()(wrong_x, y.to(device))\n\n\[email protected](\n \"data_range\", [128, 255],\n)\ndef test_gmsd_loss_supports_different_data_ranges(x, y, data_range, device: str) -> None:\n x_scaled = (x * data_range).type(torch.uint8)\n y_scaled = (y * data_range).type(torch.uint8)\n loss_scaled = GMSDLoss(data_range=data_range)\n measure_scaled = loss_scaled(x_scaled.to(device), y_scaled.to(device))\n\n loss = GMSDLoss()\n measure = loss(\n x_scaled.to(device) / float(data_range),\n y_scaled.to(device) / float(data_range),\n )\n diff = torch.abs(measure_scaled - measure)\n assert diff <= 1e-6, f'Result for same tensor with different data_range should be the same, got {diff}'\n\n\ndef test_gmsd_loss_supports_greyscale_tensors(device: str) -> None:\n loss = GMSDLoss()\n y = torch.ones(2, 1, 96, 96)\n x = torch.zeros(2, 1, 
96, 96)\n loss(x.to(device), y.to(device))\n\n\ndef test_gmsd_loss_modes(x, y, device: str) -> None:\n for reduction in ['mean', 'sum', 'none']:\n GMSDLoss(reduction=reduction)(x.to(device), y.to(device))\n\n for reduction in ['DEADBEEF', 'random']:\n with pytest.raises(ValueError):\n GMSDLoss(reduction=reduction)(x.to(device), y.to(device))\n\n\n# ================== Test function: `multi_scale_gmsd` ==================\ndef test_multi_scale_gmsd_forward_backward(x, y, device: str) -> None:\n multi_scale_gmsd(x.to(device), y.to(device), chromatic=True)\n\n\ndef test_multi_scale_gmsd_zero_for_equal_tensors(x, device: str) -> None:\n y = x.clone()\n measure = multi_scale_gmsd(x.to(device), y.to(device))\n assert measure.abs() <= 1e-6, f'MultiScaleGMSD for equal tensors must be 0, got {measure}'\n\n\[email protected](\n \"data_range\", [128, 255],\n)\ndef test_multi_scale_gmsd_supports_different_data_ranges(x, y, data_range, device: str) -> None:\n x_scaled = (x * data_range).type(torch.uint8)\n y_scaled = (y * data_range).type(torch.uint8)\n measure_scaled = multi_scale_gmsd(x_scaled.to(device), y_scaled.to(device), data_range=data_range)\n measure = multi_scale_gmsd(\n x_scaled.to(device) / float(data_range),\n y_scaled.to(device) / float(data_range),\n data_range=1.0\n )\n diff = torch.abs(measure_scaled - measure)\n assert diff <= 1e-6, f'Result for same tensor with different data_range should be the same, got {diff}'\n\n\ndef test_multi_scale_gmsd_fails_for_incorrect_data_range(x, y, device: str) -> None:\n # Scale to [0, 255]\n x_scaled = (x * 255).type(torch.uint8)\n y_scaled = (y * 255).type(torch.uint8)\n with pytest.raises(AssertionError):\n multi_scale_gmsd(x_scaled.to(device), y_scaled.to(device), data_range=1.0)\n\n\ndef test_multi_scale_gmsd_supports_greyscale_tensors(device: str) -> None:\n y = torch.ones(2, 1, 96, 96)\n x = torch.zeros(2, 1, 96, 96)\n multi_scale_gmsd(x.to(device), y.to(device))\n\n\ndef test_multi_scale_gmsd_fails_for_greyscale_tensors_chromatic_flag(device: str) -> None:\n y = torch.ones(2, 1, 96, 96)\n x = torch.zeros(2, 1, 96, 96)\n with pytest.raises(AssertionError):\n multi_scale_gmsd(x.to(device), y.to(device), chromatic=True)\n\n\ndef test_multi_scale_gmsd_supports_custom_weights(x, y, device: str) -> None:\n multi_scale_gmsd(x.to(device), y.to(device), scale_weights=torch.tensor([3., 4., 2., 1., 2.]))\n\n\ndef test_multi_scale_gmsd_raise_exception_for_small_images(device: str) -> None:\n y = torch.ones(3, 1, 32, 32)\n x = torch.zeros(3, 1, 32, 32)\n with pytest.raises(ValueError):\n multi_scale_gmsd(x.to(device), y.to(device), scale_weights=torch.tensor([3., 4., 2., 1., 2.]))\n\n\ndef test_multi_scale_gmsd_modes(x, y, device: str) -> None:\n for reduction in ['mean', 'sum', 'none']:\n multi_scale_gmsd(x.to(device), y.to(device), reduction=reduction)\n\n for reduction in ['DEADBEEF', 'random']:\n with pytest.raises(ValueError):\n multi_scale_gmsd(x.to(device), y.to(device), reduction=reduction)\n\n\n# ================== Test class: `MultiScaleGMSDLoss` ==================\ndef test_multi_scale_gmsd_loss_forward_backward(x, y, device: str) -> None:\n x.requires_grad_()\n loss_value = MultiScaleGMSDLoss(chromatic=True)(x.to(device), y.to(device))\n loss_value.backward()\n assert torch.isfinite(x.grad).all(), LEAF_VARIABLE_ERROR_MESSAGE\n\n\ndef test_multi_scale_gmsd_loss_zero_for_equal_tensors(x, device: str) -> None:\n loss = MultiScaleGMSDLoss()\n y = x.clone()\n measure = loss(x.to(device), y.to(device))\n assert measure.abs() <= 1e-6, f'MultiScaleGMSD 
for equal tensors must be 0, got {measure}'\n\n\ndef test_multi_scale_gmsd_loss_supports_different_data_ranges(x, y, device: str) -> None:\n x_255 = x * 255\n y_255 = y * 255\n loss = MultiScaleGMSDLoss()\n measure = loss(x.to(device), y.to(device))\n loss_255 = MultiScaleGMSDLoss(data_range=255)\n measure_255 = loss_255(x_255.to(device), y_255.to(device))\n diff = torch.abs(measure_255 - measure)\n assert diff <= 1e-4, f'Result for same tensor with different data_range should be the same, got {diff}'\n\n\ndef test_multi_scale_gmsd_loss_supports_greyscale_tensors(device: str) -> None:\n loss = MultiScaleGMSDLoss()\n y = torch.ones(2, 1, 96, 96)\n x = torch.zeros(2, 1, 96, 96)\n loss(x.to(device), y.to(device))\n\n\ndef test_multi_scale_gmsd_loss_fails_for_greyscale_tensors_chromatic_flag(device: str) -> None:\n loss = MultiScaleGMSDLoss(chromatic=True)\n y = torch.ones(2, 1, 96, 96)\n x = torch.zeros(2, 1, 96, 96)\n with pytest.raises(AssertionError):\n loss(x.to(device), y.to(device))\n\n\ndef test_multi_scale_gmsd_loss_supports_custom_weights(x, y, device: str) -> None:\n loss = MultiScaleGMSDLoss(scale_weights=torch.tensor([3., 4., 2., 1., 2.]))\n loss(x.to(device), y.to(device))\n\n\ndef test_multi_scale_gmsd_loss_raise_exception_for_small_images(device: str) -> None:\n y = torch.ones(3, 1, 32, 32)\n x = torch.zeros(3, 1, 32, 32)\n loss = MultiScaleGMSDLoss(scale_weights=torch.tensor([3., 4., 2., 1., 2.]))\n with pytest.raises(ValueError):\n loss(x.to(device), y.to(device))\n\n\ndef test_multi_scale_loss_gmsd_modes(x, y, device: str) -> None:\n for reduction in ['mean', 'sum', 'none']:\n MultiScaleGMSDLoss(reduction=reduction)(x.to(device), y.to(device))\n\n for reduction in ['DEADBEEF', 'random']:\n with pytest.raises(ValueError):\n MultiScaleGMSDLoss(reduction=reduction)(x.to(device), y.to(device))\n", "id": "12106826", "language": "Python", "matching_score": 3.8539316654205322, "max_stars_count": 471, "path": "tests/test_gmsd.py" }, { "content": "import torch\nimport itertools\nimport pytest\nimport tensorflow as tf\nfrom piq import MultiScaleSSIMLoss, multi_scale_ssim\nfrom typing import Tuple, List, Any\nfrom skimage.io import imread\nfrom contextlib import contextmanager\n\n\n@contextmanager\ndef raise_nothing(enter_result=None):\n yield enter_result\n\n\[email protected](scope='module')\ndef x() -> torch.Tensor:\n return torch.rand(3, 3, 161, 161)\n\n\[email protected](scope='module')\ndef y() -> torch.Tensor:\n return torch.rand(3, 3, 161, 161)\n\n\[email protected](params=[(3, 3, 161, 161), (3, 3, 161, 161, 2)], scope='module')\ndef x_y_4d_5d(request: Any) -> Tuple[torch.Tensor, torch.Tensor]:\n return torch.rand(request.param), torch.rand(request.param)\n\n\[email protected](params=[(3, 3, 161, 161), (3, 3, 161, 161, 2)], scope='module')\ndef ones_zeros_4d_5d(request: Any) -> Tuple[torch.Tensor, torch.Tensor]:\n return torch.ones(request.param), torch.zeros(request.param)\n\n\[email protected](scope='module')\ndef test_images() -> List[Tuple[torch.Tensor, torch.Tensor]]:\n x_grey = torch.tensor(imread('tests/assets/goldhill_jpeg.gif')).unsqueeze(0).unsqueeze(0)\n y_grey = torch.tensor(imread('tests/assets/goldhill.gif')).unsqueeze(0).unsqueeze(0)\n x_rgb = torch.tensor(imread('tests/assets/I01.BMP')).permute(2, 0, 1).unsqueeze(0)\n y_rgb = torch.tensor(imread('tests/assets/i01_01_5.bmp')).permute(2, 0, 1).unsqueeze(0)\n return [(x_grey, y_grey), (x_rgb, y_rgb)]\n\n\[email protected](scope='module')\ndef scale_weights() -> torch.Tensor:\n return torch.tensor([0.0448, 0.2856, 
0.3001, 0.2363, 0.1333])\n\n\n# ================== Test function: `multi_scale_ssim` ==================\ndef test_multi_scale_ssim_symmetry(x_y_4d_5d, device: str) -> None:\n x = x_y_4d_5d[0].to(device)\n y = x_y_4d_5d[1].to(device)\n measure = multi_scale_ssim(x, y, data_range=1., reduction='none')\n reverse_measure = multi_scale_ssim(y, x, data_range=1., reduction='none')\n assert torch.allclose(measure, reverse_measure), f'Expect: MS-SSIM(a, b) == MSSSIM(b, a), '\\\n f'got {measure} != {reverse_measure}'\n\n\ndef test_multi_scale_ssim_measure_is_one_for_equal_tensors(x: torch.Tensor, device: str) -> None:\n x = x.to(device)\n y = x.clone()\n measure = multi_scale_ssim(y, x, data_range=1.)\n assert torch.allclose(measure, torch.ones_like(measure)), \\\n f'If equal tensors are passed MS-SSIM must be equal to 1 ' \\\n f'(considering floating point operation error up to 1 * 10^-6), got {measure + 1}'\n\n\ndef test_multi_scale_ssim_measure_is_less_or_equal_to_one(ones_zeros_4d_5d: Tuple[torch.Tensor, torch.Tensor],\n device: str) -> None:\n # Create two maximally different tensors.\n ones = ones_zeros_4d_5d[0].to(device)\n zeros = ones_zeros_4d_5d[1].to(device)\n measure = multi_scale_ssim(ones, zeros, data_range=1.)\n assert (measure <= 1).all(), f'MS-SSIM must be <= 1, got {measure}'\n\n\ndef test_multi_scale_ssim_raises_if_tensors_have_different_shapes(x_y_4d_5d, device: str) -> None:\n y = x_y_4d_5d[1].to(device)\n dims = [[3], [2, 3], [161, 162], [161, 162]]\n if y.dim() == 5:\n dims += [[2, 3]]\n for size in list(itertools.product(*dims)):\n wrong_shape_x = torch.rand(size).to(y)\n if wrong_shape_x.size() == y.size():\n multi_scale_ssim(wrong_shape_x, y)\n else:\n with pytest.raises(AssertionError):\n multi_scale_ssim(wrong_shape_x, y)\n scale_weights = torch.rand(2, 2)\n with pytest.raises(AssertionError):\n multi_scale_ssim(x, y, scale_weights=scale_weights)\n\n\ndef test_multi_scale_ssim_raises_if_tensors_have_different_types(x, y) -> None:\n wrong_type_x = list(range(10))\n with pytest.raises(AssertionError):\n multi_scale_ssim(wrong_type_x, y)\n\n\ndef test_ms_ssim_raises_if_kernel_size_greater_than_image(x_y_4d_5d, device: str) -> None:\n x = x_y_4d_5d[0].to(device)\n y = x_y_4d_5d[1].to(device)\n kernel_size = 11\n levels = 5\n min_size = (kernel_size - 1) * 2 ** (levels - 1) + 1\n wrong_size_x = x[:, :, :min_size - 1, :min_size - 1]\n wrong_size_y = y[:, :, :min_size - 1, :min_size - 1]\n with pytest.raises(ValueError):\n multi_scale_ssim(wrong_size_x, wrong_size_y, kernel_size=kernel_size)\n\n\ndef test_multi_scale_ssim_raise_if_wrong_value_is_estimated(test_images: Tuple[torch.Tensor, torch.Tensor],\n scale_weights: torch.Tensor, device: str) -> None:\n for x, y in test_images:\n piq_ms_ssim = multi_scale_ssim(x.to(device), y.to(device), kernel_size=11, kernel_sigma=1.5,\n data_range=255, reduction='none', scale_weights=scale_weights)\n tf_x = tf.convert_to_tensor(x.permute(0, 2, 3, 1).numpy())\n tf_y = tf.convert_to_tensor(y.permute(0, 2, 3, 1).numpy())\n with tf.device('/CPU'):\n tf_ms_ssim = torch.tensor(tf.image.ssim_multiscale(tf_x, tf_y, max_val=255,\n power_factors=scale_weights.numpy()).numpy()).to(device)\n match_accuracy = 1e-4 + 1e-8\n assert torch.allclose(piq_ms_ssim, tf_ms_ssim, rtol=0, atol=match_accuracy), \\\n f'The estimated value must be equal to tensorflow provided one' \\\n f'(considering floating point operation error up to {match_accuracy}), ' \\\n f'got difference {(piq_ms_ssim - tf_ms_ssim).abs()}'\n\n\[email protected](\n \"data_range\", [128, 
255],\n)\ndef test_multi_scale_ssim_supports_different_data_ranges(x_y_4d_5d, data_range, device: str) -> None:\n x, y = x_y_4d_5d\n x_scaled = (x * data_range).type(torch.uint8)\n y_scaled = (y * data_range).type(torch.uint8)\n\n measure_scaled = multi_scale_ssim(x_scaled.to(device), y_scaled.to(device), data_range=data_range)\n measure = multi_scale_ssim(\n x_scaled.to(device) / float(data_range),\n y_scaled.to(device) / float(data_range),\n data_range=1.0\n )\n diff = torch.abs(measure_scaled - measure)\n assert (diff <= 1e-6).all(), f'Result for same tensor with different data_range should be the same, got {diff}'\n\n\ndef test_multi_scale_ssim_fails_for_incorrect_data_range(x, y, device: str) -> None:\n # Scale to [0, 255]\n x_scaled = (x * 255).type(torch.uint8)\n y_scaled = (y * 255).type(torch.uint8)\n with pytest.raises(AssertionError):\n multi_scale_ssim(x_scaled.to(device), y_scaled.to(device), data_range=1.0)\n\n\n# ================== Test class: `MultiScaleSSIMLoss` ==================\ndef test_multi_scale_ssim_loss_grad(x_y_4d_5d, device: str) -> None:\n x = x_y_4d_5d[0].to(device)\n y = x_y_4d_5d[1].to(device)\n x.requires_grad_()\n loss = MultiScaleSSIMLoss(data_range=1.)(x, y).mean()\n loss.backward()\n assert torch.isfinite(x.grad).all(), f'Expected finite gradient values, got {x.grad}'\n\n\ndef test_multi_scale_ssim_loss_symmetry(x_y_4d_5d, device: str) -> None:\n x = x_y_4d_5d[0].to(device)\n y = x_y_4d_5d[1].to(device)\n loss = MultiScaleSSIMLoss()\n loss_value = loss(x, y)\n reverse_loss_value = loss(y, x)\n assert (loss_value == reverse_loss_value).all(), \\\n f'Expect: MS-SSIM(a, b) == MS-SSIM(b, a), got {loss_value} != {reverse_loss_value}'\n\n\ndef test_multi_scale_ssim_loss_equality(y, device: str) -> None:\n y = y.to(device)\n x = y.clone()\n loss = MultiScaleSSIMLoss()(x, y)\n assert (loss.abs() <= 1e-6).all(), f'If equal tensors are passed SSIM loss must be equal to 0 ' \\\n f'(considering floating point operation error up to 1 * 10^-6), got {loss}'\n\n\ndef test_multi_scale_ssim_loss_is_less_or_equal_to_one(ones_zeros_4d_5d: Tuple[torch.Tensor, torch.Tensor],\n device: str) -> None:\n # Create two maximally different tensors.\n ones = ones_zeros_4d_5d[0].to(device)\n zeros = ones_zeros_4d_5d[1].to(device)\n loss = MultiScaleSSIMLoss()(ones, zeros)\n assert (loss <= 1).all(), f'MS-SSIM loss must be <= 1, got {loss}'\n\n\ndef test_multi_scale_ssim_loss_raises_if_tensors_have_different_shapes(x_y_4d_5d, device: str) -> None:\n y = x_y_4d_5d[1].to(device)\n dims = [[3], [2, 3], [161, 162], [161, 162]]\n if y.dim() == 5:\n dims += [[2, 3]]\n for size in list(itertools.product(*dims)):\n wrong_shape_x = torch.rand(size).to(y)\n if wrong_shape_x.size() == y.size():\n MultiScaleSSIMLoss()(wrong_shape_x, y)\n else:\n with pytest.raises(AssertionError):\n MultiScaleSSIMLoss()(wrong_shape_x, y)\n\n scale_weights = torch.rand(2, 2)\n with pytest.raises(AssertionError):\n MultiScaleSSIMLoss(scale_weights=scale_weights)(x, y)\n\n\ndef test_multi_scale_ssim_loss_raises_if_tensors_have_different_types(x, y) -> None:\n wrong_type_y = list(range(10))\n with pytest.raises(AssertionError):\n MultiScaleSSIMLoss()(wrong_type_y, y)\n\n\ndef test_ms_ssim_loss_raises_if_kernel_size_greater_than_image(x_y_4d_5d, device: str) -> None:\n x = x_y_4d_5d[0].to(device)\n y = x_y_4d_5d[1].to(device)\n kernel_size = 11\n levels = 5\n min_size = (kernel_size - 1) * 2 ** (levels - 1) + 1\n wrong_size_x = x[:, :, :min_size - 1, :min_size - 1]\n wrong_size_y = y[:, :, :min_size - 1, :min_size - 
1]\n    with pytest.raises(ValueError):\n        MultiScaleSSIMLoss(kernel_size=kernel_size)(wrong_size_x, wrong_size_y)\n", "id": "8573605", "language": "Python", "matching_score": 3.194282293319702, "max_stars_count": 471, "path": "tests/test_ms_ssim.py" }, { "content": "from typing import Any\nimport torch\nimport pytest\nfrom skimage.io import imread\nfrom contextlib import contextmanager\n\nfrom piq import fsim, FSIMLoss\n\n\n@contextmanager\ndef raise_nothing():\n    yield\n\n\n@pytest.fixture(scope='module')\ndef x() -> torch.Tensor:\n    return torch.rand(3, 3, 256, 256)\n\n\n@pytest.fixture(scope='module')\ndef y() -> torch.Tensor:\n    return torch.rand(3, 3, 256, 256)\n\n\n@pytest.fixture(scope='module')\ndef x_grey() -> torch.Tensor:\n    return torch.rand(3, 1, 256, 256)\n\n\n@pytest.fixture(scope='module')\ndef y_grey() -> torch.Tensor:\n    return torch.rand(3, 1, 256, 256)\n\n\n# ================== Test function: `fsim` ==================\ndef test_fsim_forward(input_tensors, device: str) -> None:\n    x, y = input_tensors\n    fsim(x.to(device), y.to(device), chromatic=False)\n\n\n@pytest.mark.parametrize(\"chromatic\", [False, True])\ndef test_fsim_symmetry(x, y, chromatic: bool, device: str) -> None:\n    measure = fsim(x.to(device), y.to(device), data_range=1., chromatic=chromatic)\n    reverse_measure = fsim(y.to(device), x.to(device), data_range=1., chromatic=chromatic)\n    assert (measure == reverse_measure).all(), f'Expect: FSIM(a, b) == FSIM(b, a), got {measure} != {reverse_measure}'\n\n\n@pytest.mark.parametrize(\n    \"chromatic,expectation\",\n    [(False, raise_nothing()),\n     (True, pytest.raises(AssertionError))])\ndef test_fsim_chromatic_raises_for_greyscale(x_grey, y_grey, chromatic: bool, expectation: Any) -> None:\n    with expectation:\n        fsim(x_grey, y_grey, data_range=1., chromatic=chromatic)\n\n\n@pytest.mark.parametrize(\n    \"x,y,expectation,value\",\n    [\n        (torch.rand(4, 3, 128, 128, 2), torch.rand(4, 3, 128, 128, 2), pytest.raises(AssertionError), None),\n        (torch.randn(4, 3, 128, 128), torch.randn(4, 3, 128, 128), pytest.raises(AssertionError), None),\n        (torch.zeros(4, 3, 128, 128), torch.zeros(4, 3, 128, 128), raise_nothing(), 1.0),\n        (torch.ones(4, 3, 128, 128), torch.ones(4, 3, 128, 128), raise_nothing(), 1.0),\n        (torch.rand(4, 3, 28, 28), torch.rand(4, 3, 28, 28), raise_nothing(), None),\n    ],\n)\ndef test_fsim_for_special_cases(x: torch.Tensor, y: torch.Tensor, expectation: Any, value: float) -> None:\n    with expectation:\n        if value is None:\n            fsim(x, y)\n        else:\n            score = fsim(x, y)\n            assert torch.isclose(score, torch.tensor(value)), \\\n                f'Expected loss value to be equal to target value. 
Got {score} and {value}'\n\n\[email protected](\n \"data_range\", [128, 255],\n)\ndef test_fsim_supports_different_data_ranges(x, y, data_range, device: str) -> None:\n x_scaled = (x * data_range).type(torch.uint8)\n y_scaled = (y * data_range).type(torch.uint8)\n measure_scaled = fsim(x_scaled.to(device), y_scaled.to(device), data_range=data_range)\n measure = fsim(\n x_scaled.to(device) / float(data_range),\n y_scaled.to(device) / float(data_range),\n data_range=1.0\n )\n diff = torch.abs(measure_scaled - measure)\n assert diff <= 1e-5, f'Result for same tensor with different data_range should be the same, got {diff}'\n\n\ndef test_fsim_fails_for_incorrect_data_range(x, y, device: str) -> None:\n # Scale to [0, 255]\n x_scaled = (x * 255).type(torch.uint8)\n y_scaled = (y * 255).type(torch.uint8)\n with pytest.raises(AssertionError):\n fsim(x_scaled.to(device), y_scaled.to(device), data_range=1.0)\n \n\ndef test_fsim_simmular_to_matlab_implementation():\n # Greyscale images\n goldhill = torch.tensor(imread('tests/assets/goldhill.gif'))[None, None, ...]\n goldhill_jpeg = torch.tensor(imread('tests/assets/goldhill_jpeg.gif'))[None, None, ...]\n\n score = fsim(goldhill_jpeg, goldhill, data_range=255, chromatic=False, reduction='none')\n score_baseline = torch.tensor(0.89691)\n\n assert torch.isclose(score, score_baseline), \\\n f'Expected PyTorch score to be equal to MATLAB prediction. Got {score} and {score_baseline}'\n\n # RGB images\n I01 = torch.tensor(imread('tests/assets/I01.BMP')).permute(2, 0, 1)[None, ...]\n i1_01_5 = torch.tensor(imread('tests/assets/i01_01_5.bmp')).permute(2, 0, 1)[None, ...]\n\n score = fsim(i1_01_5, I01, data_range=255, chromatic=False, reduction='none')\n score_chromatic = fsim(i1_01_5, I01, data_range=255, chromatic=True, reduction='none')\n\n # Baseline values are from original MATLAB code\n score_baseline = torch.tensor(0.93674)\n score_baseline_chromatic = torch.tensor(0.92587)\n\n assert torch.isclose(score, score_baseline), \\\n f'Expected PyTorch score to be equal to MATLAB prediction. Got {score} and {score_baseline}'\n assert torch.isclose(score_chromatic, score_baseline_chromatic, atol=1e-4), \\\n 'Expected PyTorch chromatic score to be equal to MATLAB prediction.' 
\\\n        f'Got {score_chromatic} and {score_baseline_chromatic}'\n\n\n# ================== Test class: `FSIMLoss` ==================\ndef test_fsim_loss_reduction(x, y) -> None:\n    loss = FSIMLoss(reduction='mean')\n    measure = loss(x, y)\n    assert measure.dim() == 0, f'FSIM with `mean` reduction must return 1 number, got {len(measure)}'\n\n    loss = FSIMLoss(reduction='sum')\n    measure = loss(x, y)\n    assert measure.dim() == 0, f'FSIM with `sum` reduction must return 1 number, got {len(measure)}'\n\n    loss = FSIMLoss(reduction='none')\n    measure = loss(x, y)\n    assert len(measure) == x.size(0), \\\n        f'FSIM with `none` reduction must have length equal to number of images, got {len(measure)}'\n\n    loss = FSIMLoss(reduction='random string')\n    with pytest.raises(ValueError):\n        loss(x, y)\n\n\ndef test_fsim_loss_computes_grad(x, y, device: str) -> None:\n    x.requires_grad_()\n    loss_value = FSIMLoss()(x.to(device), y.to(device))\n    loss_value.backward()\n    assert x.grad is not None, 'Expected non None gradient of leaf variable'\n", "id": "7806685", "language": "Python", "matching_score": 3.8051226139068604, "max_stars_count": 471, "path": "tests/test_fsim.py" }, { "content": "import torch\nimport pytest\nfrom typing import Tuple\n\nfrom piq import VIFLoss, vif_p\nfrom skimage.io import imread\n\n\n@pytest.fixture(scope='module')\ndef x() -> torch.Tensor:\n    return torch.rand(4, 3, 256, 256)\n\n\n@pytest.fixture(scope='module')\ndef y() -> torch.Tensor:\n    return torch.rand(4, 3, 256, 256)\n\n\n@pytest.fixture(scope='module')\ndef x_1d() -> torch.Tensor:\n    return torch.rand(4, 1, 256, 256)\n\n\n@pytest.fixture(scope='module')\ndef y_1d() -> torch.Tensor:\n    return torch.rand(4, 1, 256, 256)\n\n\n# ================== Test function: `vif_p` ==================\ndef test_vif_p(input_tensors: Tuple[torch.Tensor, torch.Tensor], device: str) -> None:\n    x, y = input_tensors\n    vif_p(x.to(device), y.to(device), data_range=1.)\n\n\ndef test_vif_p_one_for_equal_tensors(x) -> None:\n    y = x.clone()\n    measure = vif_p(x, y)\n    assert torch.isclose(measure, torch.tensor(1.0)), f'VIF for equal tensors should be 1.0, got {measure}.'\n\n\ndef test_vif_p_works_for_zeros_tensors() -> None:\n    x = torch.zeros(4, 3, 256, 256)\n    y = torch.zeros(4, 3, 256, 256)\n    measure = vif_p(x, y, data_range=1.)\n    assert torch.isclose(measure, torch.tensor(1.0)), f'VIF for 2 zero tensors should be 1.0, got {measure}.'\n\n\ndef test_vif_p_fails_for_small_images() -> None:\n    x = torch.rand(2, 3, 32, 32)\n    y = torch.rand(2, 3, 32, 32)\n    with pytest.raises(ValueError):\n        vif_p(x, y)\n\n\n@pytest.mark.parametrize(\n    \"data_range\", [128, 255],\n)\ndef test_vif_supports_different_data_ranges(x, y, data_range, device: str) -> None:\n    x_scaled = (x * data_range).type(torch.uint8)\n    y_scaled = (y * data_range).type(torch.uint8)\n    measure_scaled = vif_p(x_scaled.to(device), y_scaled.to(device), data_range=data_range)\n    measure = vif_p(\n        x_scaled.to(device) / float(data_range),\n        y_scaled.to(device) / float(data_range),\n        data_range=1.0\n    )\n    diff = torch.abs(measure_scaled - measure)\n    assert diff <= 1e-5, f'Result for same tensor with different data_range should be the same, got {diff}'\n\n\ndef test_vif_fails_for_incorrect_data_range(x, y, device: str) -> None:\n    # Scale to [0, 255]\n    x_scaled = (x * 255).type(torch.uint8)\n    y_scaled = (y * 255).type(torch.uint8)\n    with pytest.raises(AssertionError):\n        vif_p(x_scaled.to(device), y_scaled.to(device), data_range=1.0)\n\n\ndef test_vif_simmular_to_matlab_implementation():\n    # Greyscale images\n    goldhill = 
torch.tensor(imread('tests/assets/goldhill.gif'))[None, None, ...]\n goldhill_jpeg = torch.tensor(imread('tests/assets/goldhill_jpeg.gif'))[None, None, ...]\n\n score = vif_p(goldhill_jpeg, goldhill, data_range=255, reduction='none')\n score_baseline = torch.tensor(0.2665)\n\n assert torch.isclose(score, score_baseline, atol=1e-4), \\\n f'Expected PyTorch score to be equal to MATLAB prediction. Got {score} and {score_baseline}'\n\n # RGB images\n I01 = torch.tensor(imread('tests/assets/I01.BMP')).permute(2, 0, 1)[None, ...]\n i1_01_5 = torch.tensor(imread('tests/assets/i01_01_5.bmp')).permute(2, 0, 1)[None, ...]\n\n score = vif_p(i1_01_5, I01, data_range=255, reduction='none')\n\n # RGB images are not supported by MATLAB code. Here is result for luminance channel taken from YIQ colour space\n score_baseline = torch.tensor(0.3147)\n\n assert torch.isclose(score, score_baseline, atol=1e-4), \\\n f'Expected PyTorch score to be equal to MATLAB prediction. Got {score} and {score_baseline}'\n\n\n# ================== Test class: `VIFLoss` ==================\ndef test_vif_loss_forward(x, y, device: str) -> None:\n loss = VIFLoss()\n loss(x.to(device), y.to(device))\n\n \ndef test_vif_loss_zero_for_equal_tensors(x):\n loss = VIFLoss()\n y = x.clone()\n measure = loss(x, y)\n assert torch.isclose(measure, torch.tensor(0.), atol=1e-6), f'VIF for equal tensors must be 0, got {measure}'\n\n\ndef test_vif_loss_reduction(x, y) -> None:\n loss = VIFLoss(reduction='mean')\n measure = loss(x, y)\n assert measure.dim() == 0, f'VIF with `mean` reduction must return 1 number, got {len(measure)}'\n\n loss = VIFLoss(reduction='sum')\n measure = loss(x, y)\n assert measure.dim() == 0, f'VIF with `mean` reduction must return 1 number, got {len(measure)}'\n\n loss = VIFLoss(reduction='none')\n measure = loss(x, y)\n assert len(measure) == x.size(0), \\\n f'VIF with `none` reduction must have length equal to number of images, got {len(measure)}'\n \n loss = VIFLoss(reduction='random string')\n with pytest.raises(ValueError):\n loss(x, y)\n\n\nNONE_GRAD_ERR_MSG = 'Expected non None gradient of leaf variable'\n\n\ndef test_vif_loss_computes_grad(x, y, device: str) -> None:\n x.requires_grad_()\n loss_value = VIFLoss()(x.to(device), y.to(device))\n loss_value.backward()\n assert x.grad is not None, NONE_GRAD_ERR_MSG\n\n\ndef test_vif_loss_computes_grad_for_zeros_tensors() -> None:\n x = torch.zeros(4, 3, 256, 256, requires_grad=True)\n y = torch.zeros(4, 3, 256, 256)\n loss_value = VIFLoss()(x, y)\n loss_value.backward()\n assert x.grad is not None, NONE_GRAD_ERR_MSG\n", "id": "989388", "language": "Python", "matching_score": 1.2963017225265503, "max_stars_count": 471, "path": "tests/test_vif.py" }, { "content": "import pytest\nimport torch\nfrom contextlib import contextmanager\n\nfrom piq import FID\nfrom piq.feature_extractors import InceptionV3\n\n\n@contextmanager\ndef raise_nothing():\n yield\n\n\nclass TestDataset(torch.utils.data.Dataset):\n def __init__(self, input_range=(0.0, 1.0)):\n self.data = torch.FloatTensor(15, 3, 256, 256).uniform_(*input_range)\n self.mask = torch.rand(15, 3, 256, 256)\n\n def __getitem__(self, index):\n x = self.data[index]\n y = self.mask[index]\n\n return {'images': x, 'mask': y}\n\n def __len__(self):\n return len(self.data)\n\n\[email protected](scope='module')\ndef features_y_normal() -> torch.Tensor:\n return torch.rand(1000, 20)\n\n\[email protected](scope='module')\ndef features_x_normal() -> torch.Tensor:\n return torch.rand(1000, 20)\n\n\[email 
protected](scope='module')\ndef features_x_beta() -> torch.Tensor:\n m = torch.distributions.Beta(torch.FloatTensor([2]), torch.FloatTensor([2]))\n return m.sample([1000, 20]).squeeze()\n\n\[email protected](scope='module')\ndef features_x_constant() -> torch.Tensor:\n return torch.ones(1000, 20)\n\n\n# ================== Test class: `FID` ==================\ndef test_initialization() -> None:\n FID()\n\n\ndef test_forward(features_y_normal, features_x_normal, device: str) -> None:\n fid = FID()\n fid(features_y_normal.to(device), features_x_normal.to(device))\n\n\ndef test_compute_feats(device: str) -> None:\n dataset = TestDataset()\n loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=3,\n num_workers=2,\n )\n fid = FID()\n model = InceptionV3()\n fid.compute_feats(loader, model, device=device)\n\n\[email protected](\"input_range,normalize_input,expectation\",\n [\n ((0.0, 1.0), True, raise_nothing()),\n ((-1.0, 1.0), False, raise_nothing()),\n ((-1.0, 1.0), True, pytest.raises(AssertionError)),\n ((-10.0, 10.0), False, pytest.raises(AssertionError))\n ])\ndef test_inception_input_range(input_range, normalize_input, expectation) -> None:\n with expectation:\n dataset = TestDataset(input_range)\n loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=3,\n num_workers=2,\n )\n fid = FID()\n model = InceptionV3(normalize_input=normalize_input)\n fid.compute_feats(loader, model, device='cpu')\n", "id": "12051772", "language": "Python", "matching_score": 3.6606760025024414, "max_stars_count": 471, "path": "tests/test_fid.py" }, { "content": "import pytest\nimport torch\nimport subprocess\nimport sys\nimport builtins\n\nfrom piq import GS\n\n\[email protected](scope='module')\ndef features_y_normal() -> torch.Tensor:\n return torch.rand(1000, 20)\n\n\[email protected](scope='module')\ndef features_x_normal() -> torch.Tensor:\n return torch.rand(1000, 20)\n\n\[email protected](scope='module')\ndef features_x_beta() -> torch.Tensor:\n m = torch.distributions.Beta(torch.FloatTensor([2]), torch.FloatTensor([2]))\n return m.sample([1000, 20]).squeeze()\n\n\ndef install(package: str) -> None:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\n\ndef uninstall(package) -> None:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", package])\n\n\ndef prepare_test(scipy_version='1.3.3', gudhi_version='3.2') -> None:\n try:\n import scipy # noqa: F401\n except ImportError:\n install('scipy' + '==' + scipy_version)\n\n try:\n import gudhi # noqa: F401\n except ImportError:\n install('gudhi' + '==' + gudhi_version)\n\n\[email protected]\ndef hide_available_pkg(monkeypatch):\n import_orig = builtins.__import__\n\n def mocked_import(name, *args, **kwargs):\n if name in ['scipy', 'gudhi']:\n raise ImportError()\n\n return import_orig(name, *args, **kwargs)\n\n monkeypatch.setattr(builtins, '__import__', mocked_import)\n\n\n# ================== Test class: `GS` ==================\ndef test_initialization() -> None:\n prepare_test()\n try:\n GS()\n except Exception as e:\n pytest.fail(f\"Unexpected error occurred: {e}\")\n\n\[email protected]('hide_available_pkg')\ndef test_fails_if_libs_not_installed(features_y_normal, features_x_normal) -> None:\n with pytest.raises(ImportError):\n metric = GS(num_iters=10, sample_size=8)\n metric(features_y_normal, features_x_normal)\n\n\[email protected](reason=\"Randomnly fails, fix in separate PR\")\ndef test_similar_for_same_distribution(features_y_normal, features_x_normal) -> None:\n 
prepare_test()\n    metric = GS(sample_size=1000, num_iters=100, i_max=1000, num_workers=4)\n    diff = metric(features_x_normal, features_y_normal)\n    assert diff <= 2.0, \\\n        f'For same distributions GS should be small, got {diff}'\n\n\n@pytest.mark.skip(reason=\"Randomly fails, fix in separate PR\")\ndef test_differs_for_not_simular_distributions(features_x_beta, features_y_normal) -> None:\n    prepare_test()\n    metric = GS(sample_size=1000, num_iters=100, i_max=1000, num_workers=4)\n    diff = metric(features_x_beta, features_y_normal)\n    assert diff >= 5.0, \\\n        f'For different distributions GS diff should be big, got {diff}'\n", "id": "6453790", "language": "Python", "matching_score": 3.1444430351257324, "max_stars_count": 471, "path": "tests/test_gs.py" }, { "content": "import pytest\nimport torch\n\nfrom piq import PR\n\n\n@pytest.fixture(scope='module')\ndef features_y_normal() -> torch.Tensor:\n    return torch.rand(1000, 20)\n\n\n@pytest.fixture(scope='module')\ndef features_x_normal() -> torch.Tensor:\n    return torch.rand(1000, 20)\n\n\n@pytest.fixture(scope='module')\ndef features_x_beta() -> torch.Tensor:\n    m = torch.distributions.Beta(torch.FloatTensor([2]), torch.FloatTensor([2]))\n    return m.sample([1000, 20]).squeeze()\n\n\n@pytest.fixture(scope='module')\ndef features_x_constant() -> torch.Tensor:\n    return torch.ones(1000, 20)\n\n\ndef test_forward(features_y_normal, features_x_normal) -> None:\n    metric = PR()\n    metric(features_y_normal, features_x_normal)\n\n\ndef test_fails_for_different_dimensions(features_y_normal: torch.Tensor) -> None:\n    features_x_normal = torch.rand(1000, 21)\n    metric = PR()\n    with pytest.raises(AssertionError):\n        metric(features_y_normal, features_x_normal)\n\n\ndef test_works_for_different_number_of_images_in_stack(features_y_normal) -> 
None:\n features_x_normal = torch.rand(1010, 20)\n metric = KID()\n metric(features_y_normal, features_x_normal)\n\n\ndef test_returns_variance(features_y_normal, features_x_normal) -> None:\n metric = KID(ret_var=True)\n result = metric(features_y_normal, features_x_normal)\n print(result)\n assert len(result) == 2, \\\n f'Expected to get score and variance, got {result}'\n", "id": "5620695", "language": "Python", "matching_score": 0.5449925661087036, "max_stars_count": 471, "path": "tests/test_kid.py" }, { "content": "# Copyright 2020 InterDigital Communications, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\nimport torch\n\nfrom compressai._CXX import pmf_to_quantized_cdf\nfrom compressai.ops import LowerBound, NonNegativeParametrizer, ste_round\n\n\nclass TestSTERound:\n def test_ste_round_ok(self):\n x = torch.rand(16)\n assert (ste_round(x) == torch.round(x)).all()\n\n def test_ste_round_grads(self):\n x = torch.rand(24, requires_grad=True)\n y = ste_round(x)\n y.backward(x)\n assert x.grad is not None\n assert (x.grad == x).all()\n\n\nclass TestLowerBound:\n def test_lower_bound_ok(self):\n x = torch.rand(16)\n bound = torch.rand(1)\n lower_bound = LowerBound(bound)\n assert (lower_bound(x) == torch.max(x, bound)).all()\n\n def test_lower_bound_script(self):\n x = torch.rand(16)\n bound = torch.rand(1)\n lower_bound = LowerBound(bound)\n scripted = torch.jit.script(lower_bound)\n assert (scripted(x) == torch.max(x, bound)).all()\n\n def test_lower_bound_grads(self):\n x = torch.rand(16, requires_grad=True)\n bound = torch.rand(1)\n lower_bound = LowerBound(bound)\n y = lower_bound(x)\n y.backward(x)\n\n assert x.grad is not None\n assert (x.grad == ((x >= bound) * x)).all()\n\n\nclass TestNonNegativeParametrizer:\n def test_non_negative(self):\n parametrizer = NonNegativeParametrizer()\n x = torch.rand(1, 8, 8, 8) * 2 - 1 # [0, 1] -> [-1, 1]\n x_reparam = parametrizer(x)\n\n assert x_reparam.shape == x.shape\n assert x_reparam.min() >= 0\n\n def test_non_negative_init(self):\n parametrizer = NonNegativeParametrizer()\n x = torch.rand(1, 8, 8, 8) * 2 - 1\n x_init = parametrizer.init(x)\n\n assert x_init.shape == x.shape\n assert torch.allclose(x_init, torch.sqrt(torch.max(x, x - x)), atol=2 ** -18)\n\n def test_non_negative_min(self):\n for _ in range(10):\n minimum = torch.rand(1)\n parametrizer = NonNegativeParametrizer(minimum.item())\n x = torch.rand(1, 8, 8, 8) * 2 - 1\n x_reparam = parametrizer(x)\n\n assert x_reparam.shape == x.shape\n assert torch.allclose(x_reparam.min(), minimum)\n\n\nclass TestPmfToQuantizedCDF:\n def test_ok(self):\n out = pmf_to_quantized_cdf([0.1, 0.2, 0, 0], 16)\n assert out == [0, 21845, 65534, 65535, 65536]\n\n def test_negative_prob(self):\n with pytest.raises(ValueError):\n pmf_to_quantized_cdf([1, 0, -1], 16)\n\n @pytest.mark.parametrize(\"v\", (\"inf\", \"-inf\", \"nan\"))\n def test_non_finite_prob(self, v):\n with pytest.raises(ValueError):\n pmf_to_quantized_cdf([1, 0, float(v)], 16)\n\n with 
pytest.raises(ValueError):\n pmf_to_quantized_cdf([1, 0, float(v), 2, 3, 4], 16)\n", "id": "3617673", "language": "Python", "matching_score": 1.0348080396652222, "max_stars_count": 2, "path": "tests/test_ops.py" }, { "content": "# Copyright 2020 InterDigital Communications, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport importlib\nimport json\nimport os\nimport random\n\nimport numpy as np\nimport pytest\nimport torch\n\neval_model = importlib.import_module(\"compressai.utils.eval_model.__main__\")\nupdate_model = importlib.import_module(\"compressai.utils.update_model.__main__\")\n\n# Example: GENERATE_EXPECTED=1 pytest -sx tests/test_eval_model.py\nGENERATE_EXPECTED = os.getenv(\"GENERATE_EXPECTED\")\n\n\ndef set_rng_seed(seed):\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)\n\n\ndef test_eval_model():\n with pytest.raises(SystemExit):\n eval_model.main([\"--help\"])\n\n with pytest.raises(SystemExit):\n eval_model.main([])\n\n with pytest.raises(SystemExit):\n eval_model.main([\"pretrained\"])\n\n with pytest.raises(SystemExit):\n eval_model.main(\n [\n \"pretrained\",\n \".\",\n \"-a\",\n \"bmshj2018-factorized\",\n \"-m\",\n \"mse\",\n \"-q\",\n \"1\",\n ]\n )\n\n\[email protected](\"model\", (\"bmshj2018-factorized\",))\[email protected](\"quality\", (\"1\", \"4\", \"8\"))\[email protected](\"metric\", (\"mse\", \"ms-ssim\"))\[email protected](\"entropy_estimation\", (False, True))\ndef test_eval_model_pretrained(capsys, model, quality, metric, entropy_estimation):\n here = os.path.dirname(__file__)\n dirpath = os.path.join(here, \"assets/dataset\")\n\n cmd = [\n \"pretrained\",\n dirpath,\n \"-a\",\n model,\n \"-m\",\n metric,\n \"-q\",\n quality,\n ]\n if entropy_estimation:\n cmd += [\"--entropy-estimation\"]\n eval_model.main(cmd)\n\n output = capsys.readouterr().out\n output = json.loads(output)\n expected = os.path.join(\n here,\n \"expected\",\n f\"eval_{int(entropy_estimation)}_{model}_{metric}_{quality}.json\",\n )\n\n if not os.path.isfile(expected):\n if not GENERATE_EXPECTED:\n raise RuntimeError(f\"Missing expected file {expected}\")\n with open(expected, \"w\") as f:\n json.dump(output, f)\n\n with open(expected, \"r\") as f:\n expected = json.loads(f.read())\n\n for key in (\"name\", \"description\"):\n assert expected[key] == output[key]\n\n for key in (\"psnr\", \"ms-ssim\", \"bpp\"):\n if key not in expected[\"results\"]:\n continue\n assert np.allclose(\n expected[\"results\"][key], output[\"results\"][key], rtol=1e-4, atol=1e-4\n )\n\n\[email protected](\"model_name\", (\"factorized-prior\", \"bmshj2018-factorized\"))\ndef test_eval_model_ckpt(tmp_path, model_name):\n here = os.path.dirname(__file__)\n parent = os.path.dirname(here)\n\n # fake training\n datapath = os.path.join(here, \"assets/fakedata/imagefolder\")\n spec = importlib.util.spec_from_file_location(\n \"examples.train\", os.path.join(parent, \"examples/train.py\")\n )\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n\n argv = 
[\n \"-d\",\n datapath,\n \"-e\",\n \"1\",\n \"--batch-size\",\n \"1\",\n \"--patch-size\",\n \"48\",\n \"64\",\n \"--seed\",\n \"0\",\n \"--save\",\n ]\n\n os.chdir(tmp_path)\n module.main(argv)\n\n checkpoint = \"checkpoint_best_loss.pth.tar\"\n assert os.path.isfile(checkpoint)\n\n # update model\n cmd = [\"-a\", model_name, \"-n\", \"factorized\", checkpoint]\n update_model.main(cmd)\n\n # ckpt evaluation\n dirpath = os.path.join(here, \"assets/dataset\")\n checkpoint = next(f for f in os.listdir(tmp_path) if f.startswith(\"factorized-\"))\n cmd = [\n \"checkpoint\",\n dirpath,\n \"-a\",\n \"bmshj2018-factorized\",\n \"-p\",\n checkpoint,\n ]\n eval_model.main(cmd)\n", "id": "1239919", "language": "Python", "matching_score": 2.0219695568084717, "max_stars_count": 2, "path": "tests/test_eval_model.py" }, { "content": "# Copyright 2020 InterDigital Communications, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nCollect performance metrics of published traditional or end-to-end image\ncodecs.\n\"\"\"\nimport argparse\nimport json\nimport multiprocessing as mp\nimport os\nimport sys\n\nfrom collections import defaultdict\nfrom itertools import starmap\nfrom typing import List\n\nfrom .codecs import AV1, BPG, HM, JPEG, JPEG2000, TFCI, VTM, Codec, WebP\n\n# from torchvision.datasets.folder\nIMG_EXTENSIONS = (\n \".jpg\",\n \".jpeg\",\n \".png\",\n \".ppm\",\n \".bmp\",\n \".pgm\",\n \".tif\",\n \".tiff\",\n \".webp\",\n)\n\ncodecs = [JPEG, WebP, JPEG2000, BPG, TFCI, VTM, HM, AV1]\n\n\n# we need the quality index (not value) to compute the stats later\ndef func(codec, i, *args):\n rv = codec.run(*args)\n return i, rv\n\n\ndef collect(\n codec: Codec,\n dataset: str,\n qualities: List[int],\n metrics: List[str],\n num_jobs: int = 1,\n):\n if not os.path.isdir(dataset):\n raise OSError(f\"No such directory: {dataset}\")\n\n filepaths = [\n os.path.join(dirpath, f)\n for dirpath, _, filenames in os.walk(dataset)\n for f in filenames\n if os.path.splitext(f)[-1].lower() in IMG_EXTENSIONS\n ]\n\n pool = mp.Pool(num_jobs) if num_jobs > 1 else None\n\n if len(filepaths) == 0:\n print(\"No images found in the dataset directory\")\n sys.exit(1)\n\n args = [\n (codec, i, f, q, metrics) for i, q in enumerate(qualities) for f in filepaths\n ]\n\n if pool:\n rv = pool.starmap(func, args)\n else:\n rv = list(starmap(func, args))\n\n results = [defaultdict(float) for _ in range(len(qualities))]\n\n for i, metrics in rv:\n for k, v in metrics.items():\n results[i][k] += v\n\n # aggregate results for all images\n for i, _ in enumerate(results):\n for k, v in results[i].items():\n results[i][k] = v / len(filepaths)\n\n # list of dict -> dict of list\n out = defaultdict(list)\n for r in results:\n for k, v in r.items():\n out[k].append(v)\n return out\n\n\ndef setup_args():\n description = \"Collect codec metrics.\"\n parser = argparse.ArgumentParser(description=description)\n subparsers = parser.add_subparsers(dest=\"codec\", help=\"Select codec\")\n subparsers.required = True\n return parser, 
subparsers\n\n\ndef setup_common_args(parser):\n parser.add_argument(\"dataset\", type=str)\n parser.add_argument(\n \"-j\",\n \"--num-jobs\",\n type=int,\n metavar=\"N\",\n default=1,\n help=\"number of parallel jobs (default: %(default)s)\",\n )\n parser.add_argument(\n \"-q\",\n \"--quality\",\n dest=\"qualities\",\n metavar=\"Q\",\n default=[75],\n nargs=\"+\",\n type=int,\n help=\"quality parameter (default: %(default)s)\",\n )\n parser.add_argument(\n \"--metrics\",\n dest=\"metrics\",\n default=[\"psnr\", \"ms-ssim\"],\n nargs=\"+\",\n help=\"do not return PSNR and MS-SSIM metrics (use for very small images)\",\n )\n\n\ndef main(argv):\n parser, subparsers = setup_args()\n for c in codecs:\n cparser = subparsers.add_parser(c.__name__.lower(), help=f\"{c.__name__}\")\n setup_common_args(cparser)\n c.setup_args(cparser)\n args = parser.parse_args(argv)\n\n codec_cls = next(c for c in codecs if c.__name__.lower() == args.codec)\n codec = codec_cls(args)\n results = collect(\n codec,\n args.dataset,\n args.qualities,\n args.metrics,\n args.num_jobs,\n )\n\n output = {\n \"name\": codec.name,\n \"description\": codec.description,\n \"results\": results,\n }\n\n print(json.dumps(output, indent=2))\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "id": "4007259", "language": "Python", "matching_score": 1.8386902809143066, "max_stars_count": 2, "path": "compressai/utils/bench/__main__.py" }, { "content": "import argparse\nfrom collections import defaultdict\nimport os\nimport subprocess\n\nimport pandas as pd\n\n\ndef parse_stdout(score_str: str):\n score_dict = dict()\n # parse standard output\n for line in score_str.strip().split(\"\\n\"):\n name, sc = line.split(\": \")\n score_dict[name] = float(sc)\n return score_dict\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"data\", help=\"/path/to/Kodak/images\")\n parser.add_argument(\n \"--qua_ent\",\n choices={\"AUN-Q\", \"STE-Q\", \"St-Q\", \"U-Q\", \"SGA-Q\"},\n default=\"AUN-Q\",\n )\n parser.add_argument(\n \"--lambda\",\n type=float,\n default=0.01,\n dest=\"lmbda\",\n help=\"Lambda for rate-distortion tradeoff.\",\n )\n parser.add_argument(\"--distortion\", default=\"mse\", choices={\"mse\", \"msssim\"})\n parser.add_argument(\"--checkpoint_dir\", default=\"train\")\n parser.add_argument(\"--decode\", action=\"store_true\")\n parser.add_argument(\"--out\", default=\"score.csv\")\n args = parser.parse_args()\n\n fnames = sorted(os.listdir(args.data))\n scores_dict = defaultdict(list)\n\n tfci_dir: str = os.path.join(args.checkpoint_dir, \"tfci\")\n decomp_dir: str = os.path.join(args.checkpoint_dir, \"decomp\")\n os.makedirs(tfci_dir, exist_ok=True)\n os.makedirs(decomp_dir, exist_ok=True)\n\n for fname in fnames:\n # compress\n p = subprocess.Popen(\n \"python main.py --verbose --qua_ent {} --checkpoint_dir {} compress {} {}.tfci\".format(\n args.qua_ent,\n args.checkpoint_dir,\n os.path.join(args.data, fname),\n os.path.join(tfci_dir, fname),\n ),\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL,\n shell=True,\n )\n print(p.args)\n output = p.communicate()[0]\n p.wait()\n\n # decompress\n if args.decode:\n p = subprocess.Popen(\n \"python main.py --qua_ent {} --checkpoint_dir {} decompress {}.tfci {}.tfci.png\".format(\n args.qua_ent,\n args.checkpoint_dir,\n os.path.join(tfci_dir, fname),\n os.path.join(decomp_dir, fname),\n ),\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n shell=True,\n )\n print(p.args)\n p.communicate()\n p.wait()\n\n score_str: str = str(output, 
encoding=\"utf-8\", errors=\"replace\")\n try:\n score_dict = parse_stdout(score_str)\n except Exception as e:\n print(e)\n print(fname, score_str, output)\n return\n\n for k, v in score_dict.items():\n scores_dict[k].append(v)\n\n df = pd.DataFrame.from_dict(scores_dict)\n df.index = fnames\n if args.distortion == \"mse\":\n df[\"Loss\"] = (\n args.lmbda * df[\"Mean squared error\"] + df[\"Information content in bpp\"]\n )\n else:\n df[\"Loss\"] = (\n args.lmbda * (1 - df[\"Multiscale SSIM\"]) + df[\"Information content in bpp\"]\n )\n df.to_csv(os.path.join(args.checkpoint_dir, args.out))\n print(df.mean())\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "5075336", "language": "Python", "matching_score": 1.3537588119506836, "max_stars_count": 0, "path": "evaluate.py" }, { "content": "\"\"\"Code for computation of PLCC, SRCC and KRCC between\n PIQ metrics predictions and ground truth scores from MOS databases.\n\"\"\"\nimport argparse\nimport functools\nfrom typing import List, Callable\n\nimport piq\nimport tqdm\nimport torch\nimport pandas as pd\nfrom pathlib import Path\nfrom skimage.io import imread\nfrom scipy.stats import spearmanr, kendalltau\n\n\nMETRICS = {\n # Full Reference\n \"PSNR\": functools.partial(piq.psnr, reduction='none'),\n \"SSIM\": functools.partial(piq.ssim, reduction='none'),\n \"MS-SSIM\": functools.partial(piq.multi_scale_ssim, reduction='none'),\n \"VIFp\": functools.partial(piq.vif_p, reduction='none'),\n \"GMSD\": functools.partial(piq.gmsd, reduction='none'),\n \"MS-GMSD\": functools.partial(piq.multi_scale_gmsd, reduction='none'),\n \"MS-GMSDc\": functools.partial(piq.multi_scale_gmsd, chromatic=True, reduction='none'),\n \"FSIM\": functools.partial(piq.fsim, chromatic=False, reduction='none'),\n \"FSIMc\": functools.partial(piq.fsim, chromatic=True, reduction='none'),\n \"VSI\": functools.partial(piq.vsi, reduction='none'),\n \"HaarPSI\": functools.partial(piq.haarpsi, reduction='none'),\n \"MDSI\": functools.partial(piq.mdsi, reduction='none'),\n \"LPIPS-vgg\": piq.LPIPS(replace_pooling=False, reduction='none'),\n \"DISTS\": piq.DISTS(reduction='none'),\n \"PieAPP\": piq.PieAPP(reduction='none'),\n \"Content\": piq.ContentLoss(reduction='none'),\n \"Style\": piq.StyleLoss(reduction='none'),\n\n # No Reference\n \"BRISQUE\": functools.partial(piq.brisque, reduction='none')\n}\n\n\nclass TID2013(torch.utils.data.Dataset):\n \"\"\"\n Args:\n root: Root directory path.\n\n Returns:\n x: image with some kind of distortion in [0, 1] range\n y: image without distortion in [0, 1] range\n score: MOS score for this pair of images\n \"\"\"\n _filename = \"mos_with_names.txt\"\n\n def __init__(self, root: Path = \"datasets/tid2013\"):\n assert root.exists(),\\\n \"You need to download TID2013 dataset first. Check http://www.ponomarenko.info/tid2013\"\n\n df = pd.read_csv(\n root / self._filename,\n sep=' ',\n names=['score', 'dist_img'],\n header=None\n )\n df[\"ref_img\"] = df[\"dist_img\"].apply(lambda x: f\"reference_images/{(x[:3] + x[-4:]).upper()}\")\n df[\"dist_img\"] = df[\"dist_img\"].apply(lambda x: f\"distorted_images/{x}\")\n\n self.scores = df['score'].to_numpy()\n self.df = df[[\"dist_img\", 'ref_img', 'score']]\n self.root = root\n\n def __getitem__(self, index):\n x_path = self.root / self.df.iloc[index][0]\n y_path = self.root / self.df.iloc[index][1]\n score = self.scores[index]\n\n # Load image and ref. 
Convert to tensor and [0, 1] range\n x = torch.tensor(imread(x_path)).permute(2, 0, 1) / 255\n y = torch.tensor(imread(y_path)).permute(2, 0, 1) / 255\n\n return x, y, score\n\n def __len__(self):\n return len(self.df)\n\n\nclass KADID10k(TID2013):\n _filename = \"dmos.csv\"\n\n def __init__(self, root: Path = \"datasets/kadid10k\"):\n assert root.exists(),\\\n \"You need to download KADID10K dataset first. Check http://database.mmsp-kn.de/kadid-10k-database.html\"\n\n # Read file mith DMOS\n self.df = pd.read_csv(root / self._filename)\n self.df.rename(columns={\"dmos\": \"score\"}, inplace=True)\n self.scores = self.df[\"score\"].to_numpy()\n self.df = self.df[[\"dist_img\", 'ref_img', 'score']]\n\n self.root = root / \"images\"\n\n\nDATASETS = {\n \"tid2013\": TID2013,\n \"kadid10k\": KADID10k,\n}\n\n\ndef eval_metric(loader: torch.utils.data.DataLoader, metric: Callable, device: str) -> List:\n \"\"\"Evaluate metric on a given dataset.\n Args:\n loader: PyTorch dataloader that returns batch of distorted images, reference images and scores.\n metric: Should support `metric(x, y)` or `metric(x)` call.\n device: Computation device.\n\n Returns:\n gt_scores: Ground truth values\n metric_scores: Predicted values as torch.Tensors.\n \"\"\"\n assert isinstance(loader, torch.utils.data.DataLoader), \"Expect loader to be DataLoader class\"\n assert callable(metric), f\"Expected metric to be callable, got {type(metric)} instead!\"\n\n gt_scores = []\n metric_scores = []\n\n for (distorted_images, reference_images, scores) in tqdm.tqdm(loader, ncols=50):\n distorted_images, reference_images = distorted_images.to(device), reference_images.to(device)\n gt_scores.append(scores.cpu())\n\n # Full Reference methods\n metric_score = metric(distorted_images, reference_images).cpu()\n metric_scores.append(metric_score.cpu())\n\n return torch.cat(gt_scores).numpy(), torch.cat(metric_scores).numpy()\n\n\ndef main(dataset_name: str, path: Path, metrics: List, batch_size: int, device: str) -> None:\n \n # Init dataset and dataloader\n dataset = DATASETS[dataset_name](root=path)\n loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=4)\n\n for name in metrics:\n gt_scores, metric_scores = eval_metric(loader, METRICS[name], device=device)\n print(f\"{name}: SRCC {abs(spearmanr(gt_scores, metric_scores)[0]):0.4f}\",\n f\"KRCC {abs(kendalltau(gt_scores, metric_scores)[0]):0.4f}\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Benchmark PIQ metrics\")\n\n # General\n parser.add_argument(\"--dataset\", type=str, help=\"Dataset name\", choices=list(DATASETS.keys()))\n parser.add_argument(\"--path\", type=Path, help=\"Path to dataset\")\n parser.add_argument('--metrics', nargs='+', default=[], help='Metrics to benchmark', choices=list(METRICS.keys()))\n parser.add_argument('--batch_size', type=int, default=1, help='Batch size')\n parser.add_argument('--device', default='cuda', choices=['cpu', 'cuda'], help='Computation device')\n\n args = parser.parse_args()\n print(f\"Parameters used for benchmark: {args}\")\n main(\n dataset_name=args.dataset,\n path=args.path,\n metrics=args.metrics,\n batch_size=args.batch_size,\n device=args.device\n )\n", "id": "4987594", "language": "Python", "matching_score": 2.316092014312744, "max_stars_count": 0, "path": "tests/results_benchmark.py" }, { "content": "import torch\nimport pytest\nfrom typing import Any, Tuple, Callable, Union\nfrom contextlib import contextmanager\n\nfrom skimage.io import imread\nfrom piq 
import ContentLoss, StyleLoss, LPIPS, DISTS\nfrom piq.feature_extractors import InceptionV3\n\n\n@contextmanager\ndef raise_nothing():\n yield\n\n\nNONE_GRAD_ERR_MSG = 'Expected non None gradient of leaf variable'\n\n\n# ================== Test class: `ContentLoss` ==================\ndef test_content_loss_init() -> None:\n ContentLoss()\n\n\ndef test_content_loss_forward(input_tensors: Tuple[torch.Tensor, torch.Tensor], device: str) -> None:\n x, y = input_tensors\n loss = ContentLoss()\n loss(x.to(device), y.to(device))\n\n\ndef test_content_loss_computes_grad(input_tensors: Tuple[torch.Tensor, torch.Tensor], device: str) -> None:\n x, y = input_tensors\n x.requires_grad_()\n loss_value = ContentLoss()(x.to(device), y.to(device))\n loss_value.backward()\n assert x.grad is not None, NONE_GRAD_ERR_MSG\n\n\ndef test_content_loss_raises_if_wrong_reduction(x, y) -> None:\n for mode in ['mean', 'sum', 'none']:\n ContentLoss(reduction=mode)(x, y)\n\n for mode in [None, 'n', 2]:\n with pytest.raises(ValueError):\n ContentLoss(reduction=mode)(x, y)\n\n\[email protected](\n \"model,expectation\",\n [\n ('vgg16', raise_nothing()),\n ('vgg19', raise_nothing()),\n (InceptionV3(), raise_nothing()),\n (None, pytest.raises(ValueError)),\n ('random_encoder', pytest.raises(ValueError)),\n ],\n)\ndef test_content_loss_raises_if_wrong_extractor(x, y, model: Union[str, Callable], expectation: Any) -> None:\n with expectation:\n ContentLoss(feature_extractor=model)\n\n\[email protected](\n \"model\", ['vgg16', InceptionV3()],\n)\ndef test_content_loss_replace_pooling(x, y, model: Union[str, Callable]) -> None:\n ContentLoss(feature_extractor=model, replace_pooling=True)\n\n\ndef test_content_loss_supports_custom_extractor(x, y, device: str) -> None:\n loss = ContentLoss(feature_extractor=InceptionV3().blocks, layers=['0', '1'])\n loss(x, y)\n\n\[email protected](\n \"x, y, expectation, value\",\n [\n (torch.rand(2, 3, 96, 96, 2), torch.rand(2, 3, 96, 96, 2), pytest.raises(AssertionError), None),\n (torch.randn(2, 3, 96, 96), torch.randn(2, 3, 96, 96), raise_nothing(), None),\n (torch.zeros(2, 3, 96, 96), torch.zeros(2, 3, 96, 96), raise_nothing(), 0.0),\n (torch.ones(2, 3, 96, 96), torch.ones(2, 3, 96, 96), raise_nothing(), 0.0),\n (torch.rand(2, 3, 28, 28), torch.rand(2, 3, 28, 28), pytest.raises(RuntimeError), None),\n ],\n)\ndef test_content_loss_forward_for_special_cases(x, y, expectation: Any, value: float) -> None:\n loss = ContentLoss()\n with expectation:\n if value is None:\n loss(x, y)\n else:\n loss_value = loss(x, y)\n assert torch.isclose(loss_value, torch.tensor(value)), \\\n f'Expected loss value to be equal to target value. 
Got {loss_value} and {value}'\n\n\[email protected](\"Negative tensors are not supported yet\")\ndef test_content_loss_forward_for_normalized_input(device: str) -> None:\n x = torch.randn(2, 3, 96, 96).to(device)\n y = torch.randn(2, 3, 96, 96).to(device)\n loss = ContentLoss(mean=[0., 0., 0.], std=[1., 1., 1.])\n loss(x.to(device), y.to(device))\n\n\n# ================== Test class: `StyleLoss` ==================\ndef test_style_loss_init() -> None:\n StyleLoss()\n\n\ndef test_style_loss_forward(input_tensors: Tuple[torch.Tensor, torch.Tensor], device: str) -> None:\n x, y = input_tensors\n loss = StyleLoss()\n loss(x.to(device), y.to(device))\n\n\ndef test_style_loss_computes_grad(input_tensors: Tuple[torch.Tensor, torch.Tensor], device: str) -> None:\n x, y = input_tensors\n x.requires_grad_()\n loss_value = StyleLoss()(x.to(device), y.to(device))\n loss_value.backward()\n assert x.grad is not None, NONE_GRAD_ERR_MSG\n\n\ndef test_style_loss_raises_if_wrong_reduction(x, y) -> None:\n for mode in ['mean', 'sum', 'none']:\n StyleLoss(reduction=mode)(x, y)\n\n for mode in [None, 'n', 2]:\n with pytest.raises(ValueError):\n StyleLoss(reduction=mode)(x, y)\n\n\n# ================== Test class: `LPIPS` ==================\ndef test_lpips_loss_init() -> None:\n LPIPS()\n\n\ndef test_lpips_loss_forward(input_tensors: Tuple[torch.Tensor, torch.Tensor], device: str) -> None:\n x, y = input_tensors\n loss = LPIPS()\n loss(x.to(device), y.to(device))\n\n\ndef test_lpips_computes_grad(x, y, device: str) -> None:\n x.requires_grad_()\n loss_value = LPIPS()(x.to(device), y.to(device))\n loss_value.backward()\n assert x.grad is not None, NONE_GRAD_ERR_MSG\n\n\ndef test_lpips_loss_raises_if_wrong_reduction(x, y) -> None:\n for mode in ['mean', 'sum', 'none']:\n LPIPS(reduction=mode)(x, y)\n\n for mode in [None, 'n', 2]:\n with pytest.raises(ValueError):\n LPIPS(reduction=mode)(x, y)\n\n\[email protected](\n \"x, y, expectation, value\",\n [\n (torch.zeros(2, 3, 96, 96), torch.zeros(2, 3, 96, 96), raise_nothing(), 0.0),\n (torch.ones(2, 3, 96, 96), torch.ones(2, 3, 96, 96), raise_nothing(), 0.0),\n ],\n)\ndef test_lpips_loss_forward_for_special_cases(x, y, expectation: Any, value: float) -> None:\n loss = LPIPS()\n with expectation:\n loss_value = loss(x, y)\n assert torch.isclose(loss_value, torch.tensor(value), atol=1e-6), \\\n f'Expected loss value to be equal to target value. Got {loss_value} and {value}'\n\n\n# ================== Test class: `DISTS` ==================\ndef test_dists_loss_forward(x, y, device: str) -> None:\n loss = DISTS()\n loss(x.to(device), y.to(device))\n\n\ndef test_dists_computes_grad(x, y, device: str) -> None:\n x.requires_grad_()\n loss_value = DISTS()(x.to(device), y.to(device))\n loss_value.backward()\n assert x.grad is not None, NONE_GRAD_ERR_MSG\n\n\[email protected](\n \"x, y, expectation, value\",\n [\n (torch.zeros(2, 3, 96, 96), torch.zeros(2, 3, 96, 96), raise_nothing(), 0.0),\n (torch.ones(2, 3, 96, 96), torch.ones(2, 3, 96, 96), raise_nothing(), 0.0),\n ],\n)\ndef test_dists_loss_forward_for_special_cases(x, y, expectation: Any, value: float) -> None:\n loss = DISTS()\n with expectation:\n loss_value = loss(x, y)\n assert torch.isclose(loss_value, torch.tensor(value), atol=1e-6), \\\n f'Expected loss value to be equal to target value. 
Got {loss_value} and {value}'\n\n\ndef test_dists_simmilar_to_official_implementation() -> None:\n # Baseline scores from: https://github.com/dingkeyan93/DISTS\n loss = DISTS()\n\n # Greyscale images\n goldhill = torch.tensor(imread('tests/assets/goldhill.gif'))[None, None, ...] / 255.0\n goldhill_jpeg = torch.tensor(imread('tests/assets/goldhill_jpeg.gif'))[None, None, ...] / 255.0\n\n loss_value = loss(goldhill_jpeg, goldhill)\n baseline_value = torch.tensor(0.19509)\n assert torch.isclose(loss_value, baseline_value, atol=1e-3), \\\n f'Expected PIQ loss to be equal to original. Got {loss_value} and {baseline_value}'\n\n # RGB images\n I01 = torch.tensor(imread('tests/assets/I01.BMP')).permute(2, 0, 1)[None, ...] / 255.0\n i1_01_5 = torch.tensor(imread('tests/assets/i01_01_5.bmp')).permute(2, 0, 1)[None, ...] / 255.0\n\n loss_value = loss(i1_01_5, I01)\n baseline_value = torch.tensor(0.17321)\n\n assert torch.isclose(loss_value, baseline_value, atol=1e-3), \\\n f'Expected PIQ loss to be equal to original. Got {loss_value} and {baseline_value}'\n", "id": "4063402", "language": "Python", "matching_score": 1.8856037855148315, "max_stars_count": 0, "path": "tests/test_perseptual.py" }, { "content": "from typing import List, Any\n\nimport pytest\nimport torch\n\ndevices: List[str] = [\"cpu\"]\nif torch.cuda.is_available():\n devices.append(\"cuda\")\n\n\[email protected](params=devices, scope='module')\ndef device(request: Any) -> Any:\n return request.param\n\n\[email protected](scope='module')\ndef x() -> torch.Tensor:\n return torch.rand(4, 3, 96, 96)\n\n\[email protected](scope='module')\ndef y() -> torch.Tensor:\n return torch.rand(4, 3, 96, 96)\n\n\nx_tensors = [\n torch.rand(4, 3, 96, 96), # Random 4D\n torch.rand(4, 1, 96, 96), # Random 4D greyscale\n]\n\ny_tensors = [\n torch.rand(4, 3, 96, 96), # Random 4D\n torch.rand(4, 1, 96, 96), # Random 4D greyscale\n]\n\n\[email protected](params=zip(x_tensors, y_tensors))\ndef input_tensors(request: Any) -> Any:\n return request.param\n", "id": "7084827", "language": "Python", "matching_score": 0.13562917709350586, "max_stars_count": 471, "path": "tests/conftest.py" }, { "content": "import torch\nimport piq\n\n\[email protected]_grad()\ndef main():\n x_features = torch.rand(2000, 128)\n y_features = torch.rand(2000, 128)\n\n if torch.cuda.is_available():\n # Move to GPU to make computaions faster\n x_features = x_features.cuda()\n y_features = y_features.cuda()\n\n # Use FID class to compute FID score from image features, pre-extracted from some feature extractor network\n fid: torch.Tensor = piq.FID()(x_features, y_features)\n print(f\"FID: {fid:0.4f}\")\n\n # If image features are not available, extract them using compute_feats of FID class.\n # Please note that compute_feats consumes a data loader of predefined format.\n\n # Use GS class to compute Geometry Score from image features, pre-extracted from some feature extractor network.\n # Computation is heavily CPU dependent, adjust num_workers parameter according to your system configuration.\n gs: torch.Tensor = piq.GS(sample_size=64, num_iters=100, i_max=100, num_workers=4)(x_features, y_features)\n print(f\"GS: {gs:0.4f}\")\n\n # Use inception_score function to compute IS from image features, pre-extracted from some feature extractor network.\n # Note, that we follow recomendations from paper \"A Note on the Inception Score\"\n isc_mean, _ = piq.inception_score(x_features, num_splits=10)\n # To compute difference between IS for 2 sets of image features, use IS class.\n isc: torch.Tensor 
= piq.IS(distance='l1')(x_features, y_features)\n print(f\"IS: {isc_mean:0.4f}, difference: {isc:0.4f}\")\n\n # Use KID class to compute KID score from image features, pre-extracted from some feature extractor network:\n kid: torch.Tensor = piq.KID()(x_features, y_features)\n print(f\"KID: {kid:0.4f}\")\n\n # Use MSID class to compute MSID score from image features, pre-extracted from some feature extractor network:\n msid: torch.Tensor = piq.MSID()(x_features, y_features)\n print(f\"MSID: {msid:0.4f}\")\n\n\nif __name__ == '__main__':\n main()\n", "id": "7011283", "language": "Python", "matching_score": 2.347107172012329, "max_stars_count": 471, "path": "examples/feature_metrics.py" }, { "content": "\"\"\"\nPyTorch implementation of Inception score (IS)\nReference:\n <NAME>. et al. Improved techniques for training gans (2016)\n https://arxiv.org/abs/1606.03498\n <NAME> et al. A Note on the Inception Score\n https://arxiv.org/pdf/1801.01973.pdf\n\nCredits:\n https://github.com/sbarratt/inception-score-pytorch\n https://github.com/tsc2017/Inception-Score\n https://github.com/openai/improved-gan/issues/29\n\"\"\"\nimport torch\nimport torch.nn.functional as F\n\nfrom piq.base import BaseFeatureMetric\nfrom piq.utils import _validate_input\n\n\ndef inception_score(features: torch.Tensor, num_splits: int = 10):\n r\"\"\"Compute Inception Score for a list of image features.\n Expects raw logits from Inception-V3 as input.\n\n Args:\n features (torch.Tensor): Low-dimension representation of image set. Shape (N_samples, encoder_dim).\n num_splits: Number of parts to divide features. Inception Score is computed for them separately and\n results are then averaged.\n\n Returns:\n score\n\n variance\n\n References:\n \"A Note on the Inception Score\"\n https://arxiv.org/pdf/1801.01973.pdf\n\n \"\"\"\n assert len(features.shape) == 2, \\\n f\"Features must have shape (N_samples, encoder_dim), got {features.shape}\"\n N = features.size(0)\n\n # Convert logits to probabilities\n probas = F.softmax(features)\n\n # In the paper the score is computed for 10 splits of the dataset and then averaged.\n partial_scores = []\n for i in range(num_splits):\n subset = probas[i * (N // num_splits): (i + 1) * (N // num_splits), :]\n\n # Compute KL divergence\n p_y = torch.mean(subset, dim=0)\n scores = []\n for k in range(subset.shape[0]):\n p_yx = subset[k, :]\n scores.append(F.kl_div(p_y.log(), p_yx, reduction='sum'))\n\n # Compute exponential of the mean of the KL-divergence for each split\n partial_scores.append(torch.tensor(scores).mean().exp())\n\n partial_scores = torch.tensor(partial_scores)\n return torch.mean(partial_scores).to(features), torch.std(partial_scores).to(features)\n\n\nclass IS(BaseFeatureMetric):\n r\"\"\"Creates a criterion that measures difference of Inception Score between two datasets.\n\n IS is computed separately for predicted :math:`x` and target :math:`y` features and expects raw InceptionV3 model\n logits as inputs.\n\n Args:\n num_splits: Number of parts to divide features.\n IS is computed for them separately and results are then averaged.\n distance: How to measure distance between scores: ``'l1'`` | ``'l2'``. 
Default: ``'l1'``.\n\n Examples:\n >>> loss = IS()\n >>> x = torch.rand(3, 3, 256, 256, requires_grad=True)\n >>> y = torch.rand(3, 3, 256, 256)\n >>> output = loss(x, y)\n >>> output.backward()\n\n References:\n \"A Note on the Inception Score\" https://arxiv.org/pdf/1801.01973.pdf\n \"\"\"\n def __init__(self, num_splits: int = 10, distance: str = 'l1') -> None:\n r\"\"\"\n\n \"\"\"\n super(IS, self).__init__()\n self.num_splits = num_splits\n self.distance = distance\n\n def compute_metric(self, x_features: torch.Tensor, y_features: torch.Tensor) -> torch.Tensor:\n r\"\"\"Compute IS.\n\n Both features should have shape (N_samples, encoder_dim).\n\n Args:\n x_features: Samples from data distribution. Shape :math:`(N_x, D)`\n y_features: Samples from data distribution. Shape :math:`(N_y, D)`\n\n Returns:\n L1 or L2 distance between scores for datasets :math:`x` and :math:`y`.\n \"\"\"\n _validate_input([x_features, y_features], dim_range=(2, 2), size_range=(0, 2))\n x_is, _ = inception_score(x_features, num_splits=self.num_splits)\n y_is, _ = inception_score(y_features, num_splits=self.num_splits)\n if self.distance == 'l1':\n return torch.dist(x_is, y_is, 1)\n elif self.distance == 'l2':\n return torch.dist(x_is, y_is, 2)\n else:\n raise ValueError(\"Distance should be one of {`l1`, `l2`}\")\n", "id": "9336445", "language": "Python", "matching_score": 3.670142650604248, "max_stars_count": 0, "path": "piq/isc.py" }, { "content": "r\"\"\"PyTorch implementation of Improved Precision and Recall (P&R). Based on Improved Precision and Recall Metric for\nAssessing Generative Models https://arxiv.org/abs/1904.06991 and repository\nhttps://github.com/clovaai/generative-evaluation-prdc/blob/master/prdc/prdc.py\n\"\"\"\nfrom typing import Tuple, Optional\nimport torch\n\nfrom piq.base import BaseFeatureMetric\nfrom piq.utils import _validate_input\n\n\ndef _compute_pairwise_distance(data_x: torch.Tensor, data_y: Optional[torch.Tensor] = None) -> torch.Tensor:\n r\"\"\"Compute Euclidean distance between :math:`x` and :math:`y`.\n\n Args:\n data_x: Tensor of shape :math:`(N, feature_dim)`\n data_y: Tensor of shape :math:`(N, feature_dim)`\n Returns:\n Tensor of shape :math:`(N, N)` of pairwise distances.\n \"\"\"\n if data_y is None:\n data_y = data_x\n dists = torch.cdist(data_x, data_y, p=2)\n return dists\n\n\ndef _get_kth_value(unsorted: torch.Tensor, k: int, axis: int = -1) -> torch.Tensor:\n r\"\"\"\n Args:\n unsorted: Tensor of any dimensionality.\n k: Int of the :math:`k`-th value to retrieve.\n Returns:\n kth values along the designated axis.\n \"\"\"\n k_smallests = torch.topk(unsorted, k, dim=axis, largest=False)[0]\n kth_values = k_smallests.max(dim=axis)[0]\n return kth_values\n\n\ndef _compute_nearest_neighbour_distances(input_features: torch.Tensor, nearest_k: int) -> torch.Tensor:\n r\"\"\"Compute K-nearest neighbour distances.\n\n Args:\n input_features: Tensor of shape :math:`(N, feature_dim)`\n nearest_k: Int of the :math:`k`-th nearest neighbour.\n Returns:\n Distances to :math:`k`-th nearest neighbours.\n \"\"\"\n distances = _compute_pairwise_distance(input_features)\n radii = _get_kth_value(distances, k=nearest_k + 1, axis=-1)\n return radii\n\n\nclass PR(BaseFeatureMetric):\n r\"\"\"Interface of Improved Precision and Recall.\n It's computed for a whole set of data and uses features from encoder instead of images itself to decrease\n computation cost. 
Precision and Recall can compare two data distributions with different number of samples.\n But dimensionalities should match, otherwise it won't be possible to correctly compute statistics.\n\n Args:\n nearest_k: Nearest neighbor to compute the non-parametric representation. Shape :math:`1`\n \n Examples:\n >>> loss = PR()\n >>> x = torch.rand(3, 3, 256, 256, requires_grad=True)\n >>> y = torch.rand(3, 3, 256, 256)\n >>> precision, recall = loss(x, y)\n\n References:\n <NAME> al. (2019).\n Improved Precision and Recall Metric for Assessing Generative Models.\n Advances in Neural Information Processing Systems,\n https://arxiv.org/abs/1904.06991\n \"\"\"\n\n def __init__(self, nearest_k: int = 5) -> None:\n r\"\"\"\n Args:\n nearest_k: Nearest neighbor to compute the non-parametric representation. Shape :math:`1`\n \"\"\"\n super(PR, self).__init__()\n\n self.nearest_k = nearest_k\n\n def compute_metric(self, real_features: torch.Tensor, fake_features: torch.Tensor) \\\n -> Tuple[torch.Tensor, torch.Tensor]:\n r\"\"\"Creates non-parametric representations of the manifolds of real and generated data and computes\n the precision and recall between them.\n\n Args:\n real_features: Samples from data distribution. Shape :math:`(N_x, D)`\n fake_features: Samples from fake distribution. Shape :math:`(N_x, D)`\n Returns:\n Scalar value of the precision of the generated images.\n \n Scalar value of the recall of the generated images.\n \"\"\"\n _validate_input([real_features, fake_features], dim_range=(2, 2), size_range=(1, 2))\n real_nearest_neighbour_distances = _compute_nearest_neighbour_distances(real_features, self.nearest_k)\n fake_nearest_neighbour_distances = _compute_nearest_neighbour_distances(fake_features, self.nearest_k)\n distance_real_fake = _compute_pairwise_distance(real_features, fake_features)\n\n precision = (\n distance_real_fake < real_nearest_neighbour_distances.unsqueeze(1)\n ).any(dim=0).float().mean()\n\n recall = (\n distance_real_fake < fake_nearest_neighbour_distances.unsqueeze(0)\n ).any(dim=1).float().mean()\n\n return precision, recall\n", "id": "531326", "language": "Python", "matching_score": 2.0166242122650146, "max_stars_count": 0, "path": "piq/pr.py" }, { "content": "\"\"\"\nImplementation of PieAPP\nReferences:\n .. [1] <NAME>, <NAME>, <NAME>, <NAME>\n (2018). 
PieAPP: Perceptual Image-Error Assessment through Pairwise Preference\n https://arxiv.org/abs/1806.02067\n\"\"\"\nimport warnings\nfrom typing import Union, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.loss import _Loss\n\nfrom piq.utils import _validate_input, _reduce\nfrom piq.functional import crop_patches\n\n\nclass PieAPPModel(nn.Module):\n r\"\"\" Model used for PieAPP score computation \"\"\"\n # Base feature size, which is multiplied by 2 every 2 blocks\n FEATURES = 64\n \n def __init__(self):\n super().__init__()\n\n self.pool = nn.MaxPool2d(2, 2)\n self.flatten = nn.Flatten(start_dim=1)\n\n self.conv1 = nn.Conv2d(3, self.FEATURES, kernel_size=3, padding=1)\n self.conv2 = nn.Conv2d(self.FEATURES, self.FEATURES, kernel_size=3, padding=1)\n self.conv3 = nn.Conv2d(self.FEATURES, self.FEATURES, kernel_size=3, padding=1)\n self.conv4 = nn.Conv2d(self.FEATURES, self.FEATURES * 2, kernel_size=3, padding=1)\n self.conv5 = nn.Conv2d(self.FEATURES * 2, self.FEATURES * 2, kernel_size=3, padding=1)\n self.conv6 = nn.Conv2d(self.FEATURES * 2, self.FEATURES * 2, kernel_size=3, padding=1)\n self.conv7 = nn.Conv2d(self.FEATURES * 2, self.FEATURES * 4, kernel_size=3, padding=1)\n self.conv8 = nn.Conv2d(self.FEATURES * 4, self.FEATURES * 4, kernel_size=3, padding=1)\n self.conv9 = nn.Conv2d(self.FEATURES * 4, self.FEATURES * 4, kernel_size=3, padding=1)\n self.conv10 = nn.Conv2d(self.FEATURES * 4, self.FEATURES * 8, kernel_size=3, padding=1)\n self.conv11 = nn.Conv2d(self.FEATURES * 8, self.FEATURES * 8, kernel_size=3, padding=1)\n\n # TODO: Reconsider this (hardcoded) implementation as soon as dataset used for PieAPP model training is released\n # Check out project repo: https://github.com/prashnani/PerceptualImageError\n # and project web site http://civc.ucsb.edu/graphics/Papers/CVPR2018_PieAPP/\n # for updates on that.\n self.fc1_score = nn.Linear(in_features=120832, out_features=512, bias=True)\n self.fc2_score = nn.Linear(in_features=512, out_features=1, bias=True)\n self.fc1_weight = nn.Linear(in_features=2048, out_features=512)\n self.fc2_weight = nn.Linear(in_features=512, out_features=1, bias=True)\n self.ref_score_subtract = nn.Linear(in_features=1, out_features=1, bias=True)\n\n # Term for numerical stability\n self.EPS = 1e-6\n\n def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n r\"\"\"\n Forward pass a batch of square patches with shape :math:`(N, C, F, F)`.\n\n Returns:\n features: Concatenation of model features from different scales\n x11: Outputs of the last convolutional layer used as weights\n \"\"\"\n _validate_input([x, ], dim_range=(4, 4), data_range=(0, -1))\n\n assert x.shape[2] == x.shape[3] == self.FEATURES, \\\n f\"Expected square input with shape {self.FEATURES, self.FEATURES}, got {x.shape}\"\n\n # conv1 -> relu -> conv2 -> relu -> pool -> conv3 -> relu\n x3 = F.relu(self.conv3(self.pool(F.relu(self.conv2(F.relu(self.conv1(x)))))))\n # conv4 -> relu -> pool -> conv5 -> relu\n x5 = F.relu(self.conv5(self.pool(F.relu(self.conv4(x3)))))\n # conv6 -> relu -> pool -> conv7 -> relu\n x7 = F.relu(self.conv7(self.pool(F.relu(self.conv6(x5)))))\n # conv8 -> relu -> pool -> conv9 -> relu\n x9 = F.relu(self.conv9(self.pool(F.relu(self.conv8(x7)))))\n # conv10 -> relu -> pool1-> conv11 -> relU\n x11 = self.flatten(F.relu(self.conv11(self.pool(F.relu(self.conv10(x9))))))\n # flatten and concatenate\n features = torch.cat((self.flatten(x3), self.flatten(x5), self.flatten(x7), self.flatten(x9), x11), 
dim=1)\n return features, x11\n\n def compute_difference(self, features_diff: torch.Tensor, weights_diff: torch.Tensor) \\\n -> Tuple[torch.Tensor, torch.Tensor]:\n r\"\"\"\n Args:\n features_diff: Tensor. Shape :math:`(N, C_1)`\n weights_diff: Tensor. Shape :math:`(N, C_2)`\n\n Returns:\n distances\n weights\n \"\"\"\n # Get scores: fc1_score -> relu -> fc2_score\n # 0.01 is the sigmoid coefficient\n distances = self.ref_score_subtract(0.01 * self.fc2_score(F.relu(self.fc1_score(features_diff))))\n\n weights = self.fc2_weight(F.relu(self.fc1_weight(weights_diff))) + self.EPS\n return distances, weights\n\n\nclass PieAPP(_Loss):\n r\"\"\"\n Implementation of Perceptual Image-Error Assessment through Pairwise Preference.\n \n Expects input to be in range ``[0, data_range]`` with no normalization and RGB channel order.\n Input images are cropped into smaller patches. Score for each individual image is mean of it's patch scores.\n\n Args:\n reduction: Specifies the reduction type:\n ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'``\n data_range: Maximum value range of images (usually 1.0 or 255).\n stride: Step between cropped patches. Smaller values lead to better quality,\n but cause higher memory consumption. Default: 27 (`sparse` sampling in original implementation)\n enable_grad: Flag to compute gradients. Useful when PieAPP used as a loss. Default: False.\n\n Examples:\n >>> loss = PieAPP()\n >>> x = torch.rand(3, 3, 256, 256, requires_grad=True)\n >>> y = torch.rand(3, 3, 256, 256)\n >>> output = loss(x, y)\n >>> output.backward()\n\n References:\n <NAME>, <NAME>, <NAME>, Pradeep Sen (2018).\n PieAPP: Perceptual Image-Error Assessment through Pairwise Preference\n https://arxiv.org/abs/1806.02067\n\n https://github.com/prashnani/PerceptualImageError\n\n \"\"\"\n _weights_url = \"https://github.com/photosynthesis-team/piq/releases/download/v0.5.4/PieAPPv0.1.pth\"\n\n def __init__(self, reduction: str = \"mean\", data_range: Union[int, float] = 1.0, stride: int = 27,\n enable_grad: bool = False) -> None:\n super().__init__()\n \n # Load weights and initialize model\n weights = torch.hub.load_state_dict_from_url(self._weights_url, progress=False)\n # Fix small bug in original weights\n weights['ref_score_subtract.weight'] = weights['ref_score_subtract.weight'].unsqueeze(1)\n self.model = PieAPPModel()\n self.model.load_state_dict(weights)\n\n # Disable gradients\n for param in self.model.parameters():\n param.requires_grad_(False)\n\n self.data_range = data_range\n self.reduction = reduction\n self.stride = stride\n self.enable_grad = enable_grad\n\n def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n Computation of PieAPP between feature representations of prediction :math:`x` and target :math:`y` tensors.\n\n Args:\n x: An input tensor. Shape :math:`(N, C, H, W)`.\n y: A target tensor. 
Shape :math:`(N, C, H, W)`.\n\n Returns:\n Perceptual Image-Error Assessment through Pairwise Preference\n \"\"\"\n _validate_input([x, y], dim_range=(4, 4), data_range=(0, self.data_range))\n\n N, C, _, _ = x.shape\n if C == 1:\n x = x.repeat(1, 3, 1, 1)\n y = y.repeat(1, 3, 1, 1)\n warnings.warn('The original PieAPP supports only RGB images.'\n 'The input images were converted to RGB by copying the grey channel 3 times.')\n\n self.model.to(device=x.device)\n x_features, x_weights = self.get_features(x)\n y_features, y_weights = self.get_features(y)\n\n distances, weights = self.model.compute_difference(\n y_features - x_features,\n y_weights - x_weights\n )\n\n distances = distances.reshape(N, -1)\n weights = weights.reshape(N, -1)\n\n # Scale scores, then average across patches\n loss = torch.stack([(d * w).sum() / w.sum() for d, w in zip(distances, weights)])\n\n return _reduce(loss, self.reduction)\n\n def get_features(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n r\"\"\"\n\n Args:\n x: Tensor. Shape :math:`(N, C, H, W)`.\n \n Returns:\n List of features extracted from intermediate layers weights\n \"\"\"\n # Rescale to [0, 255] range on which models was trained\n x = x / float(self.data_range) * 255\n x_patches = crop_patches(x, size=64, stride=self.stride)\n\n with torch.autograd.set_grad_enabled(self.enable_grad):\n features, weights = self.model(x_patches)\n\n return features, weights\n", "id": "7982261", "language": "Python", "matching_score": 2.748063087463379, "max_stars_count": 471, "path": "piq/pieapp.py" }, { "content": "r\"\"\"Custom layers used in metrics computations\"\"\"\nimport torch\nfrom typing import Optional\n\nfrom piq.functional import hann_filter\n\n\nclass L2Pool2d(torch.nn.Module):\n r\"\"\"Applies L2 pooling with Hann window of size 3x3\n Args:\n x: Tensor with shape (N, C, H, W)\"\"\"\n EPS = 1e-12\n\n def __init__(self, kernel_size: int = 3, stride: int = 2, padding=1) -> None:\n super().__init__()\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n\n self.kernel: Optional[torch.Tensor] = None\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n if self.kernel is None:\n C = x.size(1)\n self.kernel = hann_filter(self.kernel_size).repeat((C, 1, 1, 1)).to(x)\n\n out = torch.nn.functional.conv2d(\n x ** 2, self.kernel,\n stride=self.stride,\n padding=self.padding,\n groups=x.shape[1]\n )\n return (out + self.EPS).sqrt()\n", "id": "5703205", "language": "Python", "matching_score": 1.1578433513641357, "max_stars_count": 471, "path": "piq/functional/layers.py" }, { "content": "r\"\"\"Implemetation of Visual Information Fidelity metric\nCode is based on MATLAB version for computations in pixel domain\nhttps://live.ece.utexas.edu/research/Quality/VIF.htm\n\nReferences:\n https://ieeexplore.ieee.org/abstract/document/1576816/\n\"\"\"\nimport torch\nfrom torch.nn.modules.loss import _Loss\nimport torch.nn.functional as F\nfrom typing import Union\n\nfrom piq.functional import gaussian_filter\nfrom piq.utils import _validate_input, _reduce\n\n\ndef vif_p(x: torch.Tensor, y: torch.Tensor, sigma_n_sq: float = 2.0,\n data_range: Union[int, float] = 1.0, reduction: str = 'mean') -> torch.Tensor:\n r\"\"\"Compute Visiual Information Fidelity in **pixel** domain for a batch of images.\n This metric isn't symmetric, so make sure to place arguments in correct order.\n Both inputs supposed to have RGB channels order.\n\n Args:\n x: An input tensor. Shape :math:`(N, C, H, W)`.\n y: A target tensor. 
Shape :math:`(N, C, H, W)`.\n sigma_n_sq: HVS model parameter (variance of the visual noise).\n data_range: Maximum value range of images (usually 1.0 or 255).\n reduction: Specifies the reduction type:\n ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'``\n \n Returns:\n VIF Index of similarity betwen two images. Usually in [0, 1] interval.\n Can be bigger than 1 for predicted :math:`x` images with higher contrast than original one.\n\n References:\n <NAME> and <NAME>, \"Image information and visual quality,\"\n IEEE Transactions on Image Processing, vol. 15, no. 2, pp. 430-444, Feb. 2006\n https://ieeexplore.ieee.org/abstract/document/1576816/\n DOI: 10.1109/TIP.2005.859378.\n\n Note:\n In original paper this method was used for bands in discrete wavelet decomposition.\n Later on authors released code to compute VIF approximation in pixel domain.\n See https://live.ece.utexas.edu/research/Quality/VIF.htm for details.\n \"\"\"\n _validate_input([x, y], dim_range=(4, 4), data_range=(0, data_range))\n\n min_size = 41\n if x.size(-1) < min_size or x.size(-2) < min_size:\n raise ValueError(f'Invalid size of the input images, expected at least {min_size}x{min_size}.')\n\n x = x / float(data_range) * 255\n y = y / float(data_range) * 255\n\n # Convert RGB image to YCbCr and take luminance: Y = 0.299 R + 0.587 G + 0.114 B\n num_channels = x.size(1)\n if num_channels == 3:\n x = 0.299 * x[:, 0, :, :] + 0.587 * x[:, 1, :, :] + 0.114 * x[:, 2, :, :]\n y = 0.299 * y[:, 0, :, :] + 0.587 * y[:, 1, :, :] + 0.114 * y[:, 2, :, :]\n\n # Add channel dimension\n x = x[:, None, :, :]\n y = y[:, None, :, :]\n \n # Constant for numerical stability\n EPS = 1e-8\n \n # Progressively downsample images and compute VIF on different scales\n x_vif, y_vif = 0, 0\n for scale in range(4):\n kernel_size = 2 ** (4 - scale) + 1\n kernel = gaussian_filter(kernel_size, sigma=kernel_size / 5)\n kernel = kernel.view(1, 1, kernel_size, kernel_size).to(x)\n\n if scale > 0:\n # Convolve and downsample by a factor of 2 along both spatial axes\n x = F.conv2d(x, kernel)[:, :, ::2, ::2] # valid padding\n y = F.conv2d(y, kernel)[:, :, ::2, ::2] # valid padding\n\n mu_x, mu_y = F.conv2d(x, kernel), F.conv2d(y, kernel) # valid padding\n mu_x_sq, mu_y_sq, mu_xy = mu_x * mu_x, mu_y * mu_y, mu_x * mu_y\n\n # Good\n sigma_x_sq = F.conv2d(x ** 2, kernel) - mu_x_sq\n sigma_y_sq = F.conv2d(y ** 2, kernel) - mu_y_sq\n sigma_xy = F.conv2d(x * y, kernel) - mu_xy\n \n # Zero small negative values\n sigma_x_sq = torch.relu(sigma_x_sq)\n sigma_y_sq = torch.relu(sigma_y_sq)\n\n g = sigma_xy / (sigma_y_sq + EPS)\n sigma_v_sq = sigma_x_sq - g * sigma_xy\n\n g = torch.where(sigma_y_sq >= EPS, g, torch.zeros_like(g))\n sigma_v_sq = torch.where(sigma_y_sq >= EPS, sigma_v_sq, sigma_x_sq)\n sigma_y_sq = torch.where(sigma_y_sq >= EPS, sigma_y_sq, torch.zeros_like(sigma_y_sq))\n\n g = torch.where(sigma_x_sq >= EPS, g, torch.zeros_like(g))\n sigma_v_sq = torch.where(sigma_x_sq >= EPS, sigma_v_sq, torch.zeros_like(sigma_v_sq))\n\n sigma_v_sq = torch.where(g >= 0, sigma_v_sq, sigma_x_sq)\n g = torch.relu(g)\n\n sigma_v_sq = torch.where(sigma_v_sq > EPS, sigma_v_sq, torch.ones_like(sigma_v_sq) * EPS)\n \n x_vif_scale = torch.log10(1.0 + (g ** 2.) 
* sigma_y_sq / (sigma_v_sq + sigma_n_sq))\n x_vif = x_vif + torch.sum(x_vif_scale, dim=[1, 2, 3])\n y_vif = y_vif + torch.sum(torch.log10(1.0 + sigma_y_sq / sigma_n_sq), dim=[1, 2, 3])\n\n score: torch.Tensor = (x_vif + EPS) / (y_vif + EPS)\n\n return _reduce(score, reduction)\n\n\nclass VIFLoss(_Loss):\n r\"\"\"Creates a criterion that measures the Visual Information Fidelity loss\n between predicted (x) and target (y) image. In order to be considered as a loss,\n value ``1 - clip(VIF, min=0, max=1)`` is returned.\n\n Args:\n sigma_n_sq: HVS model parameter (variance of the visual noise).\n data_range: Maximum value range of images (usually 1.0 or 255).\n reduction: Specifies the reduction type:\n ``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'``\n\n Examples:\n >>> loss = VIFLoss()\n >>> x = torch.rand(3, 3, 256, 256, requires_grad=True)\n >>> y = torch.rand(3, 3, 256, 256)\n >>> output = loss(x, y)\n >>> output.backward()\n\n References:\n <NAME> and <NAME>, \"Image information and visual quality,\"\n IEEE Transactions on Image Processing, vol. 15, no. 2, pp. 430-444, Feb. 2006\n https://ieeexplore.ieee.org/abstract/document/1576816/\n DOI: 10.1109/TIP.2005.859378.\n \"\"\"\n\n def __init__(self, sigma_n_sq: float = 2.0, data_range: Union[int, float] = 1.0, reduction: str = 'mean'):\n super().__init__()\n self.sigma_n_sq = sigma_n_sq\n self.data_range = data_range\n self.reduction = reduction\n\n def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n r\"\"\"Computation of Visual Information Fidelity (VIF) index as a loss function.\n Colour images are expected to have RGB channel order.\n Order of inputs is important! First tensor must contain distorted images, second reference images.\n\n Args:\n x: An input tensor. Shape :math:`(N, C, H, W)`.\n y: A target tensor. Shape :math:`(N, C, H, W)`.\n\n Returns:\n Value of VIF loss to be minimized in [0, 1] range.\n \"\"\"\n # All checks are done in vif_p function\n score = vif_p(x, y, sigma_n_sq=self.sigma_n_sq, data_range=self.data_range, reduction=self.reduction)\n\n # Make sure value to be in [0, 1] range and convert to loss\n loss = 1 - torch.clamp(score, 0, 1)\n return loss\n", "id": "11392161", "language": "Python", "matching_score": 2.793597459793091, "max_stars_count": 471, "path": "piq/vif.py" }, { "content": "from typing import Tuple, List, Optional\nimport torch\n\n\ndef _validate_input(\n tensors: List[torch.Tensor],\n dim_range: Tuple[int, int] = (0, -1),\n data_range: Tuple[float, float] = (0., -1.),\n # size_dim_range: Tuple[float, float] = (0., -1.),\n size_range: Optional[Tuple[int, int]] = None,\n) -> None:\n r\"\"\"Check that input(-s) satisfies the requirements\n Args:\n tensors: Tensors to check\n dim_range: Allowed number of dimensions. (min, max)\n data_range: Allowed range of values in tensors. (min, max)\n size_range: Dimensions to include in size comparison. 
(start_dim, end_dim + 1)\n \"\"\"\n\n if not __debug__:\n return\n\n x = tensors[0]\n\n for t in tensors:\n assert torch.is_tensor(t), f'Expected torch.Tensor, got {type(t)}'\n assert t.device == x.device, f'Expected tensors to be on {x.device}, got {t.device}'\n\n if size_range is None:\n assert t.size() == x.size(), f'Expected tensors with same size, got {t.size()} and {x.size()}'\n else:\n assert t.size()[size_range[0]: size_range[1]] == x.size()[size_range[0]: size_range[1]], \\\n f'Expected tensors with same size at given dimensions, got {t.size()} and {x.size()}'\n\n if dim_range[0] == dim_range[1]:\n assert t.dim() == dim_range[0], f'Expected number of dimensions to be {dim_range[0]}, got {t.dim()}'\n elif dim_range[0] < dim_range[1]:\n assert dim_range[0] <= t.dim() <= dim_range[1], \\\n f'Expected number of dimensions to be between {dim_range[0]} and {dim_range[1]}, got {t.dim()}'\n\n if data_range[0] < data_range[1]:\n assert data_range[0] <= t.min(), \\\n f'Expected values to be greater or equal to {data_range[0]}, got {t.min()}'\n assert t.max() <= data_range[1], \\\n f'Expected values to be lower or equal to {data_range[1]}, got {t.max()}'\n\n\ndef _reduce(x: torch.Tensor, reduction: str = 'mean') -> torch.Tensor:\n r\"\"\"Reduce input in batch dimension if needed.\n\n Args:\n x: Tensor with shape (N, *).\n reduction: Specifies the reduction type:\n ``'none'`` | ``'mean'`` | ``'sum'``. Default: ``'mean'``\n \"\"\"\n if reduction == 'none':\n return x\n elif reduction == 'mean':\n return x.mean(dim=0)\n elif reduction == 'sum':\n return x.sum(dim=0)\n else:\n raise ValueError(\"Uknown reduction. Expected one of {'none', 'mean', 'sum'}\")\n\n\ndef _version_tuple(v):\n # Split by dot and plus\n return tuple(map(int, v.split('+')[0].split('.')))\n", "id": "2510099", "language": "Python", "matching_score": 2.378094434738159, "max_stars_count": 0, "path": "piq/utils/common.py" }, { "content": "from piq.utils.common import _validate_input, _reduce, _version_tuple\n\n__all__ = [\n \"_validate_input\",\n \"_reduce\",\n '_version_tuple'\n]\n", "id": "8523083", "language": "Python", "matching_score": 1.0772716999053955, "max_stars_count": 0, "path": "piq/utils/__init__.py" }, { "content": "import pytest\n\nfrom piq.utils import _version_tuple as vt\n\n\ndef test_version_tuple_fails_on_empty_string() -> None:\n \"\"\" Has to fail because no int values in the empty string \"\"\"\n with pytest.raises(ValueError):\n vt('')\n\n\ndef test_version_tuple_compares_correctly() -> None:\n assert vt('0.0') < vt('0.0.1')\n assert vt('0.1') < vt('0.2')\n assert vt('1.2.3') < vt('3.2.1')\n assert vt('2.3.1') < vt('2.4.1')\n assert vt('1.0') < vt('2.0')\n\n # This one indeed should NOT be equal because in python tuples of different lengths cannot be equal\n assert vt('0.0.0') != vt('0.0')\n\n # But if the length is the same then yes\n assert vt('0.0') == vt('0.0')\n", "id": "3664296", "language": "Python", "matching_score": 0.35499608516693115, "max_stars_count": 0, "path": "tests/utils/test_common.py" }, { "content": "from examples.feature_metrics import main as feature_metrics_examples\nfrom examples.image_metrics import main as image_metrics_examples\nfrom tests.test_gs import prepare_test\n\n\ndef test_image_metrics():\n prepare_test()\n image_metrics_examples()\n\n\ndef test_feature_metrics():\n prepare_test()\n feature_metrics_examples()\n", "id": "5056961", "language": "Python", "matching_score": 0.14625778794288635, "max_stars_count": 471, "path": "tests/test_examples.py" }, { "content": "from 
piq.feature_extractors.fid_inception import InceptionV3\n\n__all__ = ['InceptionV3']\n", "id": "12651191", "language": "Python", "matching_score": 0.1744208186864853, "max_stars_count": 471, "path": "piq/feature_extractors/__init__.py" } ]
2.093306
andrereynaldi
[ { "content": "#!/usr/bin/env python\n#####################################\n# Installation module for Eternalblue-Doublepulsar-Metasploit\n#####################################\n\n# AUTHOR OF MODULE NAME\nAUTHOR=\"<NAME> (@mikeschladt). Metasploit module by <NAME> (@jaesga)\"\n\n# DESCRIPTION OF THE MODULE\nDESCRIPTION=\"This module will install Eternalblue-Doublepulsar-Metasploit from ElevenPaths\"\n\n# PREREQ INSTALL MODULES NEEDED FOR THIS TOOL TO WORK PROPERLY\nTOOL_DEPEND=\"modules/exploitation/metasploit\"\n\n# INSTALL TYPE GIT, SVN, FILE DOWNLOAD\n# OPTIONS = GIT, SVN, FILE\nINSTALL_TYPE=\"GIT\"\n\n# LOCATION OF THE FILE OR GIT/SVN REPOSITORY\nREPOSITORY_LOCATION=\"https://github.com/ElevenPaths/Eternalblue-Doublepulsar-Metasploit\"\n\n# WHERE DO YOU WANT TO INSTALL IT\nINSTALL_LOCATION=\"eternalblue-doublepulsar-metasploit\"\n\n# DEPENDS FOR DEBIAN INSTALLS\nDEBIAN=\"git,wine\"\n\n# DEPENDS FOR FEDORA INSTALLS\nFEDORA=\"git,wine\"\n\n# COMMANDS TO RUN AFTER\nAFTER_COMMANDS='mkdir -p $HOME/.msf4/modules/exploits/windows/smb, sed -i \"s~/root/Eternalblue-Doublepulsar-Metasploit/deps~{INSTALL_LOCATION}deps~g\" {INSTALL_LOCATION}eternalblue_doublepulsar.rb, sed -i \"s~/root/.wine/~$HOME/.wine/~g\" {INSTALL_LOCATION}eternalblue_doublepulsar.rb, cp {INSTALL_LOCATION}eternalblue_doublepulsar.rb $HOME/.msf4/modules/exploits/windows/smb/eternalblue_doublepulsar.rb,chown -R $SUDO_USER:$SUDO_USER {INSTALL_LOCATION}, chown -R $SUDO_USER:$SUDO_USER $HOME/.msf4/,su - $SUDO_USER -c \"winepath\", echo \"EternalBlue Metasploit module added to .msf4/modules/exploits/windows/smb/eternalblue_doublepulsar.rb\"'\n\n# THIS WILL CREATE AN AUTOMATIC LAUNCHER FOR THE TOOL\nLAUNCHER=\"\"\n", "id": "5745608", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "modules/exploitation/eternalblue-doublepulsar-metasploit.py" }, { "content": "#!/usr/bin/env python\n#####################################\n# Installation module for FuzzBunch (on Wine)\n#####################################\n\n# AUTHOR OF MODULE NAME\nAUTHOR=\"<NAME> (@mikeschladt). 
Tool by <NAME> (mdiazcl)\"\n\n# DESCRIPTION OF THE MODULE\nDESCRIPTION=\"This module will install FuzzBunch on Wine\"\n\n# PREREQ INSTALL MODULES NEEDED FOR THIS TOOL TO WORK PROPERLY\nTOOL_DEPEND=\"\"\n\n# INSTALL TYPE GIT, SVN, FILE DOWNLOAD\n# OPTIONS = GIT, SVN, FILE\nINSTALL_TYPE=\"GIT\"\n\n# LOCATION OF THE FILE OR GIT/SVN REPOSITORY\nREPOSITORY_LOCATION=\"https://github.com/mdiazcl/fuzzbunch-debian.git\"\n\n# WHERE DO YOU WANT TO INSTALL IT\nINSTALL_LOCATION=\"fuzzbunch\"\n\n# DEPENDS FOR DEBIAN INSTALLS\nDEBIAN=\"git,wine\"\n\n# DEPENDS FOR FEDORA INSTALLS\nFEDORA=\"git,wine,winbind,winetricks\"\n\n# COMMANDS TO RUN AFTER\nAFTER_COMMANDS=WINEPREFIX=\"$HOME/.wine-fuzzbunch\", su - $SUDO_USER -c \"WINEARCH=win32; wine wineboot; export WINEPREFIX=$HOME/.wine-fuzzbunch; wine reg ADD \\\"HKEY_CURRENT_USER\\\\Environment\\\" /v Path /t REG_SZ /d \\\"c:\\\\windows;c:\\\\windows\\\\system;C:\\\\Python26;C:\\\\fuzzbunch-debian\\\\windows\\\\fuzzbunch\\\";winetricks python26;cp -R {INSTALL_LOCATION} $HOME/.wine-fuzzbunch/drive_c/fuzzbunch-debian\", echo '#!/usr/bin/env bash' > {INSTALL_LOCATION}fuzzbunch.sh, echo 'export WINEPREFIX=$HOME/.wine-fuzzbunch' >> {INSTALL_LOCATION}fuzzbunch.sh, echo 'wine cmd.exe /C python C:\\\\\\\\fuzzbunch-debian\\\\\\\\windows\\\\\\\\fb.py' >> {INSTALL_LOCATION}fuzzbunch.sh, chmod 755 {INSTALL_LOCATION}fuzzbunch.sh\n\n# THIS WILL CREATE AN AUTOMATIC LAUNCHER FOR THE TOOL\nLAUNCHER=\"fuzzbunch\"\n", "id": "11133326", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "modules/exploitation/fuzzbunch.py" } ]
0
gregdp
[ { "content": "\nimport chimera\nimport numpy\n\n\nclass Quaternion :\n\n def __init__ ( self, s=1.0, v=chimera.Vector(0,0,0) ) :\n self.s = s\n self.v = v\n\n def length (self) :\n return numpy.sqrt ( (self.s*self.s) + self.v.sqlength() )\n\n\n def rotation (self, angDegrees, axis) :\n angRad = 0.5 * angDegrees * numpy.pi / 180.0\n self.s = numpy.cos ( angRad )\n self.v = axis * numpy.sin ( angRad )\n\n\n def inverse ( self ) :\n return Quaternion ( self.s, self.v * -1.0 )\n\n\n def fromXform ( self, xf ) :\n\n axis, angle = xf.getRotation ()\n if angle >= -180.0 and angle <= 180.0 :\n self.rotation ( angle, axis )\n elif angle < -180.0 :\n blah\n self.rotation ( angle, axis*-1.0 )\n else :\n blah\n self.rotation ( angle, axis*-1.0 )\n\n m = numpy.reshape ( xf.getOpenGLMatrix(), (4,4) )\n m = numpy.transpose ( m )\n self.fromMatrix ( m )\n\n\n def dot ( self, q ) :\n return self.s * q.s + self.v * q.v\n\n def angleTo ( self, q2 ) :\n self.normalize()\n q2.normalize()\n return 2.0 * numpy.arccos ( self * q2 )\n\n\n def normalize (self) :\n l = self.length()\n if (l > 1e-4) :\n self.s = self.s / l\n self.v = self.v / l\n else :\n raise (\"quaternion normalization error\")\n\n def __mul__(self, x) :\n if type(x) == type(1.0) or type(x) == numpy.float64 :\n return Quaternion ( self.s*x, self.v*x )\n else :\n return self.dot ( x )\n\n def __add__(self, x) :\n return Quaternion ( self.s + x.s, self.v + x.v )\n\n def __sub__(self, x) :\n return Quaternion ( self.s - x.s, self.v - x.v )\n\n def __copy__ (self) :\n return Quaternion ( self.s, self.v.__copy__() )\n\n def Xform (self) :\n #self.normalize()\n s = self.s\n v = self.v\n return chimera.Xform.xform (\n 1-2*v.y*v.y-2*v.z*v.z, 2*v.x*v.y-2*s*v.z, 2*v.x*v.z+2*s*v.y, 0,\n 2*v.x*v.y+2*s*v.z, 1-2*v.x*v.x-2*v.z*v.z, 2*v.y*v.z-2*s*v.x, 0,\n 2*v.x*v.z-2*s*v.y, 2*v.y*v.z+2*s*v.x, 1-2*v.x*v.x-2*v.y*v.y, 0\n )\n\n def matrix (self) :\n #self.normalize()\n s = self.s\n v = self.v\n return [\n [1-2*v.y*v.y-2*v.z*v.z, 2*v.x*v.y-2*s*v.z, 2*v.x*v.z+2*s*v.y],\n [2*v.x*v.y+2*s*v.z, 1-2*v.x*v.x-2*v.z*v.z, 2*v.y*v.z-2*s*v.x],\n [2*v.x*v.z-2*s*v.y, 2*v.y*v.z+2*s*v.x, 1-2*v.x*v.x-2*v.y*v.y],\n ]\n\n\n def fromMatrix ( self, rkRot ) :\n # Algorithm in <NAME>'s article in 1987 SIGGRAPH course notes\n # article \"Quaternion Calculus and Fast Animation\".\n\n fTrace = rkRot[0,0] + rkRot[1,1] + rkRot[2,2]\n fRoot = 0.0\n if fTrace > 0.0 :\n # |w| > 1/2, may as well choose w > 1/2\n fRoot = numpy.sqrt (fTrace + 1.0) # 2w\n self.s = 0.5 * fRoot;\n fRoot = 0.5 / fRoot; # 1/(4w)\n self.v[0] = (rkRot[2,1]-rkRot[1,2])*fRoot;\n self.v[1] = (rkRot[0,2]-rkRot[2,0])*fRoot;\n self.v[2] = (rkRot[1,0]-rkRot[0,1])*fRoot;\n\n else :\n # |w| <= 1/2\n i = 0\n if rkRot[1,1] > rkRot[0,0] :\n i = 1\n if rkRot[2,2] > rkRot[i,i] :\n i = 2\n\n j = (i + 1) % 3 # ms_iNext[i];\n k = (j + 1) % 3 # ms_iNext[j];\n\n fRoot = numpy.sqrt(rkRot[i,i]-rkRot[j,j]-rkRot[k,k]+1.0);\n\n # Real* apfQuat[3] = { &m_afTuple[1], &m_afTuple[2], &m_afTuple[3] };\n self.v[i] = 0.5 * fRoot # *apfQuat[i] = ((Real)0.5)*fRoot;\n\n fRoot = 0.5 / fRoot\n self.s = (rkRot[k,j]-rkRot[j,k])*fRoot\n self.v[j] = (rkRot[j,i]+rkRot[i,j])*fRoot # *apfQuat[j]\n self.v[k] = (rkRot[k,i]+rkRot[i,k])*fRoot # *apfQuat[k]\n\n\ndef mult (a, b) :\n return Quaternion (a.s*b.s - a.v*b.v, b.v*a.s + a.v*b.s + chimera.cross(a.v,b.v))\n\n\ndef slerp0 (p, q, t) :\n\n cs = p.dot(q)\n angle = numpy.arccos ( cs )\n\n if abs (angle) > 0.0 :\n sn = numpy.sin ( angle )\n invSn = 1.0 / sn;\n tAngle = t*angle;\n c0 = numpy.sin(angle - 
tAngle)*invSn;\n c1 = numpy.sin(tAngle)*invSn;\n\n #mTuple[0] = coeff0*p.mTuple[0] + coeff1*q.mTuple[0];\n #mTuple[1] = coeff0*p.mTuple[1] + coeff1*q.mTuple[1];\n #mTuple[2] = coeff0*p.mTuple[2] + coeff1*q.mTuple[2];\n #mTuple[3] = coeff0*p.mTuple[3] + coeff1*q.mTuple[3];\n return Quaternion (p.s*c0+q.s*c1, p.v*c0 + q.v*c1)\n\n else :\n return Quaternion (p.s, chimera.Vector(p.v[0], p.v[1], p.v[2]))\n\n\ndef slerp (v0, v1, t) :\n\n # http://number-none.com/product/Understanding%20Slerp,%20Then%20Not%20Using%20It/\n\n #; Inputs are: unit vectors v0 and v1, scalar t\n #; v0 and v1 are linearly independent\n\n # Quaternion slerp(Quaternion const &v0, Quaternion const &v1, double t) {\n # // v0 and v1 should be unit length or else\n # // something broken will happen.\n #\n # // Compute the cosine of the angle between the two vectors.\n # double dot = dot_product(v0, v1);\n #\n # const double DOT_THRESHOLD = 0.9995;\n # if (dot > DOT_THRESHOLD) {\n # // If the inputs are too close for comfort, linearly interpolate\n # // and normalize the result.\n #\n # Quaternion result = v0 + t*(v1 - v0)\n # result.normalize();\n # return result;\n # }\n #\n # Clamp(dot, -1, 1); // Robustness: Stay within domain of acos()\n # double theta_0 = acos(dot); // theta_0 = angle between input vectors\n # double theta = theta_0*t; // theta = angle between v0 and result\n #\n # Quaternion v2 = v1 - v0*dot\n # v2.normalize(); // { v0, v2 } is now an orthonormal basis\n #\n # return v0*cos(theta) + v2*sin(theta);\n\n\n\n dot = v0.dot(v1)\n #print dot\n\n if 1 or dot > 0.9995 :\n r = v0 + (v1-v0) * t\n r.normalize()\n return r\n\n if dot < -1.0 : dot = -1.0\n if dot > 1.0 : dot = 1.0\n\n theta_0 = numpy.arccos ( dot )\n theta = theta_0*t\n\n v2 = v1 - v0 * dot\n v2.normalize()\n\n r = v0 * numpy.cos(theta) + v2 * numpy.sin(theta)\n\n if 0 :\n # from http://graphics.cs.cmu.edu/nsp/course/15-464/Fall05/assignments/p245-shoemake.pdf\n a0 = numpy.sin( (1-t) * theta_0 ) / numpy.sin(theta_0)\n a1 = numpy.sin ( t * theta_0 ) / numpy.sin ( theta_0 )\n r = v0 * a0 + v1 * a1\n\n return r\n", "id": "5477487", "language": "Python", "matching_score": 0.7418850660324097, "max_stars_count": 6, "path": "Segger/quaternion.py" }, { "content": "# ------------------------------------------------------------------------------\n# Produce tiled grids of cross-sections of density of segmented regions along\n# 3 orthogonal axes of straightened density.\n#\ndef make_orthoslice_images(\n rlist, # Segmentation regions.\n volume, # Volume model.\n trace_spacing = None, # Physical units.\n trace_tip_length = None, # Physical units.\n unbend_size = 1.5, # Factor multipied by region diameter.\n unbend_yaxis = (0,0,1),\n unbend_grid_spacing = 1, # Factor multiplied by minimum voxel size.\n slice_spacing = None, # 3-tuple, physical units.\n xy_trim = 0.3, # Factor multipled by unbent grid width/height.\n panel_aspect = 0.5, # Minimum aspect ratio for tiled layout.\n image_spacing = 20, # Pixels between 3 sets of slices.\n show_image = True,\n task = None,\n ):\n\n from math import ceil\n from Measure.spine import trace_spine, measure_diameter\n from VolumeFilter.unbend import atom_path, unbend_volume\n from VolumeFilter.tile import tile_planes\n from chimera import openModels\n\n ubgs = unbend_grid_spacing * min(volume.data.step)\n\n axes = ('x','y','z')\n pstep = (10,10,10) if slice_spacing is None else [int(ceil(s/ubgs))\n for s in slice_spacing]\n orders = ('ulh','urv','ulhr')\n\n for ri,r in enumerate(rlist):\n\n if task:\n task.updateStatus('region 
%d (%d of %d)' % (r.rid,ri+1,len(rlist)))\n\n # Trace center-line.\n mset = trace_spine(r, trace_spacing, trace_tip_length) \n\n # Unbend volume.\n p = atom_path([m.atom for m in mset.markers()])\n dmax, dmin = measure_diameter(r, mset)\n if dmax is None:\n print 'Region %d has no diameter' % r.rid\n mset.close()\n continue\n xsize = ysize = unbend_size*dmax\n ubv = unbend_volume(volume, p, unbend_yaxis, xsize, ysize, ubgs,\n open = False)\n# ubv.set_representation('solid')\n# ubv.set_parameters(show_outline_box = True)\n\n # Create tiled cross-section volumes along 3 axes.\n etrim = (xy_trim*ubv.data.size[0],xy_trim*ubv.data.size[1],0)\n etrim = [int(ceil(t)) for t in etrim]\n rowcol = rows_and_columns(ubv.data.size, pstep, etrim, panel_aspect)\n tparams = zip(axes,pstep,etrim,rowcol,orders)\n tv = [tile_planes(ubv, axis, step, trim, rows, cols, order, open=False)\n for axis,step,trim,(rows,cols),order in tparams]\n if [v for v in tv if v is None]:\n print 'Region %d has no sections for some axes' % r.rid\n mset.close()\n openModels.close([v for v in tv if v] + [ubv])\n continue\n\n # Make images for each set of slices and combine them side-by-side.\n images = (volume_image(tv[0]),\n volume_image(tv[1], xflip=True),\n volume_image(tv[2]))\n image = montage_image(images, pad = image_spacing)\n image.title = 'Region %d' % r.rid\n if show_image:\n from Segger.imageviewer import showImage\n showImage(image, title = image.title, fit_to_window = True)\n\n # Set image region attribute\n r.set_attribute('slices', image)\n\n # Close center line.\n mset.close()\n\n# ------------------------------------------------------------------------------\n# Figure out optimal row/column tiling for each set of sections so that the\n# composite 3 panels has a convenient aspect ratio around 1.5.\n# yz slices\n#\ndef rows_and_columns(vsize, step, trim, min_aspect):\n\n from math import ceil, floor\n tc = [max(1,(s-t+st-1)/st - (t+st-1)/st) for s,st,t in zip(vsize,step,trim)]\n tc0 = tc[0]\n c = 1\n while c <= tc0:\n r = int(ceil((tc0-1+c)/c))\n aspect = float(c*vsize[1])/(r*vsize[2])\n if r <= 1 or aspect >= min_aspect:\n break\n while int(ceil((tc0-1+c)/c)) == r:\n c += 1\n rz = max(1,int(floor(float(r*vsize[2])/vsize[1])))\n rowcol = ((None,c),(c,None),(rz,None))\n return rowcol\n\n# ------------------------------------------------------------------------------\n# Look down x, y, z axes. 
For x and y use z as vertical.\n#\ndef position_orthoviews(tv):\n\n # Put all 3 planes in xy plane.\n vx, vy, vz = tv\n from chimera import Xform\n vx.openState.localXform(Xform.xRotation(-90))\n vx.openState.localXform(Xform.zRotation(-90))\n vy.openState.localXform(Xform.xRotation(-90))\n vy.openState.localXform(Xform.zRotation(180))\n # Horz/vert axes vx:(y,z), vy:(x,z), vz:(x,y)\n\n sizes = []\n for v in tv:\n (x0,y0,z0), (x1,y1,z1) = v.xyz_bounds(step = 1, subregion = 'all')\n sizes.append((x1-x0,y1-y0,z1-z0))\n\n # Align lower left corners\n vy.openState.localXform(Xform.translation(-sizes[1][0]))\n\n # Center vertically and spread out horizontally.\n vx.openState.localXform(Xform.translation(0,0,-0.5*sizes[0][2]))\n offset = 1.05 * sizes[0][1]\n vy.openState.localXform(Xform.translation(offset,0,-0.5*sizes[1][2]))\n offset += .05 * sizes[0][1] + sizes[1][0]\n vz.openState.localXform(Xform.translation(offset,-0.5*sizes[2][1],0))\n\n # Hide all other models.\n from chimera import openModels\n mset = set(tv + [v.solid_model() for v in tv])\n for m in openModels.list():\n if not m in mset:\n m.display = False\n\n # Standard orientation, fit in window.\n from Midas import reset\n reset()\n from chimera import viewer\n viewer.viewAll()\n viewer.scaleFactor *= 0.95\n viewer.depthCue = False\n\n # Capture image\n# width,height = viewer.windowSize\n# image = viewer.pilImages(width, height, supersample = 3)[0]\n\n# ------------------------------------------------------------------------------\n# Creates a PIL image from a volume plane with 1 pixel exactly matching one\n# voxel, and using the solid rendering transfer function.\n#\n# The volume must be rendering as single plane in solid rendering style.\n#\ndef volume_image(v, xflip = False, yflip = False):\n\n if v.solid is None:\n v.update_solid(v.rendering_options)\n colors = v.solid.color_values()\n if colors.shape[0] == 1:\n c = colors[0,:,:,:]\n elif colors.shape[1] == 1:\n c = colors[:,0,:,:]\n elif colors.shape[2] == 1:\n c = colors[:,:,0,:]\n else:\n raise ValueError('Color array not a plane %s' % str(colors.shape))\n size = (c.shape[1], c.shape[0])\n mode = {1:'L', 2:'LA', 3:'BGR', 4:'BGRA'}[c.shape[2]]\n if mode.endswith('A'):\n # Don't include alpha channel in image.\n mode = mode[:-1]\n c = c[:,:,0] if c.shape[2] == 2 else c[:,:,:-1]\n if xflip:\n c = c[:,::-1]\n if not yflip:\n c = c[::-1,:]\n from PIL import Image\n im = Image.fromarray(c, mode)\n return im\n\n# ------------------------------------------------------------------------------\n#\ndef montage_image(images, pad = 0):\n\n w = sum([im.size[0] for im in images]) + pad * (len(images)-1)\n h = max([im.size[1] for im in images])\n from PIL import Image\n mi = Image.new(images[0].mode, (w,h))\n x = y = 0\n for im in images:\n mi.paste(im, (x,y))\n x += im.size[0] + pad\n return mi\n \n# ------------------------------------------------------------------------------\n#\ndef test():\n\n from Segger import SelectedRegions\n rlist = SelectedRegions()\n\n from VolumeViewer import active_volume\n volume = active_volume()\n\n make_orthoslice_images(rlist, volume)\n\n#test()\n", "id": "10961245", "language": "Python", "matching_score": 1.7681858539581299, "max_stars_count": 6, "path": "Segger/orthoview.py" }, { "content": "\n\n\n# How to run:\n# This script should be copied and the parameters changed as needed\n# - or this script can be used as placed in the Chimera/Contents/Resources/share/ folder\n# with default parameters\n# - command to use:\n# [path to Chimera exec] --nogui 
--silent --nostatus [path to map] [path to this script]\n# e.g. ~/_mol/Chimera.app/Contents/MacOS/chimera --nogui --silent --nostatus\n# ~/_data/emd_5001.map seggercmd.py\n\n\n# The grouping mode paramater:\n# - can be either 'smoothing' or 'connectivity'\n# 'smoothing' tends to work better at lower resolutions (4A and lower)\n# 'connectivity' tends to work better at higher resolutions (4A and better)\ngroupingMode = \"smoothing\"\n\n# Minimum region size in number of voxels (can be turned to A^3 by dividing by\n# step size)\n# - as below when set to 1, all regions will be kept; for a value of 5, only\n# regions that have at least 5 voxels would be kept after the first\n# segmentation step\n# (1 means no regions are removed)\nminRegionSize = 1\n\n# Minimum contact voxels -\n# (0 means no regions are removed)\nminContactVoxels = 0\n\n# when to stop the grouping process (either by)\nstopAtNumberOfRegions = 1\n\n# map threshold - only include voxels with map value above this values\n# if not sure, leave as None; 3sigma above mean will be used\nmapThreshold = None\n\n# parameters for smoothing and grouping:\nnumSmoothingSteps = 4\nsmoothingStepSize = 3\n\n# parameters for grouping by connectivity\nnumConnectivitySteps = 10\n\n\n# parameters for outputting of regions\noutputLargestN = 5\noutputRegions = [] # a list of regions to output\n\n# other options\noptions = {}\noptions[\"outputMapSize\"] = \"box\" # can be 'same', 'cube', 'box'\noptions[\"borderWidth\"] = 4 # border in voxels if mapSize is not \"same\"\noptions[\"mask01\"] = False # border in voxels if mapSize is not \"same\"\n\n\n\nimport Segger\nimport chimera\nimport VolumeViewer\nimport regions\nimport numpy\n\nmaps = chimera.openModels.list (modelTypes = [VolumeViewer.volume.Volume])\n\nif len(maps) == 0 :\n print \" - no maps opened; specify a map on the command line\"\n\ndmap = maps[0]\n\nif mapThreshold == None :\n #maxM = numpy.max(M)\n #minM = numpy.min(M)\n\n M = dmap.data.full_matrix()\n mapThreshold = numpy.average(M)+numpy.std(M)*3.0\n print \" - threshold: %f\" % mapThreshold\n\n\nsmod = regions.Segmentation(dmap.name, dmap)\nsmod.calculate_watershed_regions ( dmap, mapThreshold )\n\nif minRegionSize > 1 :\n print \"\\n - removing regions below %d voxels\" % minRegionSize\n smod.remove_small_regions(minRegionSize)\n\nif minContactVoxels > 0 :\n print \"\\n - removing regions with connection smaller than %d voxels\" % minContactVoxels\n smod.remove_contact_regions(minContactVoxels)\n\n\nif groupingMode == 'smoothing' :\n print \"\\n - grouping by smoothing\"\n smod.smooth_and_group(numSmoothingSteps, smoothingStepSize, stopAtNumberOfRegions)\n\nelif groupingMode == 'connectivity' :\n print \"\\n - grouping by connectivity\"\n self.GroupByCons ( smod, task )\n\n\nregions = smod.grouped_regions()\nprint \" - %d regions\" % len(regions)\noutputN = min ( outputLargestN, len(regions) )\n\n\nimport os\nmdir, mfile = os.path.split(dmap.data.path)\nmname, mext = os.path.splitext ( mfile )\n\n\nif outputN > 0 :\n print \"\\n - outputing %d regions by size\" % outputN\n\n import Segger.extract_region_dialog\n\n for i in range ( outputN ) :\n reg = regions[i]\n print \" -- %d, %d voxels\" % (i+1, len(reg.points()) )\n\n newMapName = mname + \"_%d.mrc\" % (i+1)\n\n Segger.extract_region_dialog.ExtractNonUI (dmap, dmap, smod, [reg], newMapName, options)\n", "id": "1508613", "language": "Python", "matching_score": 1.52469801902771, "max_stars_count": 6, "path": "Segger/seggercmd.py" }, { "content": "\n# Copyright (c) 2020 <NAME> - 
<EMAIL>\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\n\n\nimport sys, os\n\nmods = []\nchimeraPath = None\nnumProc = 1\nres = 3.0\nbfactor = -1\ngSigma = 0.6\n\n\nprint (\"\")\nprint (\"Found parameters:\")\n\nfor arg in sys.argv :\n\n print (\": %s\" % arg),\n\n if \"mapq_cmd.py\" in arg :\n print ( \" -> this script\" )\n\n elif os.path.isdir(arg) :\n #print ( \" -> Chimera path\" )\n\n cp = os.path.join ( os.path.join ( arg, 'bin' ), 'chimera' )\n if os.path.isfile(cp) :\n print ( \" -> Chimera path, Unix\" )\n chimeraPath = cp\n\n cp = os.path.join ( os.path.join ( arg, 'bin' ), 'chimera.exe' )\n if os.path.isfile(cp) :\n print ( \" -> Chimera path, Windows\" )\n chimeraPath = cp\n\n cp = os.path.join ( os.path.join ( os.path.join ( arg, 'Contents' ), 'MacOS' ), \"chimera\" )\n if os.path.isfile(cp) :\n print ( \" -> Chimera path, Mac\" )\n chimeraPath = cp\n\n\n elif os.path.isfile(arg) :\n print ( \" -> map or model\" )\n if arg[0:2] == '..' :\n print ( \" -X- please do not use relatives, i.e. 
.., in path (sorry)\" )\n else :\n mods.append ( arg )\n\n else :\n tokens = arg.split(\"=\")\n if len(tokens) == 2 and tokens[0] == \"np\" :\n try :\n numProc = int ( tokens[1] )\n print ( \" -> number of processes: %d\" % numProc )\n except :\n print ( \" -> specify an integer\" )\n elif len(tokens) == 2 and tokens[0] == \"res\" :\n try :\n res = float ( tokens[1] )\n print ( \" -> resolution: %.3f\" % res )\n except :\n print ( \" -> specify a number\" )\n elif len(tokens) == 2 and tokens[0] == \"bfactor\" :\n try :\n bfactor = float ( tokens[1] )\n print ( \" -> bfactor: %.0f\" % bfactor )\n except :\n print ( \" -> specify a number\" )\n elif len(tokens) == 2 and tokens[0] == \"sigma\" :\n try :\n gSigma = float ( tokens[1] )\n print ( \" -> sigma: %.0f\" % gSigma )\n except :\n print ( \" -> specify a number\" )\n else :\n print ( \" -> unknown\" )\n\nprint (\"\")\n\nok = True\nif len(mods) <= 1 or chimeraPath == None :\n print (\"\")\n print (\"mapq_cmd.py\")\n print (\" - Calculate Q-scores from command line\")\n print (\"\")\n print (\"Parameters:\")\n print (\" [path to model or map file]\")\n print (\" one map and at least one model should be specified\")\n print (\" [path to Chimera]\")\n print (\" e.g.: ~/Chimera.app (Mac)\")\n print (\" ~/Chimera (Unix)\")\n print (\" C:\\\\Users\\\\name\\\\Chimera (Windows)\")\n print (\" sigma=# (optional)\")\n print (\" sigma of reference Gaussian, default is 0.6\")\n print (\" res=# (optional)\")\n print (\" resolution of map, e.g. res=3.2\")\n print (\" only used in output of Q-scores/residue as comparison\")\n print (\" bfactor=f (optional, f=50,100,200,...)\")\n print (\" if specified, Q-scores are converted to Bfactors\")\n print (\" using the formula bfactor=f*(1.0-Qscore)\")\n print (\" np=# (optional, #=1,2,3,4,...\")\n print (\" number of processors to use\")\n\n ok = False\n\nprint (\"\")\n\nif ok :\n\n #scriptPath = os.path.dirname(os.path.realpath(__file__))\n scriptPath = os.path.dirname ( mods[0] )\n newScript = os.path.join ( scriptPath, \"_mapqScript.py\" )\n\n print (\"Creating Chimera script in %s\" % newScript)\n print (\"\")\n\n try :\n fp = open ( newScript, \"w\" )\n except :\n print ( \" - could not write script for Chimera, check if you have write permission in %s\" % scriptPath )\n exit ( 0 )\n\n fp.write ( \"import mapq\\n\" )\n fp.write ( \"import mapq.qscores\\n\" )\n fp.write ( \"from mapq.mmcif import LoadMol as LoadMol\\n\" )\n\n\n print (\"Running:\")\n cmd = \"%s --nogui --silent --nostatus \" % chimeraPath\n for mod in mods :\n if os.path.splitext(mod)[1] == \".cif\" :\n fp.write ( \"LoadMol('%s')\\n\" % mod )\n else :\n cmd += '\"%s\" ' % mod\n\n cmd += \"'%s'\" % newScript\n\n print (\" : \" + cmd)\n print (\"\")\n\n fp.write ( \"mapq.qscores.Calc('%s',%d,%f,%f,%f)\\n\" % (chimeraPath, numProc, res, bfactor, gSigma) )\n fp.close()\n\n os.system(cmd)\n\n if 1 :\n print ( \"Removing temp Chimera script \")\n print ( \" - %s\" % newScript )\n os.remove ( newScript )\n print ( \" - %s\" % (newScript+\"c\") )\n os.remove ( newScript + \"c\" )\n", "id": "396522", "language": "Python", "matching_score": 3.7262091636657715, "max_stars_count": 6, "path": "mapq/mapq_cmd.py" }, { "content": "\n# Copyright (c) 2020 <NAME> - <EMAIL>\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\n\nimport sys, os, shutil\n\nif len(sys.argv) != 2 :\n print (\"\")\n print (\"Please add the path where Chimera is installed, e.g.:\")\n print (\" python install.py /home/greg/applications/Chimera\")\n print (\"\")\n\n sys.exit(0)\n\n\nprint (\"\")\n\n\n\nsharePaths = []\ndef FindShare (path, lev) :\n\n try :\n fs = os.listdir (path)\n except :\n fs = []\n\n for f in fs :\n atPath = os.path.join ( path, f )\n if os.path.isdir ( atPath ) :\n #for i in range(lev+1) :\n # print \" \",\n #print f\n if f == \"share\" and \"chimera\" in atPath.lower() :\n #print \" - found: \", atPath\n sharePaths.append ( atPath )\n return\n else :\n FindShare ( atPath, lev+1 )\n\n\n\n\n\n\n# Mac...\nopath1 = os.path.join ( sys.argv[1], \"Contents\" )\nopath1 = os.path.join ( opath1, \"Resources\" )\nopath1 = os.path.join ( opath1, \"share\" )\n\n# Unix...\nopath2 = os.path.join ( sys.argv[1], \"share\" )\n\ndidInstall = False\n\nchimeraPath = sys.argv[1]\nif not os.path.isdir ( chimeraPath ) :\n print (\"\")\n print (\"The specified Chimera path '%s' doesn't seem to exist\" % chimeraPath )\n print (\" - please check and try again\")\n print (\"\")\n sys.exit(0)\n\nif not \"chimera\" in chimeraPath.lower() :\n print (\"\")\n print (\"The specified path '%s' doesn't seem to be for Chimera\" % chimeraPath )\n print (\" - please check and try again\")\n print (\"\")\n sys.exit(0)\n\n\n#print (\"finding...\")\nFindShare (sys.argv[1], 0)\nsharePath = None\nfor p in sharePaths :\n if sharePath == None or len(p) < len(sharePath) :\n sharePath = p\n#print (sharePath)\n\n#exit()\n\nif sharePath == None :\n print (\"\")\n print (\"Could not find the 'share' folder in the specified path\" )\n print (\" - please check that the path points to Chimera and try again\")\n print (\"\")\n sys.exit(0)\n\n\n\nfor opath in [sharePath] :\n\n if os.path.isdir( opath ) :\n opath = os.path.join ( opath, \"Segger\" )\n\n if os.path.isdir( opath ) :\n print (\" - removing previous Segger:\" + opath)\n try :\n shutil.rmtree(opath)\n except :\n pass\n\n #print \" - copying from:\", os.getcwd()\n print (\" - copying . ->\" + opath )\n\n try :\n shutil.copytree ( os.getcwd(), opath )\n didInstall = True\n except :\n print (\"\")\n print (\"-----------------------------------------------------\")\n print (\"Problem: Could not copy to:\", opath)\n print (\" 1. please check if you have write access\")\n print (\" 2. 
try with sudo python install.py <path to Chimera>\")\n print (\"-----------------------------------------------------\")\n print (\"\")\n break\n\n didInstall = True\n\nif didInstall :\n\n print (\"\")\n print (\"------------------------\")\n print (\"Installation successful.\")\n print (\"------------------------\")\n print (\"\")\n print (\"To use:\")\n print (\" 1. Restart Chimera.\")\n print (\" 2. Select Tools -> Volume Data -> Segger\")\n print (\"\")\n\n #wh = os.path.join ( os.getcwd(), \"install.html\" )\n #import webbrowser\n #webbrowser.open( 'file://' + wh, new=2)\n\n\nelse :\n print (\"\")\n print (\"-----------------------------------------------------------------------\")\n print (\"Problem: Could not find 'share' folder in '\" + sys.argv[1] + \"'\")\n print (\" 1. please check the path\")\n print (\" 2. remember you can auto-complete while typing the path with <tab>\")\n print (\"-----------------------------------------------------------------------\")\n print (\"\")\n", "id": "945511", "language": "Python", "matching_score": 3.333296775817871, "max_stars_count": 6, "path": "Segger/install.py" }, { "content": "\n# Copyright (c) 2020 <NAME> - <EMAIL>\n# LICENCE - please see: https://opensource.org/licenses/MIT\n\n\nimport sys, os, shutil\n\n#li = sys.argv.index ( \"install.py\" )\n#print li\n\n\nif len(sys.argv) != 2 :\n print \"\"\n print \"Please add the path where Chimera is installed, e.g.:\"\n print \" python install.py /home/greg/applications/Chimera\"\n print \"\"\n\n exit()\n\n\nprint \"\"\n\n# path on Mac\nopath1 = os.path.join ( sys.argv[1], \"Contents\" )\nopath1 = os.path.join ( opath1, \"Resources\" )\nopath1 = os.path.join ( opath1, \"share\" )\n\n# path on unix\nopath2 = os.path.join ( sys.argv[1], \"share\" )\n\ndidInstall = False\n\nfor opath in [opath1, opath2] :\n\n if os.path.isdir( opath ) :\n opath = os.path.join ( opath, \"biomovie\" )\n\n if os.path.isdir( opath ) :\n print \" - removing previous BioMovie:\", opath\n try :\n shutil.rmtree(opath)\n except :\n pass\n\n #print \" - copying from:\", os.getcwd()\n print \" - copying . ->\", opath\n\n try :\n shutil.copytree ( os.getcwd(), opath )\n didInstall = True\n except :\n print \"Could not copy to:\", opath\n print \" 1. please check if you have write access\"\n print \" 2. try with sudo python install.py <path to Chimera>\"\n print \"\"\n break\n\n didInstall = True\n\nif didInstall :\n\n print \"\"\n print \"Installation complete.\"\n print \"\"\n print \"To use:\"\n print \" 1. Start or restart Chimera.\"\n print \" 2. Select Tools -> Utilities -> BioMovie\"\n print ' 3. 
More info: https://cryoem.slac.stanford.edu/ncmi/resources/software/BioMovie'\n print \"\"\n\n #wh = os.path.join ( os.getcwd(), \"install.html\" )\n #import webbrowser\n #webbrowser.open( 'file://' + wh, new=2)\n\n\nelse :\n print \"\"\n print 'Could not find install path from \"' + sys.argv[1] + '\"'\n print \" - please check the path to Chimera\"\n print \"\"\n", "id": "10353877", "language": "Python", "matching_score": 0.02923530526459217, "max_stars_count": 1, "path": "biomovie/install.py" }, { "content": "\n\nimport chimera\nimport _surface\nimport numpy\n\n\nfrom chimera import Vector\n\n\ndef Quad2Tri ( vi ) :\n t1 = (vi[0], vi[1], vi[2])\n t2 = (vi[0], vi[2], vi[3])\n return t1, t2\n\n\ndef SubdivideQuadRec ( pts, qt, numit ) :\n\n for i in range (numit) :\n nq = ()\n for q in qt :\n pts, q2 = SubdivideQuad (pts, q)\n if q2 :\n nq = nq + q2\n else :\n nq = nq + q\n\n #print i, len(qt), \"*** Old quads\" #, qt\n #print i, len(nq), \"*** New quads\" #, nq\n if len(nq) == len(qt) :\n break\n qt = nq\n #break\n\n return pts, qt\n\n\ndef SubdivideQuad ( pts, q ) :\n # 4 pts -\n if ( len(q) != 4 ) :\n print \"SubdivideQuad needs a 4-tuple, got\", q\n return pts\n\n #print \"Points:\\n\", pts\n #print \"Quad:\\n\", q\n\n v1 = pts[q[1]] - pts[q[0]]\n v2 = pts[q[2]] - pts[q[1]]\n l1 = numpy.sqrt(numpy.dot(v1,v1))\n l2 = numpy.sqrt(numpy.dot(v2,v2))\n # print \"V1: \", v1, l1\n # print \"V2: \", v2, l2\n\n q2 = None\n\n if ( l2 > 1 and l2 > l1 ) :\n m1 = pts[q[0]] + v2 * numpy.array([.5],'f')\n m2 = pts[q[1]] + v2 * numpy.array([.5],'f')\n li = len(pts)\n pts = numpy.concatenate( [pts, [m1]] )\n pts = numpy.concatenate( [pts, [m2]] )\n q2 = ( (q[0], q[1], li+1, li), (li, li+1, q[2], q[3]) )\n elif ( l1 > 1 ) :\n m1 = pts[q[0]] + v1 * numpy.array([.5],'f')\n m2 = pts[q[3]] + v1 * numpy.array([.5],'f')\n li = len(pts)\n pts = numpy.concatenate( [pts, [m1]] )\n pts = numpy.concatenate( [pts, [m2]] )\n q2 = ( (q[0], li, li+1, q[3]), (li, q[1], q[2], li+1) )\n\n return pts, q2\n\n\n\ndef AddWalls () :\n\n m = _surface.Surface_Model()\n v = numpy.array( (\n (-1,-1,-1), (-1,1,-1), (1,1,-1), (1,-1,-1),\n (-1,-1,1), (-1,1,1), (1,1,1), (1,-1,1)\n ), numpy.float32 )\n #for i in range ( len(v) ) :\n # v[i] = v[i] * .1\n v = v * WD\n v = v.astype(numpy.float32)\n\n vi_floor = (\n (0,1,2), (0,2,3)\n )\n\n vi_walls = (\n (0,4,1), (1,4,5), (1,5,2), (2,5,6)\n )\n\n #vi_walls_mesh = (\n # (2,6,3), (6,7,3), (3,4,0), (3,7,4)\n # )\n\n # q = (2,3,7,6)\n # vi_walls_mesh = Quad2Tri ( q )\n # v, q2 = SubdivideQuad (v, q)\n\n qt = ( (2,6,7,3), )\n v_wall_1, qt = SubdivideQuadRec ( v, qt, 5 )\n vi_wall_1_mesh = ()\n for q in qt :\n vi_wall_1_mesh = vi_wall_1_mesh + Quad2Tri (q)\n\n qt = ( (3,7,4,0), )\n v_wall_2, qt = SubdivideQuadRec ( v, qt, 5 )\n vi_wall_2_mesh = ()\n for q in qt :\n vi_wall_2_mesh = vi_wall_2_mesh + Quad2Tri (q)\n\n red = (1,0,0,1)\n grey = (.7,.7,.7,1)\n\n g_floor = m.add_group(v, vi_floor, red)\n g_walls = m.add_group(v, vi_walls, red)\n g_wall_1_mesh = m.add_group(v_wall_1, vi_wall_1_mesh, grey)\n g_wall_2_mesh = m.add_group(v_wall_2, vi_wall_2_mesh, grey)\n\n g_wall_1_mesh.set_display_style(g_wall_1_mesh.Mesh)\n g_wall_2_mesh.set_display_style(g_wall_2_mesh.Mesh)\n\n chimera.openModels.add([m])\n\n return m\n\n\n\n\ndef AddWireWalls ( dim, ctr ) :\n\n dim[0] = dim[0]/2.0; dim[1] = dim[1]/2.0; dim[2] = dim[2]/2.0\n\n m = _surface.Surface_Model()\n v = numpy.array( (\n (ctr[0]-dim[0],ctr[1]-dim[1],ctr[2]-dim[2]),\n (ctr[0]-dim[0],ctr[1]+dim[1],ctr[2]-dim[2]),\n 
(ctr[0]+dim[0],ctr[1]+dim[1],ctr[2]-dim[2]),\n (ctr[0]+dim[0],ctr[1]-dim[1],ctr[2]-dim[2]),\n (ctr[0]-dim[0],ctr[1]-dim[1],ctr[2]+dim[2]),\n (ctr[0]-dim[0],ctr[1]+dim[1],ctr[2]+dim[2]),\n (ctr[0]+dim[0],ctr[1]+dim[1],ctr[2]+dim[2]),\n (ctr[0]+dim[0],ctr[1]-dim[1],ctr[2]+dim[2])\n ), numpy.float32 )\n\n # v = v * lengthX2\n # v = v.astype(numpy.float32)\n\n qt = ( (2,6,7,3), )\n qt = ( (3,7,4,0), )\n\n vi_floor = ((0,1,2), (0,2,3))\n vi_walls = ((0,4,1), (1,4,5), (1,5,2), (2,5,6),\n (2,6,7), (2,7,3), (3,7,4), (3,4,0) )\n\n red = (1,0,0,0.1)\n grey = (.2,.2,.2,0.0)\n\n g_floor = m.add_group(v, vi_floor, grey)\n g_walls = m.add_group(v, vi_walls, grey)\n\n g_floor.set_display_style(g_floor.Mesh)\n g_walls.set_display_style(g_walls.Mesh)\n\n chimera.openModels.add([m])\n\n return m\n\n\n\n\ndef SphereMesh (r, div, color, patchpts, pos = Vector(0,0,0)) :\n\n m = _surface.Surface_Model()\n\n v = numpy.array( [ [0+pos.x,0+pos.y,r+pos.z], ], numpy.float32 )\n vi = ()\n\n at = 1\n l = int ( numpy.ceil (float(div)*3.0/2.0) )\n if div < 10 : l = div*2\n print \"SphereMesh:\", div, 'x', l\n lat = 0\n\n for phi_i in range(div) :\n\n phi = 90.0 - ( float(phi_i+1) * 180.0/float(div+1) )\n #print \"%.2f: \" % phi,\n z = r * numpy.sin(phi * numpy.pi/180)\n s = r * numpy.cos(phi * numpy.pi/180)\n\n for psi_i in range (l) :\n psi = float(psi_i) * 360.0/float(l)\n\n #print \"%.0f(%d)(%d)\" % (psi, at, at-l),\n x = s * numpy.sin(psi * numpy.pi/180)\n y = s * numpy.cos(psi * numpy.pi/180)\n\n pt = numpy.array( [ [x+pos.x,y+pos.y,z+pos.z], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt] )\n\n if phi_i == 0 :\n if psi_i > 0 :\n vi = vi + ( (at-1, at, 0), )\n if psi_i == l-1 :\n vi = vi + ( (at, 1, 0), )\n else :\n if psi_i > 0 :\n tris = Quad2Tri ( [at-1, at, at-l, at-l-1] )\n vi = vi + tris\n if psi_i == l-1 :\n tris = Quad2Tri ( [at, at-l+1, at-l*2+1, at-l] )\n vi = vi + tris\n\n if phi_i == div-1 :\n if psi_i > 0 :\n vi = vi + ( (at, at-1, lat+l), )\n if psi_i == l-1 :\n vi = vi + ( (at-l+1, at, lat+l), )\n\n at = at + 1\n\n\n lat = len ( v )\n\n pt = numpy.array( [ [0+pos.x,0+pos.y,-r+pos.z], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt] )\n\n\n sph = m.add_group( v, vi, color )\n #sph.set_display_style(sph.Mesh)\n\n\n if patchpts :\n vcolors = ()\n\n for i in range ( len(v) ) :\n vp = chimera.Vector ( v[i][0], v[i][1], v[i][2] ) - pos\n inP = None\n for pt in patchpts:\n if (pt[0] - vp).length < (r/3) :\n inP = pt[1]\n break\n if inP :\n if inP < 0.0 :\n vcolors = vcolors + ( (-inP*.6+.4, .4, .4, 1), )\n else :\n vcolors = vcolors + ( (.4, .4, inP*.6+.4, 1), )\n else :\n vcolors = vcolors + ( (color), )\n\n print len ( vcolors ), len ( v )\n\n sph.set_vertex_colors( vcolors )\n\n\n chimera.openModels.add([m])\n\n return m\n\n\n\n\ndef CylinderMesh (r1, r2, Length, div, color) :\n\n m = _surface.Surface_Model()\n chimera.openModels.add([m])\n\n v = None\n vi = ()\n\n # print \"CylinderMesh:\", div\n\n at = 0\n for psi_i in range(div) :\n\n psi = float(psi_i) * 360.0/float(div)\n\n #print \"%.0f(%d)(%d)\" % (psi, at, at-l),\n x1 = r1 * numpy.sin(psi * numpy.pi/180)\n y1 = r1 * numpy.cos(psi * numpy.pi/180)\n\n x2 = r2 * numpy.sin(psi * numpy.pi/180)\n y2 = r2 * numpy.cos(psi * numpy.pi/180)\n\n if psi_i == 0 :\n v = numpy.array( [ [x1,y1,0], ], numpy.float32 )\n else :\n pt1 = numpy.array( [ [x1,y1,0], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt1] )\n\n pt2 = numpy.array( [ [x2,y2,Length], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt2] )\n\n at = at + 2\n\n if psi_i == 0 :\n pass\n 
else :\n tris = Quad2Tri ( [at-4, at-2, at-1, at-3] )\n vi = vi + tris\n\n if psi_i == div-1 :\n tris = Quad2Tri ( [at-2, 0, 1, at-1] )\n vi = vi + tris\n\n\n pt1 = numpy.array( [ [0,0,0], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt1] )\n\n pt1 = numpy.array( [ [0,0,Length], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt1] )\n\n if 0 and r1 > .01 :\n print \"capping 1\"\n vi = vi + ( (at, 0, at-2), )\n for i in range ( (at-2)/2 ) :\n vi = vi + ( (at, (i+1)*2, (i+0)*2), )\n\n if 0 and r2 > .01 :\n print \"capping 2\"\n vi = vi + ( (at+1, at-1, 1), )\n for i in range ( (at-2)/2 ) :\n vi = vi + ( (at+1, (i+0)*2+1, (i+1)*2+1), )\n\n\n sph = m.add_group( v, vi, color )\n return m\n\n\n# execfile(\"c:\\greg\\chimera\\Blob\\gui.py\")\n\ndef ReadMesh (patchpts = None, pos=Vector(0,0,0)) :\n\n m = _surface.Surface_Model()\n\n com = Vector (0,0,0)\n rad = Vector (0,0,0)\n numv = 0\n\n aV = []\n fp = open ( \"C:\\\\greg\\\\chimera\\\\Blob\\\\psu_points.txt\", 'r' )\n for line in fp :\n n = line.split(',')\n for i in range( len(n) ) :\n c = n[i].split()\n if len(c) == 3 :\n #v = numpy.array([[float(c[0]),float(c[1]),float(c[2])]], 'f' )\n aV = aV + [ [float(c[0]), float(c[1]), float(c[2])], ]\n numv = numv + 1\n v = Vector ( float(c[0]), float(c[1]), float(c[2]) )\n com = com + v\n if v.length > rad.length :\n rad = v\n fp.close()\n\n v = numpy.array( aV, 'f' )\n print \"COM:\", com/float(numv)\n print \"Rad:\", rad.length\n scale = 1.0/rad.length\n\n xf = chimera.Xform.rotation ( Vector(0,0,1), 180 )\n xf.multiply ( chimera.Xform.rotation ( Vector(1,0,0), -90 ) )\n for i in range (numv) :\n vec = xf.apply ( Vector ( v[i][0], v[i][1], v[i][2] ) * scale )\n v[i][0] = vec.x\n v[i][1] = vec.y\n v[i][2] = vec.z\n\n\n vi = []\n vs = []\n fp = open ( \"C:\\\\greg\\\\chimera\\\\Blob\\\\psu_tris.txt\", 'r' )\n for line in fp :\n n = line.split(',')\n for t in n :\n try :\n ivi = int(t)\n if ivi == -1 :\n #print 'tri', vs\n vi = vi + [vs]\n vs = []\n else :\n vs = vs + [ivi]\n except:\n #print \"bad token:\", t\n continue\n fp.close()\n\n color = (0.6039, 0.8431, 0.898, 1.0)\n sph = m.add_group( v, vi, color )\n #sph.set_display_style(sph.Mesh)\n\n\n if patchpts :\n vcolors = ()\n\n for i in range ( len(v) ) :\n vp = chimera.Vector ( v[i][0], v[i][1], v[i][2] ) - pos\n inP = None\n for pt in patchpts:\n if (pt[0] - vp).length < (1.0/2.5) :\n inP = pt[1]\n break\n if inP :\n if inP < 0.0 :\n vcolors = vcolors + ( (-inP*.6+.4, .4, .4, 1), )\n else :\n vcolors = vcolors + ( (.4, .4, inP*.6+.4, 1), )\n else :\n vcolors = vcolors + ( (color), )\n\n print len ( vcolors ), len ( v )\n\n sph.set_vertex_colors( vcolors )\n\n\n chimera.openModels.add([m])\n return m\n\n\n\ndef ReadMesh2 (fname, m) :\n\n if m == None :\n m = _surface.SurfaceModel()\n\n com = Vector (0,0,0)\n rad = Vector (0,0,0)\n numv = 0\n\n aV = []\n fp = open ( fname, 'r' )\n\n l1 = fp.readline()\n numV, numT = l1.split ()\n numV = int ( numV )\n numT = int ( numT )\n\n print \"%d verts, %d tris\" % (numV, numT)\n\n verts = numpy.ones ( [numV, 3] )\n tris = numpy.ones ( [numT, 3], numpy.int )\n\n\n for vi in range ( numV ) :\n line = fp.readline()\n c = line.split(' ')\n if len(c) == 3 :\n verts[vi] = c\n\n for ti in range ( numT ) :\n line = fp.readline()\n n = line.split(' ')\n tris[ti] = n\n\n fp.close()\n\n color = (0.6039, 0.8431, 0.898, 1.0)\n patch = m.addPiece( verts, tris, color )\n #sph.set_display_style(sph.Mesh)\n #sph.set_vertex_colors( vcolors )\n\n return m\n\n\n\ndef MeshFromVertsTris (verts, tris, color=None, m=None) 
:\n\n if m == None :\n m = _surface.SurfaceModel()\n\n com = Vector (0,0,0)\n rad = Vector (0,0,0)\n numv = 0\n\n print \" - mesh from %d verts, %d tris\" % (len(verts), len(tris))\n\n if color == None :\n color = (0.6039, 0.8431, 0.898, 1.0)\n\n patch = m.addPiece( verts, tris, color )\n #sph.set_display_style(sph.Mesh)\n #sph.set_vertex_colors( vcolors )\n\n return m\n\n\n\n\n\ndef MakeColors ( n ) :\n\n # create n colors by interpolating between 5 basic colors:\n\n clrs = numpy.array ( [[.9,.3,.3], [.9,.9,.3], [.3,.9,.3],\n [.3,.9,.9], [.3,.3,.9]] )\n\n if n == 1 :\n return [ clrs[0] ]\n\n at = 0.000001\n d = -0.00001 + float ( len(clrs) - 1 ) / float ( n - 1 )\n\n colors = []\n\n for i in range (n) :\n l = int ( numpy.floor ( at ) )\n u = int ( numpy.ceil ( at ) )\n print \"color %d (%f) [%d-%d]\" % (i, at, l, u),\n clr = clrs[l] * (at-l) + clrs[u] * (u-at)\n print \" %f %f %f\" % (clr[0], clr[1], clr[2])\n colors.append ( clr )\n at = at + d\n\n return colors\n\n\n\ndef AddDiffRandColor ( clist, on_black = True, tol = .5 ) :\n\n R,G,B = None, None, None\n random.seed()\n\n for tot in range(1000) :\n\n if on_black :\n R = random.random()*.6+.4\n G = random.random()*.6+.4\n B = random.random()*.6+.4\n else :\n R = random.random()*.7\n G = random.random()*.7\n B = random.random()*.7\n\n tooclose = False\n for c in clist :\n r,g,b = c[0], c[1], c[2];\n if abs(r-R) + abs(g-G) + abs(b-B) < tol :\n tooclose = True\n\n if not tooclose: break\n\n clist.append ( numpy.array ( [R,G,B] ) )\n print \" - New rand color: %.3f %.3f %.3f\"%(R,G,B)\n\n return clist\n\ndef MakeDiffRandColors ( n ) :\n\n clrs = []\n for i in range (n) :\n AddDiffRandColor ( clrs )\n\n return clrs\n\n\ndef rrange ( n ) :\n\n i = range ( n )\n for n in i :\n ri = int ( numpy.round ( random.random() * n ) )\n # print \"swapping %d with %d\" % (n, ri)\n t = i[n]\n i[n] = i[ri]\n i[ri] = t\n\n return i\n\n\n\n\n\ndef AlignXf ( pos, v ) :\n Z = v\n Z.normalize()\n dZ = Vector( random.random(), random.random(), random.random() )\n dZ.normalize()\n X = chimera.cross ( Z, dZ )\n X.normalize ()\n Y = chimera.cross ( Z, X )\n Y.normalize ()\n\n xf = chimera.Xform.xform (\n X.x, Y.x, Z.x, pos.x,\n X.y, Y.y, Z.y, pos.y,\n X.z, Y.z, Z.z, pos.z )\n\n #xf3 = chimera.Xform.xform (\n # d, 0, 0, 0,\n # 0, d, 0, 0,\n # 0, 0, d, 0 )\n #print xf3\n\n return xf\n\n\ndef AddArrow ( pos, v, d, mxf, clr=(0,1,1,1), rad=0.2 ) :\n\n #d = v.length\n # mxf = a[0].molecule.openState.xform\n\n xf = AlignXf ( pos, v )\n if mxf != None : xf.premultiply ( mxf )\n a = CylinderMesh ( rad, rad, d-(rad*3), 10, clr )\n a.openState.xform = xf\n\n xf = AlignXf ( pos+(v*(d-(rad*3))), v )\n if mxf != None : xf.premultiply ( mxf )\n b = CylinderMesh ( rad*3, 0.01, rad*3, 10, clr )\n b.openState.xform = xf\n\n return [a, b]\n\n\n\nclass MyBBox :\n\n\n def __init__ (self) :\n\n self.llf = chimera.Point (1e99, 1e99, 1e99)\n self.urb = chimera.Point (-1e99, -1e99, -1e99)\n self.center = chimera.Point (0, 0, 0)\n self.dim = chimera.Point (0, 0, 0)\n\n\n def add (self, pt) :\n\n if pt[0] < self.llf[0] : self.llf[0] = pt[0]\n if pt[1] < self.llf[1] : self.llf[1] = pt[1]\n if pt[2] < self.llf[2] : self.llf[2] = pt[2]\n\n if pt[0] > self.urb[0] : self.urb[0] = pt[0]\n if pt[1] > self.urb[1] : self.urb[1] = pt[1]\n if pt[2] > self.urb[2] : self.urb[2] = pt[2]\n\n self.center[0] = (self.urb[0] + self.llf[0]) / 2.0\n self.center[1] = (self.urb[1] + self.llf[1]) / 2.0\n self.center[2] = (self.urb[2] + self.llf[2]) / 2.0\n\n self.dim = self.urb - self.llf\n\n\n def AddModel 
(self) :\n\n self.model = AddWireWalls ( self.dim, self.center )\n\n #T = chimera.Xform.translation ( self.center.toVector() )\n #S = chimera.Xform.translation ( 1,2,3 )\n #self.model.openState.xform = xf.translation(1,2,3)\n\n\n\n\ndef Measure ( mol ) :\n\n B = MyBBox ()\n\n for at in mol.atoms :\n B.add ( at.coord() )\n\n print \"B-box: (%.2f %.2f %.2f) - (%.2f %.2f %.2f), center (%.2f %.2f %.2f), dim (%.2f %.2f %.2f)\" % (\n B.llf.x, B.llf[1], B.llf[2], B.urb[0], B.urb[1], B.urb[2],\n B.center[0], B.center[1], B.center[2], B.dim[0], B.dim[1], B.dim[2] )\n", "id": "10316065", "language": "Python", "matching_score": 4.485047340393066, "max_stars_count": 6, "path": "Segger/Mesh.py" }, { "content": "import chimera\nimport numpy\n\ndef AxesMod ( COM=[0,0,0], U=None, Extents=[30,30,30], rad=1.0, f=1.0,\n alignTo = None ) :\n\n import _surface\n mol = _surface.SurfaceModel()\n chimera.openModels.add([mol], sameAs = alignTo)\n\n axes = AddAxes ( rad, Extents[0]*f, Extents[1]*f, Extents[2]*f, 1.0, mol )\n axes.name = \"Axes\"\n\n if U != None :\n R = numpy.array([\n [ U[0,0], U[0,1], U[0,2], 0.0 ],\n [ U[1,0], U[1,1], U[1,2], 0.0 ],\n [ U[2,0], U[2,1], U[2,2], 0.0 ] ] )\n\n T = numpy.array([\n [ 1.0, 0.0, 0.0, COM[0] ],\n [ 0.0, 1.0, 0.0, COM[1] ],\n [ 0.0, 0.0, 1.0, COM[2] ] ] )\n\n import Matrix\n M = Matrix.multiply_matrices ( T, R )\n\n ps = []\n for p in axes.surfacePieces :\n v, t = numpy.copy(p.geometry[0]), numpy.copy(p.geometry[1])\n ps.append ( [v,t,p.color] )\n axes.removePiece ( p )\n\n import _contour\n for p in ps :\n _contour.affine_transform_vertices( p[0], M )\n axes.addPiece ( p[0], p[1], p[2] )\n\n from random import random as rand\n clr = ( rand()*.7, rand()*.7, rand()*.7, 1.0 )\n # for p in axes.surfacePieces : p.color = clr\n\n #for g in axes.surfacePieces :\n # g.initial_v = numpy.copy ( g.geometry[0] )\n\n return axes\n\n\ndef AxesMod1 ( COM=[0,0,0], U=None, length=10.0, exfac=10.0, rad=1.0,\n alignTo = None, axes=None ) :\n\n import _surface\n toaxes = axes\n if toaxes == None :\n toaxes = _surface.SurfaceModel()\n chimera.openModels.add([toaxes], sameAs = alignTo)\n toaxes.name = \"Axes\"\n\n #axes = AddAxes ( rad, Extents[0]*f, Extents[1]*f, Extents[2]*f, 1.0, mol )\n pos = chimera.Vector(0,0,0)\n #mol = AddArrow2 ( pos, chimera.Vector(1,0,0), lX, (cF,0,0,1), rad, mol )\n #mol = AddArrow2 ( pos, chimera.Vector(0,1,0), lY, (0,cF,0,1), rad, mol )\n naxes = AddArrow3 ( pos, chimera.Vector(0,0,1), length, (0,0,1,1), rad, _surface.SurfaceModel() )\n\n if U != None :\n S = numpy.array([\n [ 1.0, 0.0, 0.0, 0 ],\n [ 0.0, 1.0, 0.0, 0 ],\n [ 0.0, 0.0, 2.0, 0 ] ] )\n\n P = numpy.array([\n [ 1.0, 0.0, 0.0, 0 ],\n [ 0.0, 1.0, 0.0, 0 ],\n [ 0.0, 0.0, 1.0, -length ] ] )\n\n R = numpy.array([\n [ U[0,0], U[0,1], U[0,2], 0.0 ],\n [ U[1,0], U[1,1], U[1,2], 0.0 ],\n [ U[2,0], U[2,1], U[2,2], 0.0 ] ] )\n\n T = numpy.array([\n [ 1.0, 0.0, 0.0, COM[0] ],\n [ 0.0, 1.0, 0.0, COM[1] ],\n [ 0.0, 0.0, 1.0, COM[2] ] ] )\n\n import Matrix\n M = Matrix.multiply_matrices ( T, R )\n M = Matrix.multiply_matrices ( M, P )\n M = Matrix.multiply_matrices ( M, S )\n\n import _contour\n for p in naxes.surfacePieces :\n v, t = numpy.copy(p.geometry[0]), numpy.copy(p.geometry[1])\n _contour.affine_transform_vertices( v, M )\n toaxes.addPiece ( v,t,p.color )\n\n\n return toaxes\n\n\n\ndef AddArrow2 ( pos, v, d, clr=(0,1,1,1), rad=0.2, mol=None ) :\n\n if mol == None :\n import _surface\n mol = _surface.SurfaceModel()\n chimera.openModels.add ( [mol] ) # , sameAs = alignTo)\n mol.name = \"Box\"\n\n xf = 
AlignXf ( pos, v )\n mol = CylinderMesh2 (rad, rad, d-(rad*2), 40, clr, xf, mol )\n\n xf = AlignXf ( pos+(v*(d-(rad*3))), v )\n mol = CylinderMesh2 (rad*2, 0.01, rad*2, 40, clr, xf, mol )\n\n return mol\n\n\n\ndef AddArrow3 ( pos, v, d, clr=(0,1,1,1), rad=0.2, mol=None ) :\n\n xf = AlignXf ( pos, v )\n mol = CylinderMesh2 (rad, rad, d-(rad*1.5), 40, clr, xf, mol )\n\n xf = AlignXf ( pos+(v*(d-(rad*1.5))), v )\n mol = CylinderMesh2 (rad*3.0, 0.01, rad*1.5, 40, clr, xf, mol )\n\n return mol\n\n\n\ndef AddArrow4 ( pos, v, d, clr=(0,1,1,1), rad=0.2, mol=None, hrad=3.0, hlen=3.0 ) :\n\n xf = AlignXf ( pos, v )\n mol = CylinderMesh2 (rad, rad, d-hlen, 10, clr, xf, mol )\n\n xf = AlignXf ( pos+(v*(d-(hlen))), v )\n mol = CylinderMesh2 (hrad, 0.01, hlen, 10, clr, xf, mol )\n\n return mol\n\n\n\ndef AddCylinderSolid ( pos, v, d, clr=(0,1,1,1), rad=0.2, mol=None ) :\n\n xf = AlignXf ( pos, v )\n mol = CylinderMesh2 (rad, rad, d, 10, clr, xf, mol )\n\n #xf = AlignXf ( pos+v, pos+(v*1.00001) )\n #mol = CylinderMesh2 (rad, 0.00001, 0, 10, clr, xf, mol )\n\n #xf = AlignXf ( pos, pos+(v*0.00001) )\n #mol = CylinderMesh2 (rad, 0.00001, rad, 10, clr, xf, mol )\n\n return mol\n\n\n\n\ndef CylinderSurf ( pos, v, d, clr=(0,1,1,1), rad=0.2, mol=None ) :\n\n xf = AlignXf ( pos, v )\n v, vi = CylMesh ( rad, rad, d, 8, clr, xf )\n sp = mol.addPiece ( v, vi, clr )\n return sp\n\n\n\ndef AddAxes ( rad, lX, lY, lZ, cF, mol ) :\n\n pos = chimera.Vector(0,0,0)\n mol = AddArrow2 ( pos, chimera.Vector(1,0,0), lX, (cF,0,0,1), rad, mol )\n mol = AddArrow2 ( pos, chimera.Vector(0,1,0), lY, (0,cF,0,1), rad, mol )\n mol = AddArrow2 ( pos, chimera.Vector(0,0,1), lZ, (0,0,cF,1), rad, mol )\n mol.name = \"XYZ (RGB) Axes\"\n\n return mol\n\n\n\ndef AlignXf ( pos, v ) :\n Z = v\n Z.normalize()\n from random import random as rand\n dZ = chimera.Vector( rand(), rand(), rand() )\n dZ.normalize()\n X = chimera.cross ( Z, dZ )\n X.normalize ()\n Y = chimera.cross ( Z, X )\n Y.normalize ()\n\n xf = chimera.Xform.xform (\n X.x, Y.x, Z.x, pos[0],\n X.y, Y.y, Z.y, pos[1],\n X.z, Y.z, Z.z, pos[2] )\n\n #xf3 = chimera.Xform.xform (\n # d, 0, 0, 0,\n # 0, d, 0, 0,\n # 0, 0, d, 0 )\n #print xf3\n\n return xf\n\n\ndef Quad2Tri ( vi ) :\n t1 = (vi[0], vi[1], vi[2])\n t2 = (vi[0], vi[2], vi[3])\n return t1, t2\n\n\n\ndef AddVert ( verts, v ) :\n\n if verts == None :\n #return numpy.array( [ [v[0], v[1], v[2]], ], numpy.float32 )\n return numpy.array( [ v ], numpy.float32 )\n else :\n #pt = numpy.array( [ [v[0], v[1], v[2]], ], numpy.float32 )\n #return numpy.concatenate ( [verts, v] )\n return numpy.concatenate ( [verts, numpy.array( [ v ], numpy.float32 ) ] )\n\n\n\n\n\ndef TriangleMesh ( p1, p2, p3, color, xf, mol ) :\n\n if color == None :\n from random import random as rand\n color = ( rand()*.7, rand()*.7, rand()*.7, 1.0 )\n\n\n v = None\n vi = []\n\n v = AddVert ( v, p1 )\n v = AddVert ( v, p2 )\n v = AddVert ( v, p3 )\n\n tris = [(0,1,2)]\n vi.extend(tris)\n\n sph = mol.addPiece ( v, vi, color )\n return mol\n\n\n\ndef TriangleMeshDiv ( p1, p2, p3, div, color, xf, mol ) :\n\n if color == None :\n from random import random as rand\n color = ( rand()*.7, rand()*.7, rand()*.7, 1.0 )\n\n\n v = None\n vi = []\n\n v1 = p2 - p1\n v2 = p3 - p1\n\n v1l = numpy.sqrt ( numpy.sum ( v1*v1 ) )\n v2l = numpy.sqrt ( numpy.sum ( v2*v2 ) )\n N = numpy.ceil ( v1l / div )\n d1 = v1l / N\n d2 = v2l / N\n\n print \" - v1 len: %.3f, d: %.3f, numdiv: %d, reald1: %.3f, reald2: %.3f\" % (v1l, div, int(N), d1, d2 )\n\n if ( N <= 1 ) :\n v = AddVert ( v, p1 )\n v = 
AddVert ( v, p2 )\n v = AddVert ( v, p3 )\n vi.extend( [(0,1,2)] )\n sph = mol.addPiece ( v, vi, color )\n return mol\n\n v1n = v1 / v1l\n v2n = v2 / v2l\n\n v = AddVert ( v, p1 )\n prev_line_i = 0\n\n for i in range (1, int(N)+1 ) :\n\n #print \"row \", i\n\n side_p1 = p1 + i * d1 * v1n\n side_p2 = p1 + i * d2 * v2n\n\n v = AddVert ( v, side_p1 )\n prev_i = prev_line_i + i\n\n sv = side_p2 - side_p1\n svl = numpy.sqrt ( numpy.sum ( sv*sv ) )\n svn = sv / svl\n\n Nmid = i-1\n if Nmid == 0 :\n v = AddVert ( v, side_p2 )\n vi.extend( [(0,1,2)] )\n #print \" v \"\n prev_line_i += 1\n\n else :\n ds = svl / (Nmid+1.0)\n\n for j in range (1, Nmid+1) :\n\n v = AddVert ( v, j*ds*svn + side_p1 )\n\n tri = [(prev_line_i,prev_i,prev_i+1)]\n vi.extend( tri )\n #print \" -\", j, tri\n\n tri = [(prev_line_i,prev_i+1,prev_line_i+1)]\n vi.extend( tri )\n #print \" -\", j, tri\n\n prev_i += 1\n prev_line_i += 1\n\n v = AddVert ( v, side_p2 )\n tri = [(prev_line_i,prev_i,prev_i+1)]\n #print \" +\", tri\n vi.extend( tri )\n\n prev_line_i += 1\n\n\n\n sph = mol.addPiece ( v, vi, color )\n sph.displayStyle = sph.Mesh\n return sph\n\n\n\n\ndef CylMesh (r1, r2, Length, div, color, xf) :\n\n v = None\n vi = []\n\n # print \"CylinderMesh:\", div\n\n at = 0\n for psi_i in range(div) :\n\n psi = float(psi_i) * 360.0/float(div)\n\n #print \"%.0f(%d)(%d)\" % (psi, at, at-l),\n x1 = r1 * numpy.sin(psi * numpy.pi/180)\n y1 = r1 * numpy.cos(psi * numpy.pi/180)\n\n x2 = r2 * numpy.sin(psi * numpy.pi/180)\n y2 = r2 * numpy.cos(psi * numpy.pi/180)\n\n p = chimera.Point ( x1,y1,0 );\n if xf : p = xf.apply ( p )\n\n if psi_i == 0 :\n v = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n else :\n pt1 = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt1] )\n\n p = chimera.Point ( x2,y2,Length );\n if xf : p = xf.apply ( p )\n pt2 = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt2] )\n\n at = at + 2\n\n if psi_i == 0 :\n pass\n else :\n tris = Quad2Tri ( [at-3, at-1, at-2, at-4] )\n vi.extend(tris)\n\n if psi_i == div-1 :\n tris = Quad2Tri ( [at-1, 1, 0, at-2] )\n vi.extend(tris)\n\n pi1 = len(v)\n p = chimera.Point ( 0,0,0 );\n if xf : p = xf.apply ( p )\n pt1 = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n #v = numpy.concatenate ( [v, pt1] )\n\n pi2 = len(v)\n p = chimera.Point ( 0,0,Length );\n if xf : p = xf.apply ( p )\n pt1 = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n #v = numpy.concatenate ( [v, pt1] )\n\n for i in range(div) :\n ti1 = i*2\n ti2 = (i+1)*2 if i < div-1 else (0)\n #vi.append( (pi1, ti1, ti2) )\n\n bi1 = i*2+1\n bi2 = (i+1)*2+1 if i < div-1 else (1)\n #vi.append( (pi2, bi2, bi1) )\n\n\n return v, vi\n\n\n\ndef CylinderMesh2 (r1, r2, Length, div, color, xf, mol) :\n\n v, vi = CylMesh ( r1, r2, Length, div, color, xf )\n sph = mol.addPiece ( v, vi, color )\n return mol\n\n\n\ndef AddCylinder ( pos, v, d, clr=(0,1,1,1), rad=0.2, mol=None ) :\n\n xf = AlignXf ( pos, v )\n mol = CylMesh2 ( rad, rad, d, 20, clr, xf, mol )\n return mol\n\n\n\ndef CylMesh2 (r1, r2, Length, div, color, xf, mod) :\n\n v = None\n vi = []\n\n # print \"CylinderMesh:\", div\n\n topv = None\n botv = None\n\n at = 0\n for psi_i in range(div) :\n\n psi = float(psi_i) * 360.0/float(div)\n\n #print \"%.0f(%d)(%d)\" % (psi, at, at-l),\n x1 = r1 * numpy.sin(psi * numpy.pi/180)\n y1 = r1 * numpy.cos(psi * numpy.pi/180)\n\n x2 = r2 * numpy.sin(psi * numpy.pi/180)\n y2 = r2 * numpy.cos(psi * numpy.pi/180)\n\n p = chimera.Point ( x1,y1,0 );\n if 
xf : p = xf.apply ( p )\n\n if psi_i == 0 :\n v = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n topv = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n else :\n pt1 = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt1] )\n topv = numpy.concatenate ( [topv, pt1] )\n\n p = chimera.Point ( x2,y2,Length );\n if xf : p = xf.apply ( p )\n pt2 = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt2] )\n\n if psi_i == 0 :\n botv = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n else :\n botv = numpy.concatenate ( [botv, pt2] )\n\n at = at + 2\n\n if psi_i == 0 :\n pass\n else :\n tris = Quad2Tri ( [at-3, at-1, at-2, at-4] )\n vi.extend(tris)\n\n if psi_i == div-1 :\n tris = Quad2Tri ( [at-1, 1, 0, at-2] )\n vi.extend(tris)\n\n pi1 = len(topv)\n p = chimera.Point ( 0,0,0 );\n if xf : p = xf.apply ( p )\n pt1 = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n topv = numpy.concatenate ( [topv, pt1] )\n\n pi2 = len(botv)\n p = chimera.Point ( 0,0,Length );\n if xf : p = xf.apply ( p )\n pt1 = numpy.array( [ [p[0], p[1], p[2]], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt1] )\n botv = numpy.concatenate ( [botv, pt1] )\n\n topvi = []\n botvi = []\n\n for i in range(div) :\n ti1 = i\n ti2 = (i+1) if i < div-1 else (0)\n topvi.append( (pi1, ti1, ti2) )\n\n bi1 = i\n bi2 = (i+1) if i < div-1 else (0)\n botvi.append( (pi2, bi2, bi1) )\n\n\n sp = mod.addPiece ( v, vi, color )\n sp = mod.addPiece ( topv, topvi, color )\n sp = mod.addPiece ( botv, botvi, color )\n\n return mod\n\n\n\ndef PlaneMesh ( w, h, d, color, xf, mol ) :\n\n if mol == None :\n import _surface\n mol = _surface.SurfaceModel()\n chimera.openModels.add ( [mol] ) # , sameAs = alignTo)\n mol.name = \"Box\"\n\n atX = -w/2\n atY = -h/2\n\n if xf == None :\n xf = chimera.Xform()\n\n numx = int ( max ( numpy.ceil ( w / d ), 2 ) )\n numy = int ( max ( numpy.ceil ( h / d ), 2 ) )\n\n dx = w / float(numx-1)\n dy = h / float(numx-1)\n\n print \" - plane - w %.2f, h %.2f, %d/%d\" % (w, h, numx, numy)\n\n v = numpy.zeros ( [numx*numy,3] )\n vi = []\n for j in range ( numy ) :\n for i in range ( numx ) :\n v[j*numx+i] = xf.apply ( chimera.Point ( atX + dx*i, atY + dy*j, 0 ) )\n #vs.append ( p.data() )\n\n if i > 0 and j > 0 :\n p1 = j*numx+i\n p2 = j*numx+i-1\n p3 = (j-1)*numx+i-1\n p4 = (j-1)*numx+i\n vi.extend( Quad2Tri ( [p1,p2,p3,p4] ) )\n\n sph = mol.addPiece ( v, vi, color )\n return sph\n\n\n\n\ndef BoxMesh (w, h, l, color, xf, mol) :\n\n if mol == None :\n import _surface\n mol = _surface.SurfaceModel()\n chimera.openModels.add ( [mol] ) # , sameAs = alignTo)\n mol.name = \"Box\"\n\n\n v = numpy.zeros ( [8,3] )\n\n # print \"BoxMesh:\", div\n\n at = 0\n\n w = w / 2.0\n h = h / 2.0\n l = l / 2.0\n\n if xf == None :\n xf = chimera.Xform()\n\n\n v[0] = xf.apply ( chimera.Point ( -w,-h,-l ) )\n v[1] = xf.apply ( chimera.Point ( w,-h,-l ) )\n v[2] = xf.apply ( chimera.Point ( w,h,-l ) )\n v[3] = xf.apply ( chimera.Point ( -w,h,-l ) )\n\n v[4] = xf.apply ( chimera.Point ( -w,-h,l ) )\n v[5] = xf.apply ( chimera.Point ( w,-h,l ) )\n v[6] = xf.apply ( chimera.Point ( w,h,l ) )\n v[7] = xf.apply ( chimera.Point ( -w,h,l ) )\n\n vi = []\n vi.extend( Quad2Tri ( [0,3,2,1] ) )\n vi.extend( Quad2Tri ( [1,5,6,2] ) )\n vi.extend( Quad2Tri ( [2,6,7,3] ) )\n vi.extend( Quad2Tri ( [0,4,5,1] ) )\n vi.extend( Quad2Tri ( [0,4,7,3] ) )\n vi.extend( Quad2Tri ( [5,4,7,6] ) )\n\n sph = mol.addPiece ( v, vi, color )\n return sph\n\n\n\n\ndef BoxArrowMesh (w, h, l, 
al, color, xf, mol) :\n\n if mol == None :\n import _surface\n mol = _surface.SurfaceModel()\n chimera.openModels.add ( [mol] ) # , sameAs = alignTo)\n mol.name = \"Box\"\n\n\n v = numpy.zeros ( [14,3] )\n\n # print \"BoxMesh:\", div\n\n at = 0\n\n w = w / 2.0\n h = h / 2.0\n l = l / 2.0\n\n if xf == None :\n xf = chimera.Xform()\n\n\n v[0] = xf.apply ( chimera.Point ( -w,-h,-l ) )\n v[1] = xf.apply ( chimera.Point ( w,-h,-l ) )\n v[2] = xf.apply ( chimera.Point ( w,h,-l ) )\n v[3] = xf.apply ( chimera.Point ( -w,h,-l ) )\n\n v[4] = xf.apply ( chimera.Point ( -w,-h,l-al ) )\n v[5] = xf.apply ( chimera.Point ( w,-h,l-al ) )\n v[6] = xf.apply ( chimera.Point ( w,h,l-al ) )\n v[7] = xf.apply ( chimera.Point ( -w,h,l-al ) )\n\n vi = []\n vi.extend( Quad2Tri ( [0,3,2,1] ) )\n vi.extend( Quad2Tri ( [1,5,6,2] ) )\n vi.extend( Quad2Tri ( [2,6,7,3] ) )\n vi.extend( Quad2Tri ( [0,4,5,1] ) )\n vi.extend( Quad2Tri ( [0,4,7,3] ) )\n vi.extend( Quad2Tri ( [5,4,7,6] ) )\n\n v[8] = xf.apply ( chimera.Point ( -w,-h-al/2.0,l-al ) )\n v[9] = xf.apply ( chimera.Point ( w,-h-al/2.0,l-al ) )\n v[10] = xf.apply ( chimera.Point ( w,h+al/2.0,l-al ) )\n v[11] = xf.apply ( chimera.Point ( -w,h+al/2.0,l-al ) )\n\n v[12] = xf.apply ( chimera.Point ( -w,0,l ) )\n v[13] = xf.apply ( chimera.Point ( w,0,l ) )\n\n vi.extend( Quad2Tri ( [8,11,10,9] ) )\n vi.extend( Quad2Tri ( [8,9,13,12] ) )\n vi.extend( Quad2Tri ( [11,12,13,10] ) )\n vi.extend( [(8,12,11)] )\n vi.extend( [(9,10,13)] )\n\n sph = mol.addPiece ( v, vi, color )\n return sph\n\n\n\n\n\ndef SphereMesh (r, div, color, pos, mol) :\n\n posx,posy,posz = pos[0], pos[1], pos[2]\n v = numpy.array( [ [0+posx,0+posy,r+posz], ], numpy.float32 )\n vi = ()\n\n at = 1\n l = int ( numpy.ceil (float(div)*3.0/2.0) )\n if div < 10 : l = div*2\n #print \"SphereMesh:\", div, 'x', l\n lat = 0\n\n for phi_i in range(div) :\n\n phi = 90.0 - ( float(phi_i+1) * 180.0/float(div+1) )\n #print \"%.2f: \" % phi,\n z = r * numpy.sin(phi * numpy.pi/180)\n s = r * numpy.cos(phi * numpy.pi/180)\n\n for psi_i in range (l) :\n psi = float(psi_i) * 360.0/float(l)\n\n #print \"%.0f(%d)(%d)\" % (psi, at, at-l),\n x = s * numpy.sin(psi * numpy.pi/180)\n y = s * numpy.cos(psi * numpy.pi/180)\n\n pt = numpy.array( [ [x+posx,y+posy,z+posz], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt] )\n\n if phi_i == 0 :\n if psi_i > 0 :\n vi = vi + ( (at-1, at, 0), )\n if psi_i == l-1 :\n vi = vi + ( (at, 1, 0), )\n else :\n if psi_i > 0 :\n tris = Quad2Tri ( [at-1, at, at-l, at-l-1] )\n vi = vi + tris\n if psi_i == l-1 :\n tris = Quad2Tri ( [at, at-l+1, at-l*2+1, at-l] )\n vi = vi + tris\n\n if phi_i == div-1 :\n if psi_i > 0 :\n vi = vi + ( (at, at-1, lat+l), )\n if psi_i == l-1 :\n vi = vi + ( (at-l+1, at, lat+l), )\n\n at = at + 1\n\n\n lat = len ( v )\n\n pt = numpy.array( [ [0+posx,0+posy,-r+posz], ], numpy.float32 )\n v = numpy.concatenate ( [v, pt] )\n\n\n sph = mol.addPiece( v, vi, color )\n return sph\n\n\n\n\ndef prAxes ( points ) :\n\n com = numpy.sum(points, axis=0) / len(points)\n C = chimera.Vector ( com[0], com[1], com[2] )\n\n comv = numpy.ones_like ( points ) * com\n points = points - comv\n\n i = numpy.matrix ( [[1,0,0], [0,1,0], [0,0,1]] )\n ii = i * numpy.sum ( numpy.multiply ( points, points ) )\n p_t = numpy.transpose(points)\n td = numpy.tensordot ( points, p_t, axes=[0,1] )\n\n I0 = ii - td\n\n try :\n U, S, V = numpy.linalg.svd( I0 )\n except :\n print \"- error computing SVD - prob. 
singular matrix\"\n return []\n\n #U[0,0] = U[0,0] * -1.0\n #U[1,0] = U[1,0] * -1.0\n #U[2,0] = U[2,0] * -1.0\n\n #U[0,2] = U[0,2] * -1.0\n #U[1,2] = U[1,2] * -1.0\n #U[2,2] = U[2,2] * -1.0\n\n return [C, U, S, V]\n\n\n\ndef map_points (fmap, useThreshold = True):\n\n from _contour import affine_transform_vertices as transform_vertices\n\n mat = fmap.data.full_matrix()\n threshold = fmap.surface_levels[0]\n\n if useThreshold == False :\n #threshold = -1e9\n threshold = 1e-5\n #print \" - not using threshold\"\n\n import _volume\n points = _volume.high_indices(mat, threshold)\n fpoints = points.astype(numpy.single)\n fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]\n\n nz = numpy.nonzero( fpoint_weights )[0]\n if len(nz) < len (fpoint_weights) :\n fpoints = numpy.take( fpoints, nz, axis=0 )\n fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)\n\n transform_vertices( fpoints, fmap.data.ijk_to_xyz_transform )\n\n if 0 : print \"FitPoints from %s with threshold %.4f, %d nonzero\" % (\n fmap.name, threshold, len(nz) )\n\n return fpoints, fpoint_weights\n\n\n\ndef AxesModOffset ( COM=[0,0,0], U=None, Extents=[30,30,30], rad=1.0, f=1.0,\n\t\t\t alignTo = None ) :\n\n import _surface\n mol = _surface.SurfaceModel()\n chimera.openModels.add([mol], sameAs = alignTo)\n\n pos = chimera.Vector(0,0,0)\n axes = AddArrow2 ( pos, chimera.Vector(0,1,0), lY, (cF,.3,.3,1), rad, mol )\n\n axes.name = \"Riboarrow\"\n\n if U != None :\n R = numpy.array([\n\t\t\t\t\t\t [ U[0,0], U[0,1], U[0,2], 0.0 ],\n\t\t\t\t\t\t [ U[1,0], U[1,1], U[1,2], 0.0 ],\n\t\t\t\t\t\t [ U[2,0], U[2,1], U[2,2], 0.0 ] ] )\n\n T = numpy.array([\n\t\t\t\t\t\t [ 1.0, 0.0, 0.0, COM[0] ],\n\t\t\t\t\t\t [ 0.0, 1.0, 0.0, COM[1] ],\n\t\t\t\t\t\t [ 0.0, 0.0, 1.0, COM[2] ] ] )\n\n Ti = numpy.array([\n\t\t\t\t\t\t [ 1.0, 0.0, 0.0, Extents[0]*0.7 ],\n\t\t\t\t\t\t [ 0.0, 1.0, 0.0, -Extents[1]/2.0 ],\n\t\t\t\t\t\t [ 0.0, 0.0, 1.0, -Extents[0]*0.7 ] ] )\n\n import Matrix\n M = Matrix.multiply_matrices ( R, Ti )\n M = Matrix.multiply_matrices ( T, M )\n\n ps = []\n for p in axes.surfacePieces :\n v, t = numpy.copy(p.geometry[0]), numpy.copy(p.geometry[1])\n ps.append ( [v,t,p.color] )\n axes.removePiece ( p )\n\n import _contour\n for p in ps :\n _contour.affine_transform_vertices( p[0], M )\n axes.addPiece ( p[0], p[1], p[2] )\n\n from random import random as rand\n clr = ( rand()*.7, rand()*.7, rand()*.7, 1.0 )\n\t# for p in axes.surfacePieces : p.color = clr\n\n #for g in axes.surfacePieces :\n # g.initial_v = numpy.copy ( g.geometry[0] )\n\n return axes\n\n\ndef SurfCtrRad ( surfMod ) :\n\n COM = numpy.array ( [ 0,0,0 ], numpy.float32 )\n N = 0.0;\n rad = 0.0;\n\n for sp in surfMod.surfacePieces :\n for p in sp.geometry[0] :\n COM = COM + p;\n N = N + 1.0;\n r = numpy.sqrt ( (p**2).sum() )\n if r > rad :\n rad = r\n\n COM = COM / N;\n comPt = chimera.Point ( COM[0], COM[1], COM[2] )\n #comVec = comPt.toVector ()\n # print \" - com: \", self.comp\n\n return comPt, rad\n\n\n\n\ndef GetMod ( name ) :\n\n for m in chimera.openModels.list() :\n if m.name == name :\n return m\n\n return None\n", "id": "6693320", "language": "Python", "matching_score": 3.4451584815979004, "max_stars_count": 6, "path": "Segger/axes.py" }, { "content": "\n# Copyright (c) 2020 <NAME> - <EMAIL>\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport chimera\nimport os\nimport os.path\nimport Tkinter\nfrom CGLtk import Hybrid\nimport VolumeData\nimport _multiscale\nimport MultiScale.surface\nimport _surface\nimport numpy\nimport _contour\nimport Matrix\nimport VolumeViewer\nfrom sys import stderr\nfrom time import clock\nimport sets\nimport FitMap\n\nfrom axes import prAxes\nimport regions\nimport graph\nfrom Segger import dev_menus, timing, seggerVersion\n\nOML = chimera.openModels.list\n\nREG_OPACITY = 0.45\n\n\n# http://geomalgorithms.com/a06-_intersect-2.html\n\n\n\nfrom segment_dialog import current_segmentation, segmentation_map\n\n\nclass ISeg_Dialog ( chimera.baseDialog.ModelessDialog ):\n\n title = \"iSeg - Icosahedral Segmentation (Segger v\" + seggerVersion + \")\"\n name = \"segger_iseg\"\n buttons = ( \"Close\" )\n help = 'https://github.com/gregdp/segger'\n\n def fillInUI(self, parent):\n\n self.group_mouse_mode = None\n\n tw = parent.winfo_toplevel()\n self.toplevel_widget = tw\n tw.withdraw()\n\n parent.columnconfigure(0, weight = 1)\n\n row = 0\n\n menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)\n tw.config(menu = menubar)\n\n f = Tkinter.Frame(parent)\n f.grid(column=0, row=row, sticky='ew')\n l = Tkinter.Label(f, text=' ')\n l.grid(column=0, row=row, sticky='w')\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" 1. Tools -> Higher-Order Structure -> Icosahedron Surface.\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" - show & match icosahedron to current map (change Orientation if necesary)\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n\n l = Tkinter.Label(ff, text = \" 2. Make icosahedral surface mesh\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n\n l = Tkinter.Label(ff, text = \" \", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n b = Tkinter.Button(ff, text=\"Make\", command=self.Icos2)\n b.grid (column=1, row=0, sticky='w', padx=5, pady=1)\n\n b = Tkinter.Button(ff, text=\"Toggle Display - Mesh/Solid\", command=self.ToggleDisp)\n b.grid (column=3, row=0, sticky='w', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n\n l = Tkinter.Label(ff, text = \" 3. 
Push outward\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" # iterations: \", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n self.numIt = Tkinter.StringVar(ff)\n self.numIt.set ( \"100\" )\n e = Tkinter.Entry(ff, width=7, textvariable=self.numIt)\n e.grid(column=1, row=0, sticky='w', padx=5, pady=1)\n\n l = Tkinter.Label(ff, text = \", stiffness: \", anchor = 'w')\n l.grid(column=2, row=0, sticky='ew', padx=5, pady=1)\n\n self.springF = Tkinter.StringVar(ff)\n self.springF.set ( \"0.2\" )\n e = Tkinter.Entry(ff, width=7, textvariable=self.springF)\n e.grid(column=3, row=0, sticky='w', padx=5, pady=1)\n\n\n b = Tkinter.Button(ff, text=\"Push\", command=self.Icos2Push)\n b.grid (column=4, row=0, sticky='w', padx=5, pady=1)\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" - Set radius:\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n\n l = Tkinter.Label(ff, text = \" \", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n sv = Tkinter.StringVar(ff)\n sv.trace(\"w\", lambda name, index, mode, sv=sv: self.set_rad_changed_cb(sv.get()) )\n self.setRad = sv\n\n e = Tkinter.Entry(ff, width=7, textvariable=sv )\n e.grid(column=1, row=0, sticky='w', padx=5, pady=1)\n\n\n # Radius\n #rs = Hybrid.Scale(ff, '', 1, 1500, 0.01, 1150, length=200)\n #rs.frame.grid(row = row, column = 1, sticky = 'ew', padx=5, pady=1, columnspan=10)\n #rs.entry.config ( width=100 )\n\n #rs.callback(self.radius_changed_cb)\n #rs.entry.bind('<KeyPress-Return>', self.radius_changed_cb)\n #self.radius = rs\n\n\n self.rad = Tkinter.DoubleVar(ff)\n self.rad.set ( 100 )\n\n smod = self.GetMod ( \"Icosahedron Faces\" )\n if smod != None :\n print \"Found faces...\"\n verts, tris = smod.icosVerts0, smod.icosTris\n p1 = smod.icosVerts [ tris[0][0] ]\n r = numpy.sqrt ( numpy.sum(p1*p1) )\n p1 = smod.icosVerts0 [ tris[0][0] ]\n r0 = numpy.sqrt ( numpy.sum(p1*p1) )\n print \" - rad %.4f, orig: %.4f\" % (r, r0)\n self.rad.set ( r )\n\n\n self.radius = Tkinter.Scale(ff, from_=0, to=1500, variable=self.rad, orient=Tkinter.HORIZONTAL, length=350, command=self.radius_changed_cb)\n self.radius.grid(column=2, row=0, sticky='w', padx=5, pady=1, columnspan=10)\n\n\n\n row = row + 1\n\n\n #ff = Tkinter.Frame(f)\n #ff.grid(column=0, row=row, sticky='w')\n #w = Scale(from_=0, to=100, resolution=0.1)\n\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n\n l = Tkinter.Label(ff, text = \" 5. 
Cross-correlation / Mask densities between\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" start radius: \", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n self.startRad = Tkinter.StringVar(ff)\n e = Tkinter.Entry(ff, width=7, textvariable=self.startRad)\n e.grid(column=1, row=0, sticky='w', padx=5, pady=1)\n\n l = Tkinter.Label(ff, text = \", end radius: \", anchor = 'w')\n l.grid(column=2, row=0, sticky='ew', padx=5, pady=1)\n\n self.endRad = Tkinter.StringVar(ff)\n e = Tkinter.Entry(ff, width=7, textvariable=self.endRad)\n e.grid(column=3, row=0, sticky='w', padx=5, pady=1)\n\n\n b = Tkinter.Button(ff, text=\"CC\", command=self.Icos2CC)\n b.grid (column=4, row=0, sticky='w', padx=5, pady=1)\n\n #b = Tkinter.Button(ff, text=\"+CC\", command=self.Icos2PushCC)\n #b.grid (column=5, row=0, sticky='w', padx=5, pady=1)\n\n\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" 6. Radii separated by commas:\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" \", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n self.segRads = Tkinter.StringVar(ff)\n\n if 0 or dev_menus :\n self.segRads.set ( \"\" )\n\n e = Tkinter.Entry(ff, width=40, textvariable=self.segRads)\n e.grid(column=1, row=0, sticky='w', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" \", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n b = Tkinter.Button(ff, text=\"Mask Map\", command=self.Icos2Map0)\n b.grid (column=1, row=0, sticky='w', padx=5, pady=1)\n\n b = Tkinter.Button(ff, text=\"Group Regions\", command=self.Segment2)\n b.grid (column=2, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=row,column=0,columnspan=7, pady=7, sticky='we')\n\n\n row = row + 1\n self.msg = Tkinter.Label(parent, width = 60, anchor = 'w', justify = 'left', fg=\"red\")\n self.msg.grid(column=0, row=row, sticky='ew', padx=5, pady=1)\n row += 1\n\n\n def umsg ( self, txt ) :\n print txt\n self.status ( txt )\n\n def status ( self, txt ) :\n txt = txt.rstrip('\\n')\n self.msg.configure(text = txt)\n self.msg.update_idletasks()\n\n\n\n\n def Icos2 ( self ) :\n\n imod = self.GetMod (\"Icosahedron\")\n\n axmods = []\n for m in chimera.openModels.list() :\n if m.name == \"Icosahedron Faces\" :\n axmods.append ( m )\n\n if len(axmods) > 0 :\n chimera.openModels.close ( axmods )\n\n if imod == None :\n self.umsg ( \"No Icosahedron model found - please follow step 2.\" )\n return\n\n\n if len(imod.surfacePieces) <> 1 :\n self.umsg ( \"Please set 'Subdivision factor' to 1\" )\n return\n\n\n print len(imod.surfacePieces[0].geometry[1]), \" tris\"\n print len(imod.surfacePieces[0].geometry[0]), \" verts\"\n\n if len(imod.surfacePieces[0].geometry[1]) <> 20 :\n self.umsg ( \"Please set 'Subdivision factor' to 1\" )\n return\n\n\n self.umsg ( \"Building Icos2\" )\n\n\n import _surface\n surf_mod = _surface.SurfaceModel()\n surf_mod.name = \"Icosahedron Faces\"\n chimera.openModels.add([surf_mod], sameAs = imod)\n\n import axes; 
reload (axes)\n\n self.icos_vecs = []\n from numpy import arccos, pi\n\n\n for p in imod.surfacePieces :\n v, t = p.geometry[0], p.geometry[1]\n #print len(v), len(t)\n\n #for pt in v :\n # print \" - pt: \", pt\n\n surf_mod.icosVerts0 = numpy.copy ( v )\n surf_mod.icosVerts = numpy.copy ( v )\n surf_mod.icosTris = numpy.copy ( t )\n surf_mod.nvecs = numpy.zeros ( (len(t), 3) )\n surf_mod.sps = []\n\n\n for ti, tri in enumerate ( t ) :\n #print \" - tri: \", tri,\n p1 = v [ tri[0] ]\n p2 = v [ tri[1] ]\n p3 = v [ tri[2] ]\n\n mp = (p1 + p2 + p3) / 3.0\n pv = chimera.Vector ( mp[0], mp[1], mp[2] )\n r = pv.length\n pv.normalize()\n #print mp\n #self.icos_vecs.append ( pv )\n mp = mp / r\n\n #cyl = axes.AddCylinderSolid ( chimera.Vector(0,0,0), pv, r, (.6,.4,.4,1), 10.0, surf_mod )\n #cyl.name = \"Icosahedron_Axes\"\n\n sp = axes.TriangleMeshDiv ( p1, p2, p3, 50.0, None, None, surf_mod )\n #sp = surf_mod.surfacePieces [ len(surf_mod.surfacePieces)-1 ]\n sp.N = numpy.array ( pv, numpy.float32 )\n #surf_mod.nvecs.append ( mp )\n surf_mod.nvecs[ti] = mp\n surf_mod.sps.append ( sp )\n sp.ind = ti\n\n\n #p1v = chimera.Vector ( p1[0], p1[1], p1[2] ); p1v.normalize ()\n #p2v = chimera.Vector ( p2[0], p2[1], p2[2] ); p2v.normalize ()\n #p3v = chimera.Vector ( p3[0], p3[1], p3[2] ); p3v.normalize ()\n\n #a1 = arccos ( p1v * pv ) * 180.0 / pi\n #a2 = arccos ( p2v * pv ) * 180.0 / pi\n #a3 = arccos ( p3v * pv ) * 180.0 / pi\n\n #a12 = arccos ( p1v * p2v ) * 180.0 / pi\n\n #print a1, a2, a3, a12\n\n #if ti >= 0 :\n # break\n\n p1 = surf_mod.icosVerts0 [ surf_mod.icosTris[0][0] ]\n r0 = numpy.sqrt ( numpy.sum(p1*p1) )\n\n self.umsg ( \"Made Icos2 from %d sps in %s -> %d sps, rad %.1f\" % (len(imod.surfacePieces), imod.name, len(surf_mod.surfacePieces), r0 ) )\n\n self.rad.set ( r0 )\n\n\n\n\n def ToggleDisp ( self ) :\n\n smod = self.GetMod ( \"Icosahedron Faces\" )\n if smod == None :\n self.status ( \"Did not find Icos2\" )\n return\n\n import _surface\n nmod = _surface.SurfaceModel()\n nmod.name = smod.name\n\n nmod.icosVerts0 = smod.icosVerts0\n nmod.icosVerts = smod.icosVerts\n nmod.icosTris = smod.icosTris\n nmod.nvecs = smod.nvecs\n nmod.sps = []\n\n for spi, sp in enumerate ( smod.sps ) :\n\n v, t = sp.geometry\n #print \" sp %d - %d verts, %d tris\" % (spi, len(v), len(t) )\n\n if len(v) > 0 and len(t) > 0 :\n ns = nmod.addPiece ( v, t, sp.color )\n nmod.sps.append ( ns )\n ns.N = sp.N\n ns.ind = spi\n if hasattr ( sp, 'verts0' ) :\n ns.verts0 = sp.verts0\n\n if sp.displayStyle == sp.Mesh :\n ns.displayStyle = sp.Solid\n else :\n ns.displayStyle = sp.Mesh\n\n chimera.openModels.close ( [smod] )\n #chimera.openModels.add([nmod], sameAs = smod)\n chimera.openModels.add ( [nmod] )\n smod = nmod\n\n\n self.status ( \"Toggle Display %s - %d surfaces\" % ( smod.name, len(smod.surfacePieces) ) )\n\n\n\n def NearMaps ( self, sp ) :\n\n #print \" - making near maps\"\n\n verts, tris = sp.geometry\n\n nmaps = [ None ] * len(verts)\n for vi in range (len(verts)) :\n #nsets[vi] = sets.Set()\n nmaps[vi] = {}\n\n def setn ( vi1, vi2 ) :\n #nsets[ t[0] ].add ( t[1] )\n s = nmaps[vi1]\n if vi2 not in s :\n v = verts[vi1] - verts[vi2]\n s[vi2] = numpy.sqrt ( numpy.sum(v*v) )\n\n for t in tris :\n setn ( t[0], t[1] )\n setn ( t[0], t[2] )\n setn ( t[1], t[0] )\n setn ( t[1], t[2] )\n setn ( t[2], t[0] )\n setn ( t[2], t[1] )\n\n return nmaps\n\n\n\n\n def Icos2Push ( self ) :\n\n smod = self.GetMod ( \"Icosahedron Faces\" )\n\n if smod == None :\n self.status ( \"Did not find Icos2\" )\n return\n\n N, f = 0, 
0.0\n try :\n N = int ( self.numIt.get() )\n except :\n self.umsg ( \"Invalid # iterations: \" + self.numIt.get() )\n return\n\n try :\n f = float ( self.springF.get() )\n except :\n self.umsg ( \"Invalid stiffness: \" + self.springF.get() )\n return\n\n\n\n self.Icos2PushN ( smod, N, f )\n #self.Icos2PushNSym ( smod, 50 )\n self.fi = 2\n\n self.status ( \"Pushing done - %d sps pushed\" % len(smod.surfacePieces) )\n\n\n\n # 700, .2 -- 875,921,964,1005,1025,1039,1150\n\n\n def Icos2PushN ( self, smod, N, springf ) :\n\n print \" - pushing %s, %d surfaces - %d iter \" % ( smod.name, len(smod.surfacePieces), N )\n\n for spi, sp in enumerate ( smod.surfacePieces ) :\n verts, tris = sp.geometry\n\n #print \" - surface piece %d points %d tris, \" % (len(verts), len(tris)), sp.N\n\n if not hasattr ( sp, 'nmaps' ) :\n sp.nmaps = self.NearMaps (sp)\n\n for iter in range ( N ) : # SGIV: 600\n for vi in range ( len(verts) ) :\n\n nmap = sp.nmaps[vi]\n\n f = 0.0\n if len(nmap) >= 6 :\n f = 1.0 # SGIV: 1\n\n #vv = verts[vi]\n #vvl = numpy.sqrt ( numpy.sum(vv*vv) )\n #vv = vv / vvl\n #fN = numpy.sum(vv*sp.N)\n\n fv = 0.1 * sp.N\n\n if 1 :\n\t\t for vj, eqd in nmap.iteritems() :\n\t\t v = verts[vj] - verts[vi]\n\t\t vl = numpy.sqrt ( numpy.sum(v*v) )\n\t\t vn = v / vl\n\t\t ff = vl - eqd\n\t\t fv = fv + springf * ff * vn # SGIV: 0.2\n\n verts[vi] = verts[vi] + f * fv\n\n if iter % 10 == 0 :\n self.status ( \"Pushing %d/%d - iter %d/%d - springf %.1f\" % (spi+1,len(smod.surfacePieces), iter, N, springf ) )\n\n sp.geometry = (verts,tris)\n sp.verts0 = numpy.copy ( verts )\n\n\n def Icos2PushNSym ( self, smod, N ) :\n\n print \" - pushing - sym - %s, %d surfaces - %d iter \" % ( smod.name, len(smod.surfacePieces), N )\n\n sp = smod.sps[0]\n verts,tris = sp.geometry\n\n if not hasattr ( sp, 'nmaps' ) :\n sp.nmaps = self.NearMaps (sp)\n\n for iter in range ( N ) : # SGIV: 600\n for vi in range ( len(verts) ) :\n\n nmap = sp.nmaps[vi]\n\n f = 0.0\n if len(nmap) >= 6 :\n f = 1.0 # SGIV: 1\n\n #vv = verts[vi]\n #vvl = numpy.sqrt ( numpy.sum(vv*vv) )\n #vv = vv / vvl\n #fN = numpy.sum(vv*sp.N)\n\n fv = 0.1 * sp.N\n\n if 1 :\n\t for vj, eqd in nmap.iteritems() :\n\t v = verts[vj] - verts[vi]\n\t vl = numpy.sqrt ( numpy.sum(v*v) )\n\t vn = v / vl\n\t ff = vl - eqd\n\t fv = fv + 0.2 * ff * vn # SGIV: 0.2\n\n verts[vi] = verts[vi] + f * fv\n\n if iter % 10 == 0 :\n self.status ( \"Pushing - iter %d/%d\" % ( iter, N ) )\n\n sp.geometry = (verts,tris)\n sp.verts0 = verts\n\n\n verts0, tris0 = smod.icosVerts0, smod.icosTris\n p1 = verts0 [ tris0[0][0] ]\n p2 = verts0 [ tris0[0][1] ]\n p3 = verts0 [ tris0[0][2] ]\n #mp = (p1 + p2 + p3) / 3.0\n a0 = numpy.array ( [p1,p2,p3] )\n #print a0\n\n import chimera.match\n\n for ti, tri in enumerate ( smod.icosTris[1:] ) :\n\n q1 = verts [ tri[0] ]\n q2 = verts [ tri[1] ]\n q3 = verts [ tri[2] ]\n a1 = numpy.array ( [q1,q2,q3] )\n #print a2\n\n xf = chimera.match.matchPositions ( numpy.array(a1,numpy.float), numpy.array(a0,numpy.float) )\n\n sp1 = smod.sps[ti]\n verts1, tris1 = sp1.geometry\n\n newv = numpy.zeros ( (len(verts),3) )\n\n for vi, v in enumerate ( verts ) :\n tp = xf[0].apply ( chimera.Point( v[0], v[1], v[2] ) )\n #print v, \"->\", tp\n newv[vi] = numpy.array ( tp )\n\n sp1.geometry = (newv,tris1)\n sp1.verts0 = newv\n\n\n\n\n\n\n\n def Icos2PushCC ( self ) :\n\n smod = self.GetMod ( \"Icosahedron Faces\" )\n\n if smod == None :\n self.status ( \"Did not find Icos2\" )\n return\n\n print \"Push/CC...\"\n\n self.Icos2PushN ( smod, 100 )\n\n for i in range ( 20 ) :\n 
self.Icos2PushN ( smod, 100 )\n self.fi = 200 + i*100\n self.Icos2CC ()\n self.updateIcos2 ( 1110 )\n\n\n delattr ( self, 'fi' )\n\n\n\n def Icos2CC ( self ) :\n\n smod = self.GetMod ( \"Icosahedron Faces\" )\n\n if smod == None :\n self.umsg ( \"No Icos2 found\" )\n return\n\n dmap = segmentation_map()\n if dmap == None :\n self.umsg ( \"No map selected\" )\n return\n\n\n start_rad, end_rad = 0, 0\n try :\n start_rad = int ( self.startRad.get() )\n except :\n self.umsg ( \"Invalid start radius: \" + self.startRad.get() )\n return\n\n try :\n end_rad = int ( self.endRad.get() )\n except :\n self.umsg ( \"Invalid end radius: \" + self.endRad.get() )\n return\n\n\n if end_rad <= start_rad :\n self.umsg ( \"End rad should be larger than start rad :) \" )\n return\n\n\n self.umsg ( \"CC in %s\" % dmap.name )\n\n fname = \"IcosCC.txt\"\n if hasattr ( self, 'fi' ) :\n fname = \"IcosCC_%d.txt\" % self.fi\n\n\n p1 = smod.icosVerts [ smod.icosTris[0][0] ]\n rS = numpy.sqrt ( numpy.sum(p1*p1) )\n print \" - rad before: \", rS\n\n ccs = []\n #fp = open ( fname, \"w\" )\n for rad in range ( start_rad, end_rad+1 ) :\n self.updateIcos2 ( rad )\n cc = self.IcosCC ( smod, dmap )\n self.status ( \"Rad: %d, CC: %.4f\" % (rad, cc) )\n #fp.write ( \"%d\\t%f\\n\" % (rad, cc) )\n ccs.append ( [rad, cc] )\n\n #fp.close ()\n\n self.updateIcos2 ( rS )\n\n\n def save ( okay, dialog ):\n if okay:\n paths = dialog.getPaths ( )\n if paths:\n path = paths[0]\n self.umsg ( \"Saved CCs to: \" + path )\n f = open ( path, \"w\" )\n for rad,cc in ccs :\n f.write ( \"%d\\t%f\\n\" % (rad, cc) )\n f.close()\n\n from OpenSave import SaveModeless\n SaveModeless ( title = 'Save Cross Correlations',\n filters = [('TXT', '*.txt', '.txt')],\n initialfile = \"rad_cc.txt\", command = save )\n\n\n\n\n\n\n\n def IcosCC ( self, smod, dmap ) :\n #newv = numpy.zeros_like ( verts )\n numv = len(smod.surfacePieces[0].geometry[0]) * len(smod.surfacePieces)\n #print \"%d verts, %d sps, %d points\" % ( len(smod.surfacePieces[0].geometry[0]), len(smod.surfacePieces), numv )\n newv = numpy.zeros ( (numv,3) )\n\n for spi, sp in enumerate ( smod.sps ) :\n verts, tris = sp.geometry\n v0 = spi * len(smod.surfacePieces[0].geometry[0])\n v1 = v0 + len(smod.surfacePieces[0].geometry[0])\n newv[v0:v1] = verts\n\n #print newv\n map_values = dmap.interpolated_values ( newv, dmap.openState.xform )\n #print map_values\n\n olap, cor = FitMap.overlap_and_correlation ( numpy.ones_like(map_values), map_values )[:2]\n #print olap, cor\n return cor\n\n\n def set_rad_changed_cb ( self, newRad ) :\n\n #print newRad\n try :\n nrad = int ( newRad )\n self.radius.set ( nrad )\n except :\n pass\n\n\n def radius_changed_cb(self, newRad) :\n\n #radius = self.radius.value(1000)\n #print \"Radius: \", newRad\n #self.setRad.set ( newRad )\n\n\n radius = int ( newRad )\n\n self.updateIcos2 ( radius )\n\n\n\n def updateIcos2 ( self, rad ) :\n\n smod = self.GetMod ( \"Icosahedron Faces\" )\n if smod == None :\n #self.umsg ( \"No Icosahedron2 model found\" )\n return\n\n verts, tris = smod.icosVerts0, smod.icosTris\n p1 = verts [ tris[0][0] ]\n p2 = verts [ tris[0][1] ]\n p3 = verts [ tris[0][2] ]\n mp = (p1 + p2 + p3) / 3.0\n rad0 = numpy.sqrt ( numpy.sum(p1*p1) )\n rad1 = numpy.sqrt ( numpy.sum(mp*mp) )\n\n fscale = rad / rad0\n sphf = 1.0 - min ( rad, rad0 ) / rad0\n #self.status ( \"Rad: %.3f -- rad: %.3f, midRad: %.3f, f: %.3f\" % (rad, rad0, rad1, sphf) )\n\n for spi, sp in enumerate ( smod.surfacePieces ) :\n #sp0 = imod.surfacePieces[spi]\n verts, tris = sp.geometry\n\n 
if not hasattr ( sp, 'verts0' ) :\n sp.verts0 = verts\n #print \"set init verts\"\n\n #print \" - surface piece %d points %d tris, \" % (len(verts), len(tris)), sp.N\n newv = numpy.zeros_like ( verts )\n\n for vi, v in enumerate ( verts ) :\n iv = fscale * sp.verts0[vi]\n newv[vi] = iv\n #vv = v / numpy.sqrt ( numpy.sum (v*v) )\n #sv = vv * min ( rad, rad0 )\n #newv[vi] = sphf * sv + (1.0-sphf) * iv\n\n sp.geometry = (newv,tris)\n\n\n for vi, v in enumerate ( smod.icosVerts0 ) :\n smod.icosVerts[vi] = fscale * smod.icosVerts0[vi]\n\n #p1 = smod.icosVerts [ tris[0][0] ]\n #r = numpy.sqrt ( numpy.sum(p1*p1) )\n #p1 = smod.icosVerts0 [ tris[0][0] ]\n #r0 = numpy.sqrt ( numpy.sum(p1*p1) )\n #print \"Icos - rad %.4f, orig: %.4f\" % (r, r0)\n\n\n\n\n def GetMod ( self, name ) :\n\n for m in chimera.openModels.list() :\n if m.name == name :\n return m\n return None\n\n\n def MakeTNorms ( self, smod ) :\n\n self.umsg ( \"Making triangle norms for %d\" % len(smod.sps) )\n\n for spi, sp in enumerate ( smod.sps ) :\n\n verts2, tris2 = sp.geometry\n\n #sp.tdirs = [None] * len(tris2)\n sp.tdirs = numpy.zeros ( ( len(tris2), 3 ) )\n sp.tnorms = [None] * len(tris2)\n\n\n for ti, tri in enumerate ( tris2 ) :\n p1 = verts2 [ tri[0] ]\n p2 = verts2 [ tri[1] ]\n p3 = verts2 [ tri[2] ]\n mp = (p1 + p2 + p3) / 3.0\n l = numpy.sqrt ( numpy.sum(mp*mp) )\n sp.tdirs[ti] = mp / l\n\n v1 = p2 - p1\n v2 = p3 - p1\n N = numpy.cross ( v1, v2 )\n l = numpy.sqrt ( numpy.sum(N*N) )\n sp.tnorms [ti] = N / l\n\n\n\n def MinRad2 ( self, smod ) :\n minr = 1e9\n for sp in smod.surfacePieces :\n verts2, tris2 = sp.geometry\n for v in verts2 :\n r = numpy.sum ( v * v )\n if r < minr :\n minr = r\n #return numpy.sqrt ( minr )\n return minr\n\n def MaxRad2 ( self, smod ) :\n maxr = 0\n for sp in smod.surfacePieces :\n verts2, tris2 = sp.geometry\n for v in verts2 :\n r = numpy.sum ( v * v )\n if r > maxr :\n maxr = r\n #return numpy.sqrt ( maxr )\n return maxr\n\n\n def PIsOutside ( self, p, smod ) :\n\n #print \"pt - %d surfps\" % len(surfm.surfacePieces)\n\n #min_i = 0\n #max_d = -1e7\n #max_n = None\n #for nvi, nv in enumerate ( smod.nvecs ) :\n # d = numpy.dot ( p, nv )\n # if d > max_d :\n # min_i = nvi\n # max_d = d\n # max_n = nv\n\n max_i = numpy.argmax ( numpy.sum ( smod.nvecs * p, axis = 1 ) )\n max_n = smod.nvecs [ max_i ]\n\n tri = smod.icosTris [ max_i ]\n\n p1 = smod.icosVerts [ tri[0] ]\n #p2 = smod.icosVerts [ tri[1] ]\n #p3 = smod.icosVerts [ tri[2] ]\n\n #v1 = p2 - p1\n #v2 = p3 - p1\n #N = numpy.cross ( v1, v2 )\n\n pv = p - p1\n d = numpy.dot ( pv, max_n )\n if d <= 0.0 :\n #print \" - inside the tri \", min_i\n return False\n\n #return True\n\n\n sp = smod.sps[max_i]\n\n #if sp.ind != min_i and not hasattr (sp, 'flagged') :\n # print sp.ind, \"?\"\n # sp.flagged = True\n\n\n verts2, tris2 = sp.geometry\n\n #if not hasattr ( sp, 'tdirs' ) :\n #sp.tdirs = [None] * len(tris2)\n #sp.tnorms = [None] * len(tris2)\n\n #min_i = 0\n #max_d = -1e7\n\n #for ti, tri in enumerate ( tris2 ) :\n # d = numpy.dot ( p, sp.tdirs[ti] )\n # if d > max_d :\n # max_d = d\n # min_i = ti\n\n\n max_i = numpy.argmax ( numpy.sum ( sp.tdirs * p, axis = 1 ) )\n\n tri = tris2[max_i]\n\n p1 = verts2 [ tri[0] ]\n pv = p - p1\n d = numpy.dot ( pv, sp.tnorms [max_i] )\n if d <= 0.0 :\n #print \" - inside the tri \", min_i\n return False\n\n\n return True\n\n\n\n\n def Icos2Map0 ( self ) :\n\n smod = self.GetMod ( \"Icosahedron Faces\" )\n if smod == None :\n self.umsg ( \"No Icosahedron2 model found\" )\n return\n\n\n dmap = 
segmentation_map()\n if dmap == None :\n self.umsg ( \"Select a map in Segment Map dialog\" )\n return\n\n\n sepRs = self.segRads.get().split(\",\")\n print \"Sep rads:\", sepRs\n\n if len(sepRs) != 2 :\n self.umsg ( \"Enter two radii separated by a comma\" )\n return\n\n\n\n try :\n start_rad = int ( sepRs[0] )\n except :\n self.umsg ( \"Invalid start radius: \" + sepRs[0] )\n return\n\n try :\n end_rad = int ( sepRs[1] )\n except :\n self.umsg ( \"Invalid end radius: \" + sepRs[1] )\n return\n\n\n if end_rad <= start_rad :\n self.umsg ( \"End rad should be larger than start rad :) \" )\n return\n\n\n self.umsg ( \"Mask %s, %d -> %d\" % (dmap.name,start_rad,end_rad) )\n\n self.MakeTNorms ( smod )\n\n\n import time\n start = time.time()\n\n mm = dmap.full_matrix ()\n #m1 = numpy.zeros_like ( mm )\n\n # transform to index reference frame of ref_map\n f1 = dmap.data.ijk_to_xyz_transform\n\n from _contour import affine_transform_vertices as transform_vertices\n #f2 = xform_matrix ( mask_map.openState.xform )\n #f3 = xform_matrix ( ref_map.openState.xform.inverse() )\n #f4 = ref_map.data.xyz_to_ijk_transform\n #tf = multiply_matrices( f2, f1 )\n #tf = multiply_matrices( f3, tf )\n #tf = multiply_matrices( f4, tf )\n\n nm = numpy.zeros_like ( mm )\n\n self.updateIcos2 ( start_rad )\n minr, maxr = self.MinRad2 ( smod ), self.MaxRad2 ( smod )\n print \" - start rad %d -- min rad %.1f, max rad %.1f\" % ( start_rad, numpy.sqrt(minr), numpy.sqrt(maxr))\n\n done = time.time()\n elapsed = done - start\n print \"Took: \", elapsed\n\n\n pt = numpy.array ( [[0,0,0]], numpy.float32 )\n p = pt[0]\n\n for i in range ( dmap.data.size[0] ) :\n self.status ( \"Masking %s, outside radius %d, %d/%d\" % (dmap.name, start_rad, i+1, dmap.data.size[0]) )\n p[0] = i * f1[0][0] + f1[0][3]\n for j in range ( dmap.data.size[1] ) :\n p[1] = j * f1[1][1] + f1[1][3]\n for k in range ( dmap.data.size[2] ) :\n \t#p[2] = k * f1[2][2] + f1[2][3]\n #pt = numpy.array ( [[i,j,k]], numpy.float32 )\n #p[0],p[1],p[2] = ti,tj,tk\n #transform_vertices ( pt, f1 )\n\t\t\t\t\tp[2] = k * f1[2][2] + f1[2][3]\n\t\t\t\t\tptr = numpy.sum ( p*p )\n\t\t\t\t\tif ptr < minr :\n\t\t\t\t\t pass\n\t\t\t\t\telif ptr > maxr :\n\t\t\t\t\t nm[k,j,i] = mm[k,j,i]\n\t\t\t\t\telif self.PIsOutside ( pt[0], smod ) :\n\t\t\t\t\t nm[k,j,i] = mm[k,j,i]\n\n\n self.updateIcos2 ( end_rad )\n minr, maxr = self.MinRad2 ( smod ), self.MaxRad2 ( smod )\n print \" - end rad %d -- min rad %.1f, max rad %.1f\" % (start_rad, numpy.sqrt(minr), numpy.sqrt(maxr))\n\n for i in range ( dmap.data.size[0] ) :\n self.status ( \"Masking %s, inside radius %d, %d/%d\" % (dmap.name, end_rad, i+1, dmap.data.size[0]) )\n p[0] = i * f1[0][0] + f1[0][3]\n for j in range ( dmap.data.size[1] ) :\n p[1] = j * f1[1][1] + f1[1][3]\n for k in range ( dmap.data.size[2] ) :\n #pt = numpy.array ( [[i,j,k]], numpy.float32 )\n #p[0],p[1],p[2] = ti,tj,tk\n #transform_vertices ( pt, f1 )\n\t\t\t\t\tp[2] = k * f1[2][2] + f1[2][3]\n\t\t\t\t\tptr = numpy.sum ( p*p )\n\t\t\t\t\tif ptr < minr :\n\t\t\t\t\t continue\n\t\t\t\t\telif ptr > maxr :\n\t\t\t\t\t nm[k,j,i] = 0.0\n\t\t\t\t\telif self.PIsOutside ( p, smod ) :\n\t\t\t\t\t nm[k,j,i] = 0.0\n\n\n\n\n ndata = VolumeData.Array_Grid_Data ( nm, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n try : nvg = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nvg = VolumeViewer.volume.volume_from_grid_data ( ndata )\n nvg.name = dmap.name + \"__%d--to--%d_fast\" % (start_rad, end_rad)\n\n done = time.time()\n elapsed = done - start\n print 
\"Took: \", elapsed\n\n\n def Icos2Map0 ( self ) :\n\n dmap = segmentation_map()\n if dmap == None :\n self.umsg ( \"Select a map in Segment Map dialog\" )\n return\n\n\n mm = dmap.full_matrix ()\n #m1 = numpy.zeros_like ( mm )\n\n # transform to index reference frame of ref_map\n f1 = dmap.data.ijk_to_xyz_transform\n\n nm = numpy.zeros_like ( mm )\n\n minr, maxr = 300, 400\n pt = numpy.array ( [[0,0,0]], numpy.float32 )\n p = pt[0]\n\n im, jm, km = dmap.data.size[0]/2, dmap.data.size[1]/2, dmap.data.size[2]/2\n\n for i in range ( dmap.data.size[0] ) :\n self.status ( \"Masking %s %.1f->%.1f, %d/%d\" % (dmap.name, minr, maxr, i+1, dmap.data.size[0]) )\n di = abs(i-im) * dmap.data.step[0]\n\n for j in range ( dmap.data.size[1] ) :\n dj = abs(j-jm) * dmap.data.step[1]\n\n for k in range ( dmap.data.size[2] ) :\n dk = abs(k-km) * dmap.data.step[2]\n r = numpy.sqrt ( di*di + dj*dj + dk*dk )\n if dk >= minr and dk < maxr :\n nm[k,j,i] = mm[k,j,i]\n\n\n ndata = VolumeData.Array_Grid_Data ( nm, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n try : nvg = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nvg = VolumeViewer.volume.volume_from_grid_data ( ndata )\n nvg.name = dmap.name + \"__%.0f--to--%.0f\" % (minr, maxr)\n\n\n\n\n\n def Segment2 ( self ) :\n\n dmap = segmentation_map()\n if dmap == None :\n self.umsg ( \"Please select a map in the Segment Map Dialog\" )\n return\n\n smod = current_segmentation ()\n if smod == None :\n self.umsg ( \"Please select a Current Segmentation in the Segment Map dialog\" )\n return\n\n print \"Seg has %d regions\" % (len(smod.regions))\n\n\n imod2 = self.GetMod ( \"Icosahedron Faces\" )\n if imod2 == None :\n self.umsg ( \"No Icosahedron2 model found\" )\n return\n\n\n sepRs = []\n for rstr in self.segRads.get().split(\",\") :\n try :\n radv = float(rstr)\n except :\n self.umsg ( \"Error parsing distances; enter only numbers and commas\" )\n return\n\n sepRs.append ( radv )\n\n print \"Sep rads:\", sepRs\n regs = list(smod.regions)\n sregs = []\n\n f1 = dmap.data.ijk_to_xyz_transform\n from _contour import affine_transform_vertices as transform_vertices\n\n\n self.MakeTNorms ( imod2 )\n\n\n for i, srad in enumerate ( sepRs ) :\n\n self.umsg ( \"Segmenting using %s - rad %.1f - %d regs\" % ( imod2.name, srad, len(regs) ) )\n self.updateIcos2 ( srad )\n\n gregs, left_regs = [], []\n\n for ri, r in enumerate ( regs ) :\n\n p = r.max_point\n #pt = numpy.array ( [ [ p[2],p[1],p[0] ] ], numpy.float32 )\n pt = numpy.array ( [ [ p[2],p[1],p[0] ] ], numpy.float32 )\n transform_vertices ( pt, f1 )\n\n c = r.center_of_points()\n ptc = numpy.array ( [ c ], numpy.float32 )\n\n #print ri, p, c, pt[0]\n #return\n\n if self.PIsOutside ( ptc[0], imod2 ) :\n #print \" - outside\"\n left_regs.append ( r )\n else :\n #print \" - inside\"\n gregs.append ( r )\n\n if ri % 1000 == 0 :\n self.status ( \"Segmenting using %s - rad %.1f - %s/%s regs\" % ( imod2.name, srad, \"{:,}\".format(ri), \"{:,}\".format(len(regs)) ) )\n\n\n sregs.append ( gregs )\n regs = left_regs\n print \" - rad %.1f - %d regions inside\" % ( srad, len(gregs) )\n\n print \" - remaining %d regions\" % ( len(regs) )\n sregs.append ( regs )\n\n\n for i, regs in enumerate (sregs) :\n self.status ( \"Segmenting, layer %d - %d regs\" % (i, len(regs)) )\n if len(regs) > 1 :\n try :\n smod.join_regions ( regs )\n except :\n self.umsg ( \"An error occurred - regions may have changed - please start again.\" )\n smod.display_regions()\n return\n\n smod.display_regions()\n\n self.umsg ( \"Done, 
created %d groups based on radial distances\" % len(sregs) )\n\n from segment_dialog import volume_segmentation_dialog\n volume_segmentation_dialog().ReportRegionCount ( smod )\n\n\n\n\n\n\n def LineCC ( self ) :\n\n dmap = segmentation_map()\n if dmap == None :\n umsg ( \"No map selected\" )\n return\n\n\n from chimera import Molecule\n mlist = OML(modelTypes = [Molecule])\n if len(mlist) == 0 :\n umsg ( \"No molecule found\" )\n return\n\n mol = mlist[0]\n\n print \"Doing line CC in \" + dmap.name + \" using mol \" + mol.name\n\n print dmap.openState.xform\n print mol.openState.xform\n\n\n rccs = []\n rmap = None\n rmap_pos = None\n rpoints, rpoint_weights = None, None\n xf = None\n\n resolution = 10.0\n\n for ri, res in enumerate ( mol.residues ) :\n try :\n cat = res.atomsMap[\"CA\"][0]\n except :\n continue\n\n if rmap == None :\n rmap = makeMap ( \"#%d:%d@CA\" % (mol.id, res.id.position)\n , resolution, 1, (.5, .5, .5, 1.0), \"resmap\" )\n rmap_pos = cat.coord().toVector()\n print \" - sphere map pos \", rmap_pos\n #rpoints, rpoint_weights = fit_points (rmap)\n rpoints, rpoint_weights = fit_points_old (rmap)\n xf = rmap.openState.xform\n\n break\n\n\n for radi in range ( 0, 1300, 1 ) :\n\n #d = cat.coord() - rmap_pos\n d = chimera.Vector(0,0,radi) - rmap_pos\n #print chimera.Vector(0,0,radi)\n trx = chimera.Xform.translation ( d )\n #xf = dmap.openState.xform.inverse\n xf2 = xf.__copy__()\n xf2.multiply ( trx )\n\n rmap.openState.xform = xf2\n break\n\n if 1 :\n rmap_values = dmap.interpolated_values ( rpoints, xf2 )\n olap, corr = overlap_and_correlation ( rpoint_weights, rmap_values )\n\n if radi % 100 == 0 :\n print \" %d - overlap: %f, cross-correlation: %f\" % (radi, olap, corr)\n\n rccs.append ( [radi,corr] )\n #print corr,\n\n #chimera.openModels.close ( rmap )\n\n\n fp = open ( \"lineCC.txt\", \"w\" )\n for rad, cc in rccs :\n fp.write ( \"%d\\t%f\\n\" % (rad, cc) )\n\n fp.close ()\n\n\n\ndef overlap_and_correlation ( v1, v2 ):\n import FitMap\n olap, cor = FitMap.overlap_and_correlation ( v1, v2 )[:2]\n return olap, cor\n\n\n\n\n\ndef fit_points_old ( fmap, threshold = None ) :\n\n f_m = fmap.data.full_matrix();\n size = list(f_m.shape);\n size.reverse()\n\n points = VolumeData.grid_indices(size, numpy.single) # i,j,k indices\n _contour.affine_transform_vertices( points, fmap.data.ijk_to_xyz_transform )\n weights = numpy.ravel(f_m).astype(numpy.single)\n\n threshold = fmap.surface_levels[0]\n #threshold = .3 * max ( numpy.ravel(f_m).astype(numpy.single) )\n\n ge = numpy.greater_equal(weights, threshold)\n points = numpy.compress(ge, points, 0)\n weights = numpy.compress(ge, weights)\n nz = numpy.nonzero( weights )[0]\n\n if len(nz) < len (weights) :\n points = numpy.take( points, nz, axis=0 )\n weights = numpy.take(weights, nz, axis=0)\n\n #mass = numpy.sum(weights, dtype=numpy.single)\n #fmap.rotation_center = numpy.dot(weights,points) / mass\n\n if 1 : print \"FitPoints from %s with threshold %.4f, %d nonzero\" % (\n fmap.name, threshold, len(nz) )\n\n return points, weights\n\n\n\ndef makeMap ( sel_str, res, gridSpacing, clr, map_name ) :\n\n cmd = \"molmap %s %.3f sigmaFactor 0.187 gridSpacing %.3f replace false\" % (\n sel_str, res, gridSpacing )\n #print \">>>\", cmd\n chimera.runCommand ( cmd )\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n #print \" - found\", mod.name\n mv = mod\n mv.name = map_name\n if 0 :\n #print \" - saving to:\", map_name\n 
mv.write_file ( map_name, \"mrc\" )\n xf = mv.openState.xform\n #print \" - closing:\", map_name\n chimera.openModels.close ( mv )\n mv = VolumeViewer.open_volume_file ( map_name )[0]\n #print \" - opened:\", mv.name\n mv.openState.xform = xf\n break\n\n if mv == None :\n umsg (\"Map not generated.\")\n return\n\n mv.surface_levels[0] = 0.001\n\n ro = VolumeViewer.volume.Rendering_Options()\n mv.update_surface ( False, ro )\n for sp in mv.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 : sp.display = False\n sp.color = ( clr[0], clr[1], clr[2], clr[3] )\n\n return mv\n\n\n\n\ndef show_dialog (closeOld = True):\n\n from chimera import dialogs\n\n d = dialogs.find ( ISeg_Dialog.name, create=False )\n if d :\n if closeOld :\n d.toplevel_widget.update_idletasks ()\n d.Close()\n d.toplevel_widget.update_idletasks ()\n else :\n return d\n\n dialogs.register ( ISeg_Dialog.name, ISeg_Dialog, replace = True)\n\n d = dialogs.find ( ISeg_Dialog.name, create=True )\n # Avoid transient dialog resizing when created and mapped for first time.\n d.toplevel_widget.update_idletasks ()\n d.enter()\n\n return d\n\n\n\n# -----------------------------------------------------------------------------\n#\n", "id": "11790961", "language": "Python", "matching_score": 11.176777839660645, "max_stars_count": 6, "path": "Segger/iseg_dialog.py" }, { "content": "\n# Copyright (c) 2020 <NAME> - <EMAIL>\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport chimera\nimport os\nimport os.path\nimport Tkinter\nfrom CGLtk import Hybrid\nimport VolumeData\nimport _multiscale\nimport MultiScale.surface\nimport _surface\nimport numpy\nimport _contour\nimport Matrix\nimport VolumeViewer\nfrom sys import stderr\nfrom time import clock\nimport sets\nimport FitMap\n\nfrom axes import prAxes\nimport regions\nimport graph\nfrom Segger import dev_menus, timing, seggerVersion\n\nOML = chimera.openModels.list\n\nREG_OPACITY = 0.45\n\n\n# http://geomalgorithms.com/a06-_intersect-2.html\n\n\n\nfrom segment_dialog import current_segmentation, segmentation_map\n\n\nclass RSeg_Dialog ( chimera.baseDialog.ModelessDialog ):\n\n title = \"rSeg - Radial Segmentation (Segger v\" + seggerVersion + \")\"\n name = \"segger_rseg\"\n buttons = ( \"Close\" )\n help = 'https://cryoem.slac.stanford.edu/ncmi/resources/software/segger'\n\n def fillInUI(self, parent):\n\n self.group_mouse_mode = None\n\n tw = parent.winfo_toplevel()\n self.toplevel_widget = tw\n tw.withdraw()\n\n parent.columnconfigure(0, weight = 1)\n\n row = 0\n\n menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)\n tw.config(menu = menubar)\n\n f = Tkinter.Frame(parent)\n f.grid(column=0, row=row, sticky='ew')\n l = Tkinter.Label(f, text=' ')\n l.grid(column=0, row=row, sticky='w')\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \"1. Select map in Segment Map dialog, press Segment button.\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \"2. Optional - for icosahedral (not round) shells: \", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" A. Use Tools -> Higher-Order Structure -> Icosahedron Surface.\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" B. Match Icosahedron to current map & segmentation.\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" C. From Icosahedron Surface \", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n b = Tkinter.Button(ff, text=\"Find Axes\", command=self.Icos)\n b.grid (column=1, row=0, sticky='w', padx=5, pady=1)\n\n if dev_menus :\n\n b = Tkinter.Button(ff, text=\"Line CC\", command=self.LineCC)\n b.grid (column=2, row=0, sticky='w', padx=5, pady=1)\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \"3. 
Make histogram of distances from center of map to center of each region,\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" using\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n self.numBins = Tkinter.StringVar(ff)\n self.numBins.set ( \"600\" )\n e = Tkinter.Entry(ff, width=10, textvariable=self.numBins)\n e.grid(column=1, row=0, sticky='w', padx=5, pady=1)\n\n l = Tkinter.Label(ff, text = \"bins\", anchor = 'w')\n l.grid(column=2, row=0, sticky='ew', padx=5, pady=1)\n\n b = Tkinter.Button(ff, text=\"Make Histogram\", command=self.MakeHist)\n b.grid (column=3, row=0, sticky='w', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \"4. Plot histogram (e.g. using plot.ly), find distances with low values.\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \"5. Enter distances at which to separate regions, separated by commas:\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" \", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n self.segRads = Tkinter.StringVar(ff)\n\n if 0 or dev_menus :\n self.segRads.set ( \"1006\" )\n\n e = Tkinter.Entry(ff, width=40, textvariable=self.segRads)\n e.grid(column=1, row=0, sticky='w', padx=5, pady=1)\n\n\n b = Tkinter.Button(ff, text=\"Group\", command=self.Segment)\n b.grid (column=2, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n f = Tkinter.Frame(parent)\n f.grid(column=0, row=row, sticky='ew')\n l = Tkinter.Label(f, text=' ')\n l.grid(column=0, row=row, sticky='w')\n\n\n row += 1\n dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=row,column=0,columnspan=7, pady=7, sticky='we')\n\n\n row = row + 1\n self.msg = Tkinter.Label(parent, width = 60, anchor = 'w', justify = 'left', fg=\"red\")\n self.msg.grid(column=0, row=row, sticky='ew', padx=5, pady=1)\n row += 1\n\n\n def umsg ( self, txt ) :\n print txt\n self.status ( txt )\n\n def status ( self, txt ) :\n txt = txt.rstrip('\\n')\n self.msg.configure(text = txt)\n self.msg.update_idletasks()\n\n\n\n\n def Icos ( self ) :\n\n imod = None\n axmod = None\n for m in chimera.openModels.list() :\n if m.name == \"Icosahedron\" :\n imod = m\n if m.name == \"Icosahedron_Axes\" :\n axmod = m\n\n\n if axmod == None :\n pass\n else :\n chimera.openModels.close ( [axmod] )\n\n\n if imod == None :\n self.umsg ( \"No Icosahedron model found - please follow step 2.\" )\n return\n\n\n if len(imod.surfacePieces) <> 1 :\n self.umsg ( \"Please set 'Subdivision factor' to 1\" )\n return\n\n\n print len(imod.surfacePieces[0].geometry[1]), \" tris\"\n print len(imod.surfacePieces[0].geometry[0]), \" verts\"\n\n if len(imod.surfacePieces[0].geometry[1]) <> 20 :\n self.umsg ( \"Please set 'Subdivision factor' to 1\" )\n return\n\n\n self.umsg ( \"Building axes...\" )\n\n\n import _surface\n surf_mod = _surface.SurfaceModel()\n chimera.openModels.add([surf_mod], sameAs = imod)\n\n import axes; reload (axes)\n\n self.icos_vecs = []\n from numpy import arccos, pi\n\n\n\n for p in imod.surfacePieces 
:\n v, t = p.geometry[0], p.geometry[1]\n #print len(v), len(t)\n\n #for pt in v :\n # print \" - pt: \", pt\n\n for tri in t :\n #print \" - tri: \", tri,\n p1 = v [ tri[0] ]\n p2 = v [ tri[1] ]\n p3 = v [ tri[2] ]\n mp = (p1 + p2 + p3) / 3.0\n pv = chimera.Vector ( mp[0], mp[1], mp[2] )\n r = pv.length\n pv.normalize()\n #print mp\n self.icos_vecs.append ( pv )\n\n cyl = axes.AddCylinderSolid ( chimera.Vector(0,0,0), pv, r, (.6,.4,.4,1), 10.0, surf_mod )\n cyl.name = \"Icosahedron_Axes\"\n\n p1v = chimera.Vector ( p1[0], p1[1], p1[2] ); p1v.normalize ()\n p2v = chimera.Vector ( p2[0], p2[1], p2[2] ); p2v.normalize ()\n p3v = chimera.Vector ( p3[0], p3[1], p3[2] ); p3v.normalize ()\n\n a1 = arccos ( p1v * pv ) * 180.0 / pi\n a2 = arccos ( p2v * pv ) * 180.0 / pi\n a3 = arccos ( p3v * pv ) * 180.0 / pi\n\n a12 = arccos ( p1v * p2v ) * 180.0 / pi\n\n # print a1, a2, a3, a12\n\n\n minAng = 1e9\n pv1 = self.icos_vecs[0]\n for pv2 in self.icos_vecs[1:] :\n dp = pv1 * pv2\n ang = arccos ( dp )\n #print ang * 180.0 / pi\n\n self.umsg ( \"Axes built.\" )\n\n\n\n\n def MakeHist ( self ) :\n\n segMap = segmentation_map()\n if segMap == None :\n self.umsg ( \"Please select a map in the Segment Map Dialog\" )\n return\n\n import axes\n reload(axes)\n pts, weights = axes.map_points ( segMap )\n print len(pts)\n\n COM, U, S, V = prAxes ( pts )\n\n print \" - COM : \", COM\n\n\n smod = current_segmentation ()\n if smod == None :\n self.umsg ( \"Please select a Current Segmentation in the Segment Map dialog\" )\n return\n\n print \"Seg has %d regions\" % (len(smod.regions))\n\n\n if hasattr(self, 'icos_vecs') :\n self.umsg ( \"Making (icosahedrally corrected) histogram...\" )\n else :\n self.umsg ( \"Making histogram...\" )\n\n nregs, last = len(smod.regions), 0\n regs = list(smod.regions)\n distByReg = {}\n for ri, r in enumerate ( regs ) :\n\n if 0 and r.surface_piece != None :\n if r.surface_piece.display == False :\n print \"i\" + ri,\n continue\n try :\n p = r.center_of_points ()\n except :\n print \"+\"\n continue\n\n rvec = chimera.Vector ( p[0], p[1], p[2] ) - chimera.Vector (COM[0], COM[1], COM[2])\n rad = 0.0\n\n if hasattr(self, 'icos_vecs') :\n for ivec in self.icos_vecs :\n irad = ivec * rvec\n if irad > rad :\n rad = irad\n else :\n rad = rvec.length\n\n\n distByReg[r] = rad\n at = int(numpy.floor( 10.0 * (ri+1) / nregs ))\n if at > last :\n #print at,\n if hasattr(self, 'icos_vecs') :\n self.status ( \"Making (icosahedrally corrected) histogram %d regions, at %d\" % (len(regs), ri+1) )\n else :\n self.status ( \"Making histogram %d regions, at %d\" % (len(regs), ri+1) )\n last = at; at += 1\n\n print \"\"\n\n dists = distByReg.values ()\n maxDist = max (dists) + 0.01\n minDist = min (dists)\n nbins = int ( self.numBins.get() )\n dr = (maxDist - minDist) / float(nbins)\n print \"%d dists - max %.2f, min %.2f, nb %d, dr %.2f\" % (len(dists), maxDist, minDist, nbins, dr)\n\n bins = []\n for i in range (nbins) :\n bins.append ( [] )\n\n print \"bad bins: \",\n for regm, rad in distByReg.iteritems() :\n bini = int ( numpy.floor ( (rad - minDist) / dr ) )\n if bini >= len(bins) :\n print bini,\n bini = len(bins)-1\n bins[bini].append (regm)\n\n print \"\"\n\n\n\n if 0 :\n f = open ( \"rads.txt\", \"w\" )\n for k,regs in enumerate ( bins ) :\n v = len(regs)\n vmin = minDist + k * dr\n vmax = minDist + (k+1) * dr\n rm = .5 * (vmin + vmax)\n vn = v / (4 * 3.14 * rm * rm)\n f.write ( \"%d\\t%.2f\\t%.2f\\t%d\\t%f\\n\" % (k, vmin, vmax, v, vn) )\n f.close()\n\n self.distByReg = distByReg\n #print 
self.distByReg\n\n\n\n def save ( okay, dialog ):\n if okay:\n paths = dialog.getPaths ( )\n if paths:\n path = paths[0]\n self.umsg ( \"Saved plot to: \" + path )\n f = open ( path, \"w\" )\n for k,regs in enumerate ( bins ) :\n v = len(regs)\n vmin = minDist + k * dr\n vmax = minDist + (k+1) * dr\n rm = .5 * (vmin + vmax)\n vn = v / (4 * 3.14 * rm * rm)\n f.write ( \"%.2f,%d\\n\" % (vmin, v) )\n f.close()\n\n from OpenSave import SaveModeless\n SaveModeless ( title = 'Save Histogram',\n filters = [('TXT', '*.txt', '.txt')],\n initialfile = \"dist_hist.txt\", command = save )\n\n\n\n\n def Segment ( self ) :\n\n segMap = segmentation_map()\n if segMap == None :\n self.umsg ( \"Please select a map in the Segment Map Dialog\" )\n return\n\n smod = current_segmentation ()\n if smod == None :\n self.umsg ( \"Please select a Current Segmentation in the Segment Map dialog\" )\n return\n\n print \"Seg has %d regions\" % (len(smod.regions))\n\n\n print \"Seg rads:\", self.segRads.get()\n\n\n if hasattr(self, 'distByReg') :\n print \"Found distByReg\"\n else :\n self.umsg ( \"Make Histogram first.\" )\n return\n\n\n\n sepRs = []\n for rstr in self.segRads.get().split(\",\") :\n try :\n radv = float(rstr)\n except :\n self.umsg ( \"Error parsing distances; enter only numbers and commas\" )\n return\n\n sepRs.append ( radv )\n sepRs.append ( 1e99 )\n\n\n self.umsg ( \"Segmenting...\" )\n\n\n print \"Sep rads:\", sepRs\n sregs = []\n for r in sepRs :\n sregs.append ( [] )\n\n for reg, rad in self.distByReg.iteritems() :\n #if reg.surface_piece != None :\n # if reg.surface_piece.display == False :\n # continue\n\n minRad = 0.0\n for i, maxRad in enumerate ( sepRs ) :\n if rad > minRad and rad <= maxRad :\n sregs[i].append ( reg )\n break\n\n for i, regs in enumerate (sregs) :\n print \"%d - %d regs\" % (i, len(regs))\n if len(regs) > 1 :\n try :\n smod.join_regions ( regs )\n except :\n self.umsg ( \"An error occurred - regions may have changed - please start again.\" )\n smod.display_regions()\n return\n\n smod.display_regions()\n\n self.umsg ( \"Done, created %d groups based on radial distances\" % len(sregs) )\n\n from segment_dialog import volume_segmentation_dialog\n volume_segmentation_dialog().ReportRegionCount ( smod )\n\n\n\n\n def GetMod ( self, name ) :\n\n for m in chimera.openModels.list() :\n if m.name == name :\n return m\n return None\n\n\n\n\n def LineCC ( self ) :\n\n dmap = segmentation_map()\n if dmap == None :\n umsg ( \"No map selected\" )\n return\n\n\n from chimera import Molecule\n mlist = OML(modelTypes = [Molecule])\n if len(mlist) == 0 :\n umsg ( \"No molecule found\" )\n return\n\n mol = mlist[0]\n\n print \"Doing line CC in \" + dmap.name + \" using mol \" + mol.name\n\n print dmap.openState.xform\n print mol.openState.xform\n\n\n rccs = []\n rmap = None\n rmap_pos = None\n rpoints, rpoint_weights = None, None\n xf = None\n\n resolution = 10.0\n\n for ri, res in enumerate ( mol.residues ) :\n try :\n cat = res.atomsMap[\"CA\"][0]\n except :\n continue\n\n if rmap == None :\n rmap = makeMap ( \"#%d:%d@CA\" % (mol.id, res.id.position)\n , resolution, 1, (.5, .5, .5, 1.0), \"resmap\" )\n rmap_pos = cat.coord().toVector()\n print \" - sphere map pos \", rmap_pos\n #rpoints, rpoint_weights = fit_points (rmap)\n rpoints, rpoint_weights = fit_points_old (rmap)\n xf = rmap.openState.xform\n\n break\n\n\n for radi in range ( 0, 1300, 1 ) :\n\n #d = cat.coord() - rmap_pos\n d = chimera.Vector(0,0,radi) - rmap_pos\n #print chimera.Vector(0,0,radi)\n trx = chimera.Xform.translation ( 
d )\n #xf = dmap.openState.xform.inverse\n xf2 = xf.__copy__()\n xf2.multiply ( trx )\n\n rmap.openState.xform = xf2\n break\n\n if 1 :\n rmap_values = dmap.interpolated_values ( rpoints, xf2 )\n olap, corr = overlap_and_correlation ( rpoint_weights, rmap_values )\n\n if radi % 100 == 0 :\n print \" %d - overlap: %f, cross-correlation: %f\" % (radi, olap, corr)\n\n rccs.append ( [radi,corr] )\n #print corr,\n\n #chimera.openModels.close ( rmap )\n\n\n fp = open ( \"lineCC.txt\", \"w\" )\n for rad, cc in rccs :\n fp.write ( \"%d\\t%f\\n\" % (rad, cc) )\n\n fp.close ()\n\n\n\ndef overlap_and_correlation ( v1, v2 ):\n import FitMap\n olap, cor = FitMap.overlap_and_correlation ( v1, v2 )[:2]\n return olap, cor\n\n\n\n\n\ndef fit_points_old ( fmap, threshold = None ) :\n\n f_m = fmap.data.full_matrix();\n size = list(f_m.shape);\n size.reverse()\n\n points = VolumeData.grid_indices(size, numpy.single) # i,j,k indices\n _contour.affine_transform_vertices( points, fmap.data.ijk_to_xyz_transform )\n weights = numpy.ravel(f_m).astype(numpy.single)\n\n threshold = fmap.surface_levels[0]\n #threshold = .3 * max ( numpy.ravel(f_m).astype(numpy.single) )\n\n ge = numpy.greater_equal(weights, threshold)\n points = numpy.compress(ge, points, 0)\n weights = numpy.compress(ge, weights)\n nz = numpy.nonzero( weights )[0]\n\n if len(nz) < len (weights) :\n points = numpy.take( points, nz, axis=0 )\n weights = numpy.take(weights, nz, axis=0)\n\n #mass = numpy.sum(weights, dtype=numpy.single)\n #fmap.rotation_center = numpy.dot(weights,points) / mass\n\n if 1 : print \"FitPoints from %s with threshold %.4f, %d nonzero\" % (\n fmap.name, threshold, len(nz) )\n\n return points, weights\n\n\n\ndef makeMap ( sel_str, res, gridSpacing, clr, map_name ) :\n\n cmd = \"molmap %s %.3f sigmaFactor 0.187 gridSpacing %.3f replace false\" % (\n sel_str, res, gridSpacing )\n #print \">>>\", cmd\n chimera.runCommand ( cmd )\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n #print \" - found\", mod.name\n mv = mod\n mv.name = map_name\n if 0 :\n #print \" - saving to:\", map_name\n mv.write_file ( map_name, \"mrc\" )\n xf = mv.openState.xform\n #print \" - closing:\", map_name\n chimera.openModels.close ( mv )\n mv = VolumeViewer.open_volume_file ( map_name )[0]\n #print \" - opened:\", mv.name\n mv.openState.xform = xf\n break\n\n if mv == None :\n umsg (\"Map not generated.\")\n return\n\n mv.surface_levels[0] = 0.001\n\n ro = VolumeViewer.volume.Rendering_Options()\n mv.update_surface ( False, ro )\n for sp in mv.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 : sp.display = False\n sp.color = ( clr[0], clr[1], clr[2], clr[3] )\n\n return mv\n\n\n\n\ndef show_dialog (closeOld = True):\n\n from chimera import dialogs\n\n d = dialogs.find ( RSeg_Dialog.name, create=False )\n if d :\n if closeOld :\n d.toplevel_widget.update_idletasks ()\n d.Close()\n d.toplevel_widget.update_idletasks ()\n else :\n return d\n\n dialogs.register ( RSeg_Dialog.name, RSeg_Dialog, replace = True)\n\n d = dialogs.find ( RSeg_Dialog.name, create=True )\n # Avoid transient dialog resizing when created and mapped for first time.\n d.toplevel_widget.update_idletasks ()\n d.enter()\n\n return d\n\n\n\n# -----------------------------------------------------------------------------\n#\n", "id": "4342388", "language": "Python", "matching_score": 4.695650100708008, "max_stars_count": 6, "path": "Segger/rseg_dialog.py" }, { 
"content": "\n# Copyright (c) 2020 <NAME> - <EMAIL>\n# LICENCE - please see: https://opensource.org/licenses/MIT\n\n\nimport chimera\nimport os\nimport os.path\nimport Tkinter\nfrom CGLtk import Hybrid\nimport VolumeData\nimport _multiscale\nimport MultiScale.surface\nimport _surface\nimport numpy\nimport _contour\nimport Matrix\nimport VolumeViewer\nimport VolumeViewer.volume\nfrom sys import stderr\nfrom time import clock\nimport VolumeViewer\nimport json\nimport ttk\n\nOML = chimera.openModels.list\n\n\nREG_OPACITY = 0.45\n\n\ndef umsg ( txt ) :\n print txt\n status ( txt )\n\ndef status ( txt ) :\n txt = txt.rstrip('\\n')\n msg.configure(text = txt)\n msg.update_idletasks()\n\n\nclass BioMovie ( chimera.baseDialog.ModelessDialog ) :\n\n title = \"BioMovie 0.9.2\"\n name = \"BioMovie\"\n buttons = ( \"Close\" )\n help = 'https://cryoem.slac.stanford.edu/ncmi/resources/software/biomovie'\n\n\n def fillInUI(self, parent) :\n\n # these vars shoul be overwritted from the movie script\n self.framesPath = \"/Users/greg/dev/mol/chimera/frames/\"\n self.ffmpegPath = None\n self.scriptFun = None\n self.movieFormat = \".mov\"\n\n\n\n tw = parent.winfo_toplevel()\n self.toplevel_widget = tw\n tw.withdraw()\n\n\n row = 0\n\n menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)\n tw.config(menu = menubar)\n\n #f = Tkinter.Frame(parent)\n #f.grid(column=0, row=row, sticky='ew')\n\n #l = Tkinter.Label(f, text=' ')\n #l.grid(column=0, row=row, sticky='w')\n\n parent.columnconfigure(0, weight = 1)\n\n\n if 1 :\n row += 1\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w')\n\n l = Tkinter.Label(ff, text=\" \")\n l.grid(column=0, row=0, sticky='w')\n\n c = Hybrid.Checkbutton(ff, 'Save', False )\n c.button.grid (column=1, row=0, sticky='w')\n self.makeMovie = c.variable\n\n c = Hybrid.Checkbutton(ff, 'Stop', False )\n c.button.grid (column=2, row=0, sticky='w')\n self.stopMovie = c.variable\n\n l = Tkinter.Label(ff, text=\" Movie Name: \")\n l.grid(column=3, row=0, sticky='w')\n\n self.movieName = Tkinter.StringVar(parent)\n self.movieName.set ( \"_movie_name\" )\n e = Tkinter.Entry(ff, width=24, textvariable=self.movieName)\n e.grid(column=4, row=0, sticky='w', padx=5, pady=5)\n\n b = Tkinter.Button(ff, text=\"Go\", command=self.Go)\n b.grid (column=5, row=0, sticky='w', padx=5)\n\n\n if 0 :\n row += 1\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w')\n\n l = Tkinter.Label(ff, text=\" \")\n l.grid(column=0, row=0, sticky='w')\n\n b = Tkinter.Button(ff, text=\"Cycle\", command=self.Cycle)\n b.grid (column=1, row=0, sticky='w', padx=5)\n\n #b = Tkinter.Button(ff, text=\"FromInitPos\", command=self.FromInitPos)\n #b.grid (column=2, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Threshold\", command=self.CycleThr)\n b.grid (column=3, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Rot-X\", command=self.RotateX)\n b.grid (column=4, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Rot-Y\", command=self.RotateY)\n b.grid (column=5, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Rot-Z\", command=self.RotateZ)\n b.grid (column=6, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Rock\", command=self.Rock)\n b.grid (column=8, row=0, sticky='w', padx=5)\n\n #b = Tkinter.Button(ff, text=\"Conf\", command=self.Conf)\n #b.grid (column=9, row=0, sticky='w', padx=5)\n\n\n\n\n if 1 :\n row += 1\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='news')\n\n self.id_keyName = {}\n\n self.tree = 
ttk.Treeview(ff)\n\n #self.tree[\"columns\"]=(\"one\",\"two\",\"three\")\n self.tree.column(\"#0\", width=300, minwidth=100, stretch=Tkinter.YES)\n #self.tree.column(\"one\", width=150, minwidth=150, stretch=Tkinter.NO)\n #self.tree.column(\"two\", width=400, minwidth=200)\n #self.tree.column(\"three\", width=80, minwidth=50, stretch=Tkinter.NO)\n\n self.tree.heading(\"#0\",text=\"Views\",anchor=Tkinter.W)\n #self.tree.heading(\"one\", text=\"Date modified\",anchor=Tkinter.W)\n #self.tree.heading(\"two\", text=\"Type\",anchor=Tkinter.W)\n #self.tree.heading(\"three\", text=\"Size\",anchor=Tkinter.W)\n\n #self.tree.pack(side=Tkinter.TOP,fill=Tkinter.X)\n #self.tree.grid(column=0, row=0, sticky='nsew')\n #self.tree.pack(fill=Tkinter.BOTH, expand=1)\n #tree.place(x=0, y=0, relwidth=1, relheight=1)\n\n self.tree.grid(row = 0, column = 0, sticky='news')\n parent.columnconfigure(0, weight=1)\n parent.rowconfigure(row, weight = 1)\n ff.rowconfigure(0, weight = 1)\n ff.columnconfigure(0, weight=1)\n\n self.tree.bind('<<TreeviewSelect>>', self.select_mg_cb)\n self.tree.bind('<<TreeviewOpen>>', self.open_mg_cb)\n self.tree.bind('<<TreeviewClose>>', self.close_mg_cb)\n\n\n if 1 :\n row += 1\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w')\n\n l = Tkinter.Label(ff, text=\" View Name: \")\n l.grid(column=0, row=0, sticky='w')\n\n self.keyName = Tkinter.StringVar(parent)\n self.keyName.set ( \"Side\" )\n e = Tkinter.Entry(ff, width=15, textvariable=self.keyName)\n e.grid(column=1, row=0, sticky='w', padx=5, pady=5)\n\n b = Tkinter.Button(ff, text=\"Save\", command=self.SetKey)\n b.grid (column=2, row=0, sticky='w', padx=5)\n\n #b = Tkinter.Button(ff, text=\"Apply\", command=self.ApplyKey)\n #b.grid (column=4, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Delete\", command=self.DeleteKey)\n b.grid (column=4, row=0, sticky='w', padx=5)\n\n #l = Tkinter.Label(ff, text=\" \")\n #l.grid(column=5, row=0, sticky='w')\n\n b = Tkinter.Button(ff, text=\"Load Views\", command=self.GetKeys)\n b.grid (column=6, row=0, sticky='w', padx=5)\n\n #b = Tkinter.Button(ff, text=\"Open\", command=self.OpenFiles)\n #b.grid (column=5, row=0, sticky='w', padx=5)\n\n # keys = views\n\n\n\n if 1 :\n row += 1\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w')\n\n l = Tkinter.Label(ff, text=\" Move active models farther (+) or closer (-) by: \")\n l.grid(column=0, row=0, sticky='w')\n\n self.pushA = Tkinter.StringVar(parent)\n self.pushA.set ( \"10\" )\n e = Tkinter.Entry(ff, width=4, textvariable=self.pushA)\n e.grid(column=5, row=0, sticky='w', padx=5, pady=5)\n\n b = Tkinter.Button(ff, text=\"+\", command=self.Back10)\n b.grid (column=6, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"-\", command=self.Forward10)\n b.grid (column=7, row=0, sticky='w', padx=5)\n\n\n if 1 :\n row += 1\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w')\n\n #l = Tkinter.Label(ff, text=\"Activate: \")\n #l.grid(column=3, row=0, sticky='w')\n\n b = Tkinter.Button(ff, text=\"Activate Sel\", command=self.ActivateSel)\n b.grid (column=8, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Activate All\", command=self.ActivateAll)\n b.grid (column=9, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Invert\", command=self.InvertSel)\n b.grid (column=10, row=0, sticky='w', padx=5)\n\n if 0 :\n l = Tkinter.Label(ff, text=\"Hide: \")\n l.grid(column=3, row=0, sticky='w')\n\n b = Tkinter.Button(ff, text=\"H-Sel\", command=self.HideSel)\n b.grid 
(column=10, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"H-All\", command=self.HideAll)\n b.grid (column=11, row=0, sticky='w', padx=5)\n\n\n b = Tkinter.Button(ff, text=\"Center on Sel\", command=self.ComSel)\n b.grid (column=12, row=0, sticky='w', padx=5)\n\n\n\n\n\n #row += 1\n #f = Tkinter.Frame(parent)\n #f.grid(column=0, row=row, sticky='ew')\n #l = Tkinter.Label(f, text=' ')\n #l.grid(column=0, row=row, sticky='w')\n\n\n row += 1\n dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=row,column=0,columnspan=7, pady=7, sticky='we')\n\n\n row = row + 1\n global msg\n msg = Tkinter.Label(parent, width = 50, anchor = 'w', justify = 'left', fg=\"red\")\n msg.grid(column=0, row=row, sticky='ew')\n self.msg = msg\n row += 1\n\n\n self.GetKeys()\n\n\n\n\n def select_mg_cb (self, event):\n print \"Sel:\", self.tree.selection()\n print \"Focus:\", self.tree.focus()\n\n to = self.tree.focus()\n\n\n kname = self.id_keyName[to]\n print kname\n\n self.keyName.set (kname)\n self.ApplyKey ()\n\n\n\n def open_mg_cb (self, event):\n #print \"open\"\n #print self.tree.selection()\n #print self.tree.focus()\n pass\n\n def close_mg_cb (self, event):\n #print \"close\"\n #print self.tree.selection()\n #print self.tree.focus()\n pass\n\n\n\n def Go ( self ) :\n\n print \"Go - \"\n\n if self.scriptFun == None :\n umsg (\"No script set - run 'execfile ([path to script])' in IDLE first\")\n\n else :\n self.scriptFun()\n\n\n def GetModels (self) :\n\n self.oms = {}\n self.visOms = []\n self.allOms = []\n for mod in chimera.openModels.list () :\n om = AnimatableModel ()\n om.FromMod ( mod )\n self.oms[om.mod.name] = om\n self.allOms.append ( om )\n if mod.display :\n self.visOms.append ( om )\n\n print len(self.oms), \"amods,\", len(self.visOms), \"visible\"\n\n\n def GetMap (self, mname) :\n\n am = None\n for mod in chimera.openModels.list () :\n if mod.name == mname :\n if am != None :\n print \"WARNING: two models with same name found.\"\n return None\n om = AnimatableModel ()\n om.FromMod ( mod )\n am = om.FromMap()\n if am == None :\n print \"ERROR: asked for model with name %s, which was not found (i.e. is not an open model); returning a None object... This will likely cause an exception!\" % mname\n return am\n\n def GetMolecule (self, mname) :\n\n am = None\n for mod in chimera.openModels.list () :\n if mod.name == mname :\n if am != None :\n print \"WARNING: two models with same name %s found; will confuse movie script...\" % mname\n return None\n om = AnimatableModel ()\n om.FromMod ( mod )\n am = om.FromMol ()\n if am == None :\n print \"WARNING: asked for model with name %s, which was not found; returning a None object...\" % mname\n return am\n\n\n def GetMod (self, mname) :\n\n am = None\n for mod in chimera.openModels.list () :\n if mod.name == mname :\n if am != None :\n print \"WARNING: two models with same name found.\"\n return None\n om = AnimatableModel ()\n om.FromMod ( mod )\n\n if type(mod) == VolumeViewer.volume.Volume :\n am = om.FromMap()\n elif type(mod) == chimera.Molecule :\n am = om.FromMol()\n\n if am == None :\n print \"ERROR: asked for model with name %s, which was not found (i.e. is not an open model); returning a None object... 
This will likely cause an exception!\" % mname\n return am\n\n\n\n def GetVisMods (self) :\n\n self.visMods = []\n for mod in chimera.openModels.list () :\n if mod.display :\n om = AnimatableModel ()\n om.FromMod ( mod )\n self.visMods.append ( om )\n\n print len(self.visMods), \" visible\"\n\n\n\n def CycleThr ( self ) :\n\n self.ClearFrames ()\n self.GetAllAMods()\n\n dmap = self.oms[\"groel_r16.mrc\"].FromMap ()\n #mod = self.oms[\"c1_p.pdb\"].FromMol ()\n\n MOVIE = Movie ( self, \"groel_r16_thr\" )\n\n t = 0; d = 90;\n #t += d; d = 90\n\n #MOVIE.add ( Show (t, [dmap] ) )\n #MOVIE.add ( RotateMove (t, t+d, [dmap, mod], dmap.comv, [0,1,0], 360.0, [0,0,0], itype=\"linear\" ) )\n\n MOVIE.add ( VaryThr (t, t+d, dmap, 0.398, .184) )\n t+=d\n\n\n MOVIE.make ()\n\n\n\n def RotateZ ( self ) :\n\n self.ClearFrames ()\n self.GetAllAMods()\n\n\n #m1 = self.oms['groel_e16_5143.mrc'].FromMap ()\n #MOVIE = Movie ( self, \"groel_e16_rotate\" )\n\n\n mods = []\n dmap = None\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == VolumeViewer.volume.Volume :\n mp = self.oms[m.name].FromMap ()\n mods.append ( mp )\n if dmap == None :\n dmap = mp\n print \" --v \", m.name\n\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mods.append ( self.oms[m.name].FromMol () )\n print \" --m \", m.name\n\n #m1 = self.oms['groel_e4_6422.mrc'].FromMap ()\n #mm = self.oms['1xck_B.pdb'].FromMol ()\n\n MOVIE = Movie ( self, self.movieName.get() )\n\n\n t = 0; d = 360\n #MOVIE.add ( RotateM (t, t+d, mods, dmap.comv, [0,-1,0], 360.0, itype=\"linear\") ) # chimera.viewer.camera.center\n MOVIE.add ( RotateM (t, t+d, mods, chimera.viewer.camera.center, [0,0,1], 360.0, itype=\"linear\") ) # chimera.viewer.camera.center\n\n if 0 :\n t += d; d = 90\n MOVIE.add ( RotateM (t, t+d, mods, dmap.comv, [-1,0,0], 90.0, itype=\"linear\") ) # chimera.viewer.camera.center\n\n t += d; d = 360\n MOVIE.add ( RotateM (t, t+d, mods, dmap.comv, [0,0,1], 360.0, itype=\"linear\") ) # chimera.viewer.camera.center\n\n t += d; d = 90\n MOVIE.add ( RotateM (t, t+d, mods, dmap.comv, [1,0,0], 90.0, itype=\"linear\") ) # chimera.viewer.camera.center\n\n\n MOVIE.make ()\n\n\n\n\n def RotateY ( self ) :\n\n self.ClearFrames ()\n self.GetAllAMods()\n\n\n #m1 = self.oms['groel_e16_5143.mrc'].FromMap ()\n #MOVIE = Movie ( self, \"groel_e16_rotate\" )\n\n\n mods = []\n dmap = None\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == VolumeViewer.volume.Volume :\n mp = self.oms[m.name].FromMap ()\n mods.append ( mp )\n if dmap == None :\n dmap = mp\n print \" --v \", m.name\n\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mods.append ( self.oms[m.name].FromMol () )\n print \" --m \", m.name\n\n #m1 = self.oms['groel_e4_6422.mrc'].FromMap ()\n #mm = self.oms['1xck_B.pdb'].FromMol ()\n\n MOVIE = Movie ( self, self.movieName.get() )\n\n #dm = self.oms[\"mt_threed_07symsf_08122019.hdf\"]\n #print dm.mod.name\n\n frameMul = 30\n\n t = 0; d = 10 * frameMul\n #MOVIE.add ( RotateM (t, t+d, mods, dm.comv, [0,-1,0], 360.0, itype=\"linear\") ) # chimera.viewer.camera.center\n MOVIE.add ( RotateM (t, t+d, mods, chimera.viewer.camera.center, [0,-1,0], 360.0, itype=\"linear\") ) # chimera.viewer.camera.center\n #MOVIE.add ( RotateMove (t, t+d, mods, dm, dm.comp, [0,-1,0], 360.0, [0,0,0], itype=\"linear\" ) )\n\n if 0 :\n t += d; d = 90\n MOVIE.add ( RotateM (t, t+d, mods, dmap.comv, [-1,0,0], 90.0, itype=\"linear\") ) # 
chimera.viewer.camera.center\n\n t += d; d = 360\n MOVIE.add ( RotateM (t, t+d, mods, dmap.comv, [0,0,1], 360.0, itype=\"linear\") ) # chimera.viewer.camera.center\n\n t += d; d = 90\n MOVIE.add ( RotateM (t, t+d, mods, dmap.comv, [1,0,0], 90.0, itype=\"linear\") ) # chimera.viewer.camera.center\n\n\n MOVIE.make ()\n\n\n\n\n\n def RotateX ( self ) :\n\n self.ClearFrames ()\n self.GetAllAMods()\n\n\n #m1 = self.oms['groel_e16_5143.mrc'].FromMap ()\n #MOVIE = Movie ( self, \"groel_e16_rotate\" )\n\n\n mods = []\n dmap = None\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == VolumeViewer.volume.Volume :\n print \" --v \", m.name\n mp = self.oms[m.name].FromMap ()\n mods.append ( mp )\n if dmap == None :\n dmap = mp\n\n elif m.display == True and type(m) == chimera.Molecule :\n print \" --m \", m.name\n mods.append ( self.oms[m.name].FromMol () )\n\n elif m.display == True and type(m) == _surface.SurfaceModel :\n print \" --s \", m.name\n mods.append ( self.oms[m.name].FromSurf () )\n\n else :\n print \" - \", m.name, type(m)\n\n\n #m1 = self.oms['groel_e4_6422.mrc'].FromMap ()\n #mm = self.oms['1xck_B.pdb'].FromMol ()\n\n MOVIE = Movie ( self, self.movieName.get() )\n\n frameMul = 30\n\n t = 0; d = 10 * frameMul\n #MOVIE.add ( RotateM (t, t+d, mods, dmap.comv, [0,-1,0], 360.0, itype=\"linear\") ) # chimera.viewer.camera.center\n MOVIE.add ( RotateM (t, t+d, mods, chimera.viewer.camera.center, [-1,0,0], 360.0, itype=\"linear\") ) # chimera.viewer.camera.center\n\n if 0 :\n t += d; d = 90\n MOVIE.add ( RotateM (t, t+d, mods, dmap.comv, [-1,0,0], 90.0, itype=\"linear\") ) # chimera.viewer.camera.center\n\n t += d; d = 360\n MOVIE.add ( RotateM (t, t+d, mods, dmap.comv, [0,0,1], 360.0, itype=\"linear\") ) # chimera.viewer.camera.center\n\n t += d; d = 90\n MOVIE.add ( RotateM (t, t+d, mods, dmap.comv, [1,0,0], 90.0, itype=\"linear\") ) # chimera.viewer.camera.center\n\n\n MOVIE.make ()\n\n\n\n\n def RotateWithMods ( self ) :\n\n self.ClearFrames ()\n self.GetAllAMods()\n\n\n #m1 = self.oms['groel_e8_2221.mrc'].FromMap ()\n m1 = self.oms['groel_r8.mrc'].FromMap ()\n\n mols = []\n mols.append ( self.oms['1xck_A_f10.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f6.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f2.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f5.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f7.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f8.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f12.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f14.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f4.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f9.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f3.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f1.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f11.pdb'].FromMol () )\n mols.append ( self.oms['1xck_A_f13.pdb'].FromMol () )\n\n MOVIE = Movie ( self, \"groel_r8_rotate_mods\" )\n\n\n t = 0; d = 360\n MOVIE.add ( Hide(t, mols) )\n MOVIE.add ( RotateM (t, t+d, [m1]+mols, m1.comv, [0,1,0], 360.0, itype=\"linear\") )\n #t += d\n\n for ti in range ( 14 ) :\n MOVIE.add ( Show (t, [ mols[ti] ] ) )\n t += 25\n\n\n t += 60; d = 90\n MOVIE.add ( RotateM (t, t+d, [m1]+mols, m1.comv, [1,0,0], 90.0, itype=\"linear\") )\n\n t += d+60; d = 90\n MOVIE.add ( RotateM (t, t+d, [m1]+mols, m1.comv, [1,0,0], -90.0, itype=\"linear\") )\n t += d\n\n\n MOVIE.make ()\n\n\n\n def Rock ( self ) :\n\n self.ClearFrames ()\n self.GetAllAMods()\n\n mods = []\n dmap = []\n surfs = []\n\n for 
m in chimera.openModels.list() :\n if m.display == True and type(m) == VolumeViewer.volume.Volume :\n mods.append ( self.oms[m.name].FromMap () )\n print \" --v \", m.name\n\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mods.append ( self.oms[m.name].FromMol () )\n print \" --m \", m.name\n\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == _surface.SurfaceModel :\n mods.append ( self.oms[m.name].FromSurf () )\n print \" --s \", m.name\n\n\n #m1 = self.oms['groel_e4_6422.mrc'].FromMap ()\n #mm = self.oms['1xck_B.pdb'].FromMol ()\n\n MOVIE = Movie ( self, self.movieName.get() )\n\n\n frameMul = 30\n\n t = 0; d = 10*frameMul\n\n c = chimera.viewer.camera.center\n p = mods[0].mod.openState.xform.inverse().apply ( chimera.Point(c[0],c[1],c[2]) )\n\n for i in range (1) :\n MOVIE.add ( Rock (t, t+d, mods, mods[0], p, [0,-1,0], 45.0, 1.0, itype=\"linear\") )\n t +=d\n\n\n MOVIE.make ()\n\n\n\n def SetKey ( self ) :\n\n K = self.keyName.get()\n umsg (\"Setting: \" + self.keyName.get())\n\n if not hasattr ( self, \"akeys\" ) :\n self.akeys = {}\n\n self.akeys[K] = {}\n\n for mod in chimera.openModels.list() :\n xf = mod.openState.xform\n self.akeys[K][mod.name] = Matrix.xform_matrix ( xf )\n #mod.kxf[K] = Matrix.xform_matrix ( xf )\n\n self.UpdateModKeys()\n\n self.WriteKeys()\n\n\n def KeysPath ( self ) :\n\n kpath = None\n\n # look for open maps first, get their folder\n for m in chimera.openModels.list() :\n if type(m) == VolumeViewer.volume.Volume :\n mdir, mpfile = os.path.split(m.data.path)\n kpath = os.path.join ( mdir, \"_views.txt\" )\n break\n\n if kpath != None :\n return kpath\n\n # otherwise try open models\n for m in chimera.openModels.list() :\n if type(m) == chimera.Molecule :\n if hasattr ( m, 'openedAs' ) :\n path, molname = os.path.split ( m.openedAs[0] )\n kpath = os.path.join ( path, \"_views.txt\" )\n break\n\n return kpath\n\n\n\n def WriteKeys ( self ) :\n\n kpath = self.KeysPath()\n if kpath == None :\n return\n\n try :\n with open(kpath, 'w') as outfile:\n json.dump(self.akeys, outfile)\n except :\n #umsg ( \"did not save _keys.txt file\" )\n print \" - did not save _views.txt file\"\n return\n print ( \"_views.txt saved: \" + kpath )\n\n\n\n def GetKeys ( self ) :\n\n kpath = self.KeysPath()\n if kpath == None :\n return\n\n self.akeys = {}\n try :\n fin = open(kpath, 'r')\n self.akeys = json.load(fin)\n except :\n print \"no keys\"\n #print data\n\n self.UpdateModKeys ()\n\n print ( \"_views.txt loaded: \" + kpath )\n\n\n\n def UpdateModKeys ( self ) :\n\n self.tree.delete(*self.tree.get_children())\n self.id_keyName = {}\n\n if not hasattr ( self, \"akeys\" ) :\n self.akeys = {}\n return\n\n knames = self.akeys.keys()\n knames.sort()\n\n for kname in knames :\n kid = self.tree.insert(\"\", \"end\", \"\", text=kname)\n self.id_keyName[kid] = kname\n\n for mod in chimera.openModels.list() :\n mod.kxf = {}\n for kname in self.akeys.keys() :\n if mod.name in self.akeys[kname] :\n mod.kxf[kname] = self.akeys[kname][mod.name]\n else :\n if len(self.akeys[kname]) > 0 :\n mn = self.akeys[kname].keys()[0]\n mod.kxf[kname] = self.akeys[kname][mn]\n\n\n def DeleteKey ( self ) :\n\n fout = self.KeysPath()\n if fout == None :\n umsg ( \"Open a map/model before setting a key...\" )\n return\n\n\n to = self.tree.focus()\n\n if len(to) == 0 :\n umsg ( 'No view selected' )\n return\n\n K = self.id_keyName[to]\n umsg ( \"Deleting: \" + K )\n\n if not hasattr ( self, \"akeys\" ) :\n self.akeys = {}\n return\n\n 
if K in self.akeys :\n del self.akeys[K]\n\n self.UpdateModKeys ()\n self.WriteKeys()\n\n\n\n\n\n def ApplyKey ( self ) :\n\n\n K = self.keyName.get()\n print \" - applying key: \", self.keyName.get()\n\n if K in self.akeys :\n\n for mod in chimera.openModels.list() :\n if not hasattr ( mod, 'kxf' ) :\n print \" - \", mod.name, \"has no keys?\"\n else :\n if K in mod.kxf :\n mod.xf0 = chimera.Xform(mod.openState.xform)\n mod.xf1 = Matrix.chimera_xform ( mod.kxf[K] )\n #mod.openState.xform = Matrix.chimera_xform ( mod.kxf[K] )\n else :\n #print \" - \", mod.name, \" doesn't have key\", K\n pass\n\n\n else :\n umsg ( \"view \" + K + \" not found\" )\n\n self.InterpToKey ()\n\n\n\n def InterpToKey ( self ) :\n\n\n for mod in chimera.openModels.list() :\n\n if \" -surface.for.chain- \" in mod.name :\n continue\n\n if not hasattr ( mod, \"COM\" ) :\n if type(mod) == chimera.Molecule :\n sel = chimera.selection.OSLSelection ( \"#%d\" % mod.id )\n atoms = sel.atoms()\n from _multiscale import get_atom_coordinates\n points = get_atom_coordinates ( atoms, transformed = False )\n mod.COM, mod.U, mod.S, mod.V = prAxes ( points )\n mod.comp = chimera.Point ( mod.COM[0], mod.COM[1], mod.COM[2] )\n print \" %s (map) -- \" % mod.name, mod.comp\n elif type(mod) == VolumeViewer.volume.Volume :\n pts, weights = map_points ( mod )\n if len(pts) == 0 :\n pts, weights = map_points ( mod, False )\n mod.COM, mod.U, mod.S, mod.V = prAxes ( pts )\n mod.comp = chimera.Point ( mod.COM[0], mod.COM[1], mod.COM[2] )\n print \" %s (mol) -- \" % mod.name, mod.comp\n\n mod.fromPos = mod.xf0.apply ( mod.comp )\n mod.toPos = mod.xf1.apply ( mod.comp )\n mod.tvec = mod.toPos - mod.fromPos\n\n\n from quaternion import Quaternion, slerp\n\n #from chimera import tasks, CancelOperation\n #task = tasks.Task(\"Going to '%s' View 1/20\" % self.keyName.get(), modal = True)\n\n #try :\n N = 20\n for i in range ( N ) :\n f = i / float(N-1)\n #f1 = 1.0 - f0\n f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f\n\n for mod in chimera.openModels.list() :\n\n if \" -surface.for.chain- \" in mod.name :\n continue\n\n t0 = mod.xf0.getTranslation ()\n q0 = Quaternion ()\n q0.fromXform ( mod.xf0 )\n #q0i = q0.inverse ()\n\n t1 = mod.xf1.getTranslation ()\n q1 = Quaternion ()\n q1.fromXform ( mod.xf1 )\n\n Q = slerp ( q0, q1, f2 )\n Q.normalize()\n\n xf = chimera.Xform.translation ( mod.fromPos.toVector() + mod.tvec * f2 )\n xf.multiply ( Q.Xform () )\n xf.multiply ( chimera.Xform.translation ( mod.comp.toVector() * -1.0 ) )\n\n mod.openState.xform = xf\n\n if hasattr (mod, 'surfMods') :\n for cid, m in mod.surfMods.iteritems() :\n m.openState.xform = xf\n\n print \".\",\n #task.updateStatus( \"Going to '%s' View %d/%d\" % (self.keyName.get(),i+1,N) )\n chimera.viewer.postRedisplay()\n self.toplevel_widget.update_idletasks ()\n\n #except CancelOperation:\n # print \"Going to view canceled\"\n\n #finally:\n # print \"Going to view done\"\n # task.finished()\n\n print \"\"\n\n\n\n def OpenFiles ( self ) :\n\n dm = chimera.openModels.list()[0]\n #print dm.name, dm.data.path\n\n mdir, mpfile = os.path.split(dm.data.path)\n #print \" - \", mdir\n #print \" - \", mpfile\n\n fout = mdir + \"/anim.txt\"\n print \" -> \", fout\n\n self.akeys = {}\n try :\n fin = open(fout, 'r')\n self.akeys = json.load(fin)\n except :\n print \"no keys\"\n #print data\n\n print \"Views:\"\n print self.akeys.keys()\n\n k0 = self.akeys['0']\n files = k0.keys()\n files.sort()\n for f in files :\n if f == dm.name :\n print \" - skipping\", f\n else :\n print f\n 
chimera.openModels.open ( mdir + \"/\" + f )\n\n\n\n else :\n umsg ( \"key \" + K + \" not found\" )\n\n\n\n\n def Back10 ( self ) :\n\n a = float ( self.pushA.get() )\n\n for mod in chimera.openModels.list() :\n if mod.openState.active == True :\n xf = mod.openState.xform\n xf.premultiply ( chimera.Xform.translation(0,0,-a) )\n mod.openState.xform = xf\n\n\n def Forward10 ( self ) :\n\n a = float ( self.pushA.get() )\n\n for mod in chimera.openModels.list() :\n if mod.openState.active == True :\n xf = mod.openState.xform\n xf.premultiply ( chimera.Xform.translation(0,0,+a) )\n mod.openState.xform = xf\n\n\n def ActivateSel ( self ) :\n\n print \"a-sel\"\n\n amods = {}\n\n for mod in chimera.openModels.list() :\n #print mod.name, chimera.selection.containedInCurrent ( mod )\n if chimera.selection.containedInCurrent ( mod ) :\n mod.openState.active = True\n #print mod.name\n else :\n mod.openState.active = False\n\n def InvertSel ( self ) :\n\n print \"inv-sel\"\n\n amods = {}\n sel = []\n\n for mod in chimera.openModels.list() :\n #print mod.name, chimera.selection.containedInCurrent ( mod )\n if chimera.selection.containedInCurrent ( mod ) :\n mod.openState.active = False\n #print mod.name\n else :\n mod.openState.active = True\n sel.append ( mod )\n\n chimera.selection.clearCurrent ()\n for m in sel :\n #print \" - selecting: %s\" % m.name\n chimera.selection.addCurrent ( m )\n\n\n def ActivateAll ( self ) :\n\n print \"a-all\"\n\n amods = {}\n\n for mod in chimera.openModels.list() :\n mod.openState.active = True\n\n def HideSel ( self ) :\n\n print \"h-sel\"\n\n amods = {}\n\n for mod in chimera.openModels.list() :\n #print mod.name, chimera.selection.containedInCurrent ( mod )\n if chimera.selection.containedInCurrent ( mod ) :\n mod.display = False\n\n\n def HideAll ( self ) :\n\n print \"h-all\"\n\n amods = {}\n\n for mod in chimera.openModels.list() :\n mod.display = False\n\n\n def ComSel ( self ) :\n\n selMod = None\n for mod in chimera.openModels.list() :\n #print mod.name, chimera.selection.containedInCurrent ( mod )\n if chimera.selection.containedInCurrent ( mod ) :\n selMod = mod\n\n print \"Sel:\", selMod.name\n\n om = AnimatableModel ()\n om.FromMod ( selMod )\n #print om.COM\n\n if type(selMod) == VolumeViewer.volume.Volume :\n amod = om.FromMap()\n p0 = amod.mod.openState.xform.apply ( amod.comPt )\n print p0\n #print amod.comp_wc\n\n chimera.openModels.cofr = amod.comPt_wc\n\n elif type(selMod) == chimera.Molecule :\n\n selAts = chimera.selection.currentAtoms()\n #com, N =\n #for at in selAts :\n\n chimera.openModels.cofr = selAts[0].xformCoord()\n\n #amod = om.FromMol()\n\n\n\n\n def Reset ( self ) :\n\n print \"Resetting\"\n self.fri = 0\n\n\n\n def SaveFrame (self, framei, ncopies = 1 ) :\n\n chimera.viewer.postRedisplay()\n self.toplevel_widget.update_idletasks ()\n #chimera.printer.saveImage ( self.framesPath + \"%06d.png\" % framei )\n\n import os\n if not os.path.isdir (self.framesPath) :\n try :\n print \"Making folder for frames:\"\n print \" - \", self.framesPath\n os.mkdir ( self.framesPath )\n except :\n umsg (\"Could not make folder for frames... stopping. 
Please specify folder in your movie script using the framesPath variable.\")\n self.stopMovie.set(True)\n return\n\n fname = os.path.join ( self.framesPath, \"%06d.png\" % framei )\n chimera.printer.saveImage ( fname )\n\n #import shutil\n #for i in range (ncopies-1) :\n # framei += 1\n # shutil.copy ( self.framesPath + \"%06d.png\" % (framei-1), self.framesPath + \"%06d.png\" % framei )\n\n\n def SaveKeyFrame (self, framei, text ) :\n\n chimera.viewer.postRedisplay()\n self.toplevel_widget.update_idletasks ()\n chimera.printer.saveImage ( self.framesPath + \"_k_%06d_%s.png\" % (framei,text) )\n\n\n\n def ClearFrames (self) :\n\n import os\n\n if not os.path.isdir (self.framesPath) :\n return\n\n for f in os.listdir ( self.framesPath ) :\n if f.endswith(\".png\") :\n os.remove( os.path.join(self.framesPath,f) )\n\n\n def MakeMovie (self, name = \"movie\") :\n\n if self.ffmpegPath == None :\n self.ffmpegPath = FindFFmpeg()\n if self.ffmpegPath == None :\n print \" - did not find ffmpeg path\"\n return\n\n from os.path import join, split\n framesPath = join( self.framesPath, \"%06d.png\" )\n moviePath = join ( split(self.framesPath)[0], name + self.movieFormat )\n\n print \" - frames from:\", framesPath\n print \" - movie file:\", moviePath\n\n if self.movieFormat == \".mov\" :\n args = [ self.ffmpegPath, \"-r\", \"30\", \"-i\", framesPath, \"-y\",\n \"-qscale\", \"1\", \"-b\", \"9000\", \"-vcodec\", \"mpeg4\",\n \"-f\", \"mov\", moviePath ]\n else :\n args = [ self.ffmpegPath, \"-r\", \"30\", \"-i\", framesPath, \"-y\",\n \"-qscale\", \"1\", \"-b\", \"9000\", \"-vcodec\", \"mpeg4\",\n \"-f\", \"mp4\", moviePath ]\n\n # \"-f\", \"mp4\", self.framesPath + \"_\"+name+\".mp4\" ]\n\n print \"- running: \"\n for a in args : print a,\n print \"\"\n\n import subprocess\n subprocess.call ( args )\n print \"done!\\n\"\n\n\n\n\ndef FindFFmpeg () :\n\n # backtrack through path to chimera script to find executable\n print \"finding ffmpeg exec:\"\n import sys\n atPath = sys.argv[0]\n chiPath = None\n\n #print \" - start \", atPath\n\n for i in range (100) :\n\n # go back along the path, look for the ffmpeg binary\n atPath = os.path.split ( atPath )[0]\n #print \" --- at %s\" % atPath\n\n # mac\n from os.path import join as J\n tryPath = J(J(J(J(atPath,'Contents'),'Resources'),'bin'), 'ffmpeg')\n if os.path.isfile(tryPath) :\n print \" - found ffmpeg path: %s\" % tryPath\n chiPath = tryPath\n break\n\n # unix\n tryPath = J(J(atPath,'bin'),'ffmpeg')\n if os.path.isfile(tryPath) :\n print \" - found ffmpeg path: %s\" % tryPath\n chiPath = tryPath\n break\n\n # Windows\n tryPath = J(J(atPath,'bin'),'ffmpeg.exe')\n if os.path.isfile(tryPath) :\n print \" - found ffmpeg path: %s\" % tryPath\n chiPath = tryPath\n break\n\n\n if len(atPath) == 0 :\n break\n\n return chiPath\n\n\n\n\n# ----------------------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------------------\n# -----------------------------------------------------------------------------------------------------------------\n\n\n# This is the base object for all other classes\n# - basically sets the interpolation factor f based on start and end frame\n\nclass Frames(object) :\n\n def __init__ (self, startStep, endStep) :\n self.start = startStep\n self.end = endStep\n #print \" - frames - %d|%d\" % (self.start, self.end)\n AddAction (self)\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > 
self.end :\n return\n\n if self.end <= self.start :\n self.f = 1\n else :\n self.f = float (stepAt - self.start) / float (self.end - self.start)\n\n #print \" - (%d|%d|%d) f:%.2f\" % (self.start, stepAt, self.end, self.f),\n\n\n\nclass VaryThr (Frames) :\n\n def __init__ (self, startStep, endStep, animMod, startThr, endThr) :\n #print \" - vary thr - %s - %.3f|%.3f\" % (animMod.mod.name, self.startThr, self.endThr)\n super(VaryThr, self).__init__(startStep, endStep)\n self.animMod = animMod\n self.startThr = startThr\n self.endThr = endThr\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n #print \" - VT step\",\n super(VaryThr, self).step(stepAt)\n\n thr = self.startThr + self.f * (self.endThr - self.startThr)\n #print \" - thr:%.3f\" % thr\n\n if type (self.animMod) == list :\n for om in self.animMod :\n om.SetSurfThr ( thr )\n else :\n self.animMod.SetSurfThr ( thr )\n\n\n\nclass VaryAlpha (Frames) :\n\n def __init__ (self, startStep, endStep, animMod, startA, endA) :\n #print \" - vary A - %s - %.3f|%.3f\" % (animMod.mod.name, self.startA, self.endA)\n super(VaryAlpha, self).__init__(startStep, endStep)\n self.animMod = animMod\n self.startA = startA\n self.endA = endA\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n #print \" - A step\",\n super(VaryAlpha, self).step(stepAt)\n\n self.alphaAt = self.startA + self.f * (self.endA - self.startA)\n #print \" - a:%.3f\" % a\n\n if type (self.animMod) == list :\n for om in self.animMod :\n om.SetAlpha ( self.alphaAt )\n else :\n self.animMod.SetAlpha ( self.alphaAt )\n\n\n\nclass SetColor (Frames) :\n\n def __init__ (self, atStep, animMod, clr) :\n super(SetColor, self).__init__(atStep, atStep)\n self.start = self.end = atStep\n self.animMod = animMod\n self.toColor = clr\n self.triggered = False\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or self.triggered :\n return\n\n self.triggered = True\n\n #print \" - set color:\", self.animMod.mod.name, self.toColor\n\n if type (self.animMod) == list :\n\n for om in self.animMod :\n\n if type(om.mod) == VolumeViewer.volume.Volume :\n om.SetSurfColor ( self.toColor[0], self.toColor[1], self.toColor[2], self.toColor[3] )\n om.colorAt = self.toColor\n\n elif type(om.mod) == chimera.Molecule :\n\n TODO\n\n self.animMod.dispMode = self.toDispMode\n self.animMod.color = None\n self.animMod.alphaAt = 1\n\n self.animMod.colors = {}\n for res in self.animMod.mod.residues :\n if res.ribbonColor != None :\n self.animMod.colors[res.id.chainId] = res.ribbonColor.rgba()\n self.animMod.alphaAt = res.ribbonColor.rgba()[3]\n\n\n if type (self.animMod) == list :\n for om in self.animMod :\n om.UpdateDisp ()\n\n else :\n self.animMod.UpdateDisp ()\n\n else :\n\n if type(self.animMod.mod) == VolumeViewer.volume.Volume :\n self.animMod.SetSurfColor ( self.toColor[0], self.toColor[1], self.toColor[2], self.toColor[3] )\n self.animMod.mod.colorAt = self.toColor\n\n else :\n TODO\n\n\n\n\nclass VaryColor (Frames) :\n\n def __init__ (self, startStep, endStep, animMod, startC, endC) :\n #print \" - vary A - %s - %.3f|%.3f\" % (animMod.mod.name, self.startA, self.endA)\n super(VaryColor, self).__init__(startStep, endStep)\n self.animMod = animMod\n self.startC = numpy.array ( startC )\n self.endC = numpy.array ( endC )\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n #print \" - A step\",\n super(VaryColor, self).step(stepAt)\n\n C = self.startC + self.f * (self.endC - 
self.startC)\n self.colorAt = [C[0], C[1], C[2], C[3]]\n\n if type (self.animMod) == list :\n for om in self.animMod :\n\n if type(om.mod) == VolumeViewer.volume.Volume :\n om.SetSurfColor ( C[0], C[1], C[2], C[3] )\n\n elif type(om.mod) == chimera.Molecule :\n for r in om.mod.residues :\n r.ribbonColor = chimera.MaterialColor ( C[0], C[1], C[2], C[3] )\n\n else :\n TODO\n\n om.colorAt = self.colorAt\n\n else :\n if type(self.animMod.mod) == VolumeViewer.volume.Volume :\n self.animMod.SetSurfColor ( C[0], C[1], C[2], C[3] )\n self.animMod.colorAt = self.colorAt\n\n elif type(self.animMod.mod) == chimera.Molecule :\n\n #for r in self.animMod.mod.residues :\n # r.ribbonColor = chimera.MaterialColor ( C[0], C[1], C[2], C[3] )\n\n if hasattr (amod.mod, 'surfMods') :\n for cid, mod in amod.mod.surfMods.iteritems() :\n #mod.openState.xform = xf_to_pos\n TODO\n self.alphaAt = color[3]\n self.SetModSurfColor ( mod, (color[0], color[1], color[2], self.alphaAt) )\n\n\n else :\n TODO\n\n\n\n\nclass SetColorRes (Frames) :\n\n def __init__ (self, atStep, selStr, cmap) :\n super(SetColorRes, self).__init__(atStep, atStep)\n self.start = self.end = atStep\n self.selStr = selStr\n self.cmap = cmap\n self.triggered = False\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or self.triggered :\n return\n\n self.triggered = True\n\n #print \" - set color:\", self.animMod.mod.name, self.toColor\n\n sel = chimera.selection.OSLSelection ( self.selStr )\n for r in sel.residues() :\n C = self.cmap[r.id.chainId]\n r.ribbonColor = chimera.MaterialColor ( C[0], C[1], C[2], C[3] )\n\n\n\n\n\n\nclass SetDisp (Frames) :\n\n def __init__ (self, atStep, animMod, dmode) :\n super(SetDisp, self).__init__(atStep, atStep)\n self.start = self.end = atStep\n self.animMod = animMod\n self.toDispMode = dmode\n self.triggered = False\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or self.triggered :\n return\n\n self.triggered = True\n\n print \" - set disp:\", self.animMod.mod.name, self.toDispMode\n\n\n if type(self.animMod.mod) == VolumeViewer.volume.Volume :\n\n if type (self.animMod) == list :\n for om in self.animMod :\n om.SetMapDisplay ( self.toDispMode )\n om.dispAt = self.toDispMode\n\n else :\n self.animMod.SetMapDisplay ( self.toDispMode )\n self.animMod.toDisp = self.toDispMode\n\n\n elif type(self.animMod.mod) == chimera.Molecule :\n\n self.animMod.dispMode = self.toDispMode\n self.animMod.color = None\n self.animMod.alphaAt = 1\n\n self.animMod.colors = {}\n for res in self.animMod.mod.residues :\n if res.ribbonColor != None :\n self.animMod.colors[res.id.chainId] = res.ribbonColor.rgba()\n self.animMod.alphaAt = res.ribbonColor.rgba()[3]\n\n\n if type (self.animMod) == list :\n for om in self.animMod :\n om.UpdateDisp ()\n\n else :\n self.animMod.UpdateDisp ()\n\n\n\n\n\n\nclass SetAlpha (Frames) :\n\n def __init__ (self, atStep, animMod, alpha) :\n super(SetAlpha, self).__init__(atStep, atStep)\n self.start = self.end = atStep\n self.animMod = animMod\n self.toAlpha = alpha\n self.triggered = False\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or self.triggered :\n return\n\n self.triggered = True\n\n\n if type (self.animMod) == list :\n for om in self.animMod :\n om.SetAlpha ( self.toAlpha )\n om.alphaAt = self.toAlpha\n\n else :\n print \" - set A:%.3f %s\" % (self.toAlpha, self.animMod.mod.name)\n self.animMod.SetAlpha ( self.toAlpha )\n self.animMod.alphaAt = self.toAlpha\n\n\n\n\nclass SetThr (Frames) :\n\n def __init__ (self, atStep, animMod, thr) :\n super(SetThr, 
self).__init__(atStep, atStep)\n self.start = self.end = atStep\n self.animMod = animMod\n self.toThr = thr\n self.triggered = False\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or self.triggered :\n return\n\n self.triggered = True\n\n print \" - set thr:%.3f\" % self.toThr\n\n if type (self.animMod) == list :\n for om in self.animMod :\n om.SetSurfThr ( self.toThr )\n else :\n self.animMod.SetSurfThr ( self.toThr )\n\n\n\n\n\nclass Hide (Frames) :\n\n def __init__ (self, atStep, animMod) :\n super(Hide, self).__init__(atStep, atStep)\n self.animMod = animMod\n self.start = self.end = atStep\n self.triggered = False\n #if type (animMod) == list :\n # print \" - hide - LIST - %.3f\" % (self.start)\n #else :\n # print \" - hide - %s - %.3f\" % (animMod.mod.name, self.start)\n\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or self.triggered :\n return\n\n self.triggered = True\n\n if type (self.animMod) == list :\n for om in self.animMod :\n om.Hide ()\n\n if hasattr ( om.mod, 'morphMod' ) :\n print \"closing morph mod...\", om.mod.morphMod.name\n chimera.openModels.close ( [om.mod.morphMod] )\n del om.mod.morphMod\n\n else :\n self.animMod.Hide()\n\n om = self.animMod\n if hasattr ( om.mod, 'morphMod' ) :\n print \"closing morph mod...\", om.mod.morphMod.name\n chimera.openModels.close ( [om.mod.morphMod] )\n del om.mod.morphMod\n\n\nclass Show (Frames) :\n\n def __init__ (self, atStep, animMod) :\n super(Show, self).__init__(atStep, atStep)\n self.animMod = animMod\n self.start = self.end = atStep\n self.triggered = False\n #if type (animMod) == list :\n # print \" - show - LIST - %.3f\" % (self.start)\n #else :\n # print \" - show - %s - %.3f\" % (animMod.mod.name, self.start)\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or self.triggered :\n return\n\n self.triggered = True\n\n if type (self.animMod) == list :\n for om in self.animMod :\n om.Show ()\n else :\n self.animMod.Show()\n\n\nclass Select :\n\n def __init__ (self, atStep, animMod) :\n self.animMod = animMod\n self.start = self.end = atStep\n self.triggered = False\n if type (animMod) == list :\n print \" - select - LIST - %.3f\" % (self.start)\n else :\n print \" - select - %s - %.3f\" % (animMod.mod.name, self.start)\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or self.triggered :\n return\n\n self.triggered = True\n\n chimera.selection.clearCurrent ()\n\n if type (self.animMod) == list :\n mods = []\n for om in self.animMod :\n mods.append ( om.mod )\n chimera.selection.addCurrent ( mods )\n else :\n chimera.selection.addCurrent ( [self.animMod.mod] )\n\n\n\nclass ColorContacts :\n\n def __init__ (self, atStep, animMods) :\n self.animMods = animMods\n self.start = self.end = atStep\n self.triggered = False\n\n\n def step (self, stepAt) :\n\n if stepAt < self.start or self.triggered :\n return\n\n self.triggered = True\n\n for i in range ( len(self.animMods) ) :\n for j in range ( len(self.animMods) ) :\n if i != j :\n ColorMapsByContact ( self.animMods[i].mod, self.animMods[j].mod )\n\n\n\n\n\n# Interpolate models from one xform to another\n# - xforms are interpolated by separating rotation and center movement\n# - each model moves about its own center of mass, so they can appear disjoint\n# - this is good for 'exploding' views\n\nclass ToView (Frames) :\n\n def __init__ ( self, startStep, endStep, animMods, toKey, atype=\"cubic\" ) :\n\n #print \" - xf interp - %s\" % (animMod.mod.name)\n super(ToView, self).__init__(startStep, endStep)\n\n self.toKey = toKey\n self.amods 
= animMods\n self.atype = atype\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if stepAt == self.start :\n\n for amod in self.amods :\n\n xf0 = amod.mod.openState.xform\n xf1 = Matrix.chimera_xform ( amod.mod.kxf[self.toKey] )\n\n endCOM_LC = chimera.Point ( amod.COM[0], amod.COM[1], amod.COM[2] )\n endCOM_WC = xf1.apply ( endCOM_LC )\n\n startCOM_LC = chimera.Point ( amod.COM[0], amod.COM[1], amod.COM[2] )\n startCOM_WC = xf0.apply ( startCOM_LC )\n\n amod.t_vec = endCOM_WC - startCOM_WC\n amod.to_pos = endCOM_WC\n amod.comlc = startCOM_LC\n amod.comwc = startCOM_WC\n amod.xf0 = xf0\n amod.xf1 = xf1\n\n\n #super(Frames, self).step(stepAt)\n super(ToView, self).step(stepAt)\n\n f = self.f\n # linear interpolation\n f1, f2 = (1.0-f), f\n\n if self.atype == \"cubic\" :\n # cubic interpolation\n f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f\n\n for amod in self.amods :\n\n from quaternion import Quaternion, slerp\n t0 = amod.xf0.getTranslation ()\n q0 = Quaternion ()\n q0.fromXform ( amod.xf0 )\n #q0i = q0.inverse ()\n\n t1 = amod.xf1.getTranslation ()\n q1 = Quaternion ()\n q1.fromXform ( amod.xf1 )\n\n Q = slerp ( q0, q1, f2 )\n Q.normalize()\n\n tv = amod.t_vec * f2\n xf_to_pos = chimera.Xform.translation ( amod.comwc.toVector() + tv )\n xf_to_pos.multiply ( Q.Xform () )\n xf_to_pos.multiply ( chimera.Xform.translation ( amod.comlc.toVector() * -1.0 ) )\n\n amod.mod.openState.xform = xf_to_pos\n\n if hasattr (amod.mod, 'surfMods') :\n for cid, mod in amod.mod.surfMods.iteritems() :\n mod.openState.xform = xf_to_pos\n\n\n# Interpolate models from one xform to another\n# - xforms are interpolated by separating rotation and center movement\n# - each model moves about the center of mass of the ctrMod\n# - this is good for keeping models together in a complex\n\n\nclass XfInterpKs (Frames) :\n\n # same as above but keeps mods together, rotates around ctrMod's COM\n\n def __init__ ( self, startStep, endStep, ctrMod, animMods, fromKey, toKey, atype=\"cubic\" ) :\n\n #print \" - xf interp - %s\" % (animMod.mod.name)\n super(XfInterpKs, self).__init__(startStep, endStep)\n\n #self.startMod = start\n #self.endMod = end\n self.animMod = ctrMod\n self.fromKey = fromKey\n self.toKey = toKey\n self.amods = animMods\n self.atype = atype\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if stepAt == self.start :\n\n xf0 = self.animMod.mod.openState.xform\n if self.fromKey != None :\n xf0 = Matrix.chimera_xform ( self.animMod.mod.kxf[self.fromKey] )\n\n xf1 = Matrix.chimera_xform ( self.animMod.mod.kxf[self.toKey] )\n\n endCOM_LC = chimera.Point ( self.animMod.COM[0], self.animMod.COM[1], self.animMod.COM[2] )\n endCOM_WC = xf1.apply ( endCOM_LC )\n\n startCOM_LC = chimera.Point ( self.animMod.COM[0], self.animMod.COM[1], self.animMod.COM[2] )\n startCOM_WC = xf0.apply ( startCOM_LC )\n\n self.t_vec = endCOM_WC - startCOM_WC\n self.to_pos = endCOM_WC\n self.comlc = startCOM_LC\n self.comwc = startCOM_WC\n self.xf0 = xf0\n self.xf1 = xf1\n\n\n #super(Frames, self).step(stepAt)\n\n xf0 = self.xf0\n xf1 = self.xf1\n\n from quaternion import Quaternion, slerp\n t0 = xf0.getTranslation ()\n q0 = Quaternion ()\n q0.fromXform ( xf0 )\n #q0i = q0.inverse ()\n\n t1 = xf1.getTranslation ()\n q1 = Quaternion ()\n q1.fromXform ( xf1 )\n\n\n super(XfInterpKs, self).step(stepAt)\n f = self.f\n # linear interpolation\n f1, f2 = (1.0-f), f\n\n if self.atype == \"cubic\" :\n # cubic interpolation\n f1, f2 = 
2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f\n\n #pos = t0 * f1 + t1 * f2\n\n if 0 :\n s = q0.s * f1 + q1.s * f2\n v = q0.v * f1 + q1.v * f2\n Q = Quaternion ( s, v )\n else :\n Q = slerp ( q0, q1, f2 )\n\n Q.normalize()\n\n tv = self.t_vec * f2\n xf_to_pos = chimera.Xform.translation ( self.comwc.toVector() + tv )\n xf_to_pos.multiply ( Q.Xform () )\n xf_to_pos.multiply ( chimera.Xform.translation ( self.comlc.toVector() * -1.0 ) )\n\n #self.animMod.mod.openState.xform = xf_to_pos\n\n for amod in self.amods :\n amod.mod.openState.xform = xf_to_pos\n\n if hasattr (amod.mod, 'surfMods') :\n for cid, mod in amod.mod.surfMods.iteritems() :\n mod.openState.xform = xf_to_pos\n\n\n\n\n\n\nclass SetView (Frames) :\n\n def __init__ ( self, startStep, animMods, toKey ) :\n #print \" - xf interp - %s\" % (animMod.mod.name)\n super(SetView, self).__init__(startStep, startStep)\n self.toKey = toKey\n self.amods = animMods\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if stepAt == self.start :\n\n for amod in self.amods :\n\n if not hasattr ( amod.mod, 'kxf' ) :\n print \"SetView: %s doesn't have views set\" % amod.mod.name\n elif not self.toKey in amod.mod.kxf :\n print \"SetView: %s doesn't have view %s\" % (amod.mod.name, self.toKey)\n else :\n xf1 = Matrix.chimera_xform ( amod.mod.kxf[self.toKey] )\n amod.mod.openState.xform = xf1\n\n\n\n\nclass SetXf (Frames) :\n\n def __init__ ( self, startStep, amods, xfMod ) :\n\n print \" - xf set - %s\" % (xfMod.mod.name)\n super(SetXf, self).__init__(startStep, startStep)\n\n self.xfMod = xfMod\n self.amods = amods\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if stepAt == self.start :\n for amod in self.amods :\n amod.mod.openState.xform = self.xfMod.mod.openState.xform\n\n\n\n\nclass ModInterp (Frames) :\n\n def __init__ ( self, startStep, endStep, mod, mod0, mod1 ) :\n #print \" - xf interp - %s\" % (animMod.mod.name)\n super(ModInterp, self).__init__(startStep, endStep)\n self.mod = mod\n self.mod0 = mod0\n self.mod1 = mod1\n\n\n\n\n def getResMap ( self, mol ) :\n rmap = {}\n for res in mol.residues:\n if res.id.chainId in rmap :\n rmap[res.id.chainId][res.id.position] = res\n else :\n rmap[res.id.chainId] = {}\n rmap[res.id.chainId][res.id.position] = res\n return rmap\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if stepAt == self.start :\n\n #print \" - mod interp start: \"\n #print \" - %s, %d atoms\" % (self.mod.mod.name, len(self.mod.mod.atoms))\n #print \" - %s, %d atoms\" % (self.mod0.mod.name, len(self.mod0.mod.atoms))\n #print \" - %s, %d atoms\" % (self.mod1.mod.name , len(self.mod1.mod.atoms))\n\n self.R = self.getResMap ( self.mod.mod )\n self.R0 = self.getResMap ( self.mod0.mod )\n self.R1 = self.getResMap ( self.mod1.mod )\n\n for cid, rm in self.R.iteritems() :\n #print cid,\n rm0 = self.R0[cid]\n for ri, res in rm.iteritems() :\n res0 = rm0[ri]\n for at in res.atoms :\n try :\n at0 = res0.atomsMap[at.name][0]\n except :\n print \" - could not find res %d %s, at %s -- res %d %s\" % (res.id.position, res.type, at.name, res0.id.position, res0.type)\n blah\n at.setCoord( at0.coord() )\n #print \"\"\n\n\n super(ModInterp, self).step(stepAt)\n f = self.f\n # linear interpolation\n f1, f2 = (1.0-f), f\n\n # cubic interpolation\n f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f\n\n\n #for at, at0, at1 in zip ( self.mod.mod.atoms, self.mod0.mod.atoms, self.mod1.mod.atoms ) :\n # v = at0.coord().toVector() 
* f1 + at1.coord().toVector() * f2\n #at.setCoord ( chimera.Point( v[0], v[1], v[2] ) )\n #at.setCoord ( chimera.Point( *v ) )\n\n for cid, rm in self.R.iteritems() :\n #print \"cid\"\n rm0 = self.R0[cid]\n rm1 = self.R1[cid]\n for ri, res in rm.iteritems() :\n res0 = rm0[ri]\n res1 = rm1[ri]\n\n if hasattr(self.mod0, 'colors') :\n C = self.mod0.colors[cid]\n res.ribbonColor = chimera.MaterialColor ( C[0], C[1], C[2], 1.0 )\n\n #if res0.type != res1.type :\n # print \" - res %s[%d.%s.%s] ~~ %s[%d.%s.%s]\" % (self.mod0.mod.name, res0.id.position, res0.type, res0.id.chainId, self.mod1.mod.name, res1.id.position, res1.type, res1.id.chainId)\n # haha\n for at in res.atoms :\n try :\n at0 = res0.atomsMap[at.name][0]\n except :\n print \" - did not find atom %s,%d(%s).%s in %s, res %d(%s)\" % (at.name, res.id.position, res.type, res.id.chainId, res0.molecule.name, res0.id.position, res0.type )\n return\n\n try :\n at1 = res1.atomsMap[at.name][0]\n except :\n print \" - did not find atom %s,%d(%s).%s in %s, res %d(%s)\" % (at.name, res.id.position, res.type, res.id.chainId, res1.molecule.name, res1.id.position, res1.type )\n return\n\n v = at0.coord().toVector() * f1 + at1.coord().toVector() * f2\n at.setCoord ( chimera.Point( *v ) )\n\n #for atn in ['C', 'N', 'CA'] :\n # at0 = res0.atomsMap[atn][0]\n # at1 = res1.atomsMap[atn][0]\n # #at = res.atomsMap[atn][0]\n\n\n\n self.mod.UpdateDisp ()\n\n\n\nclass UpdateShape (Frames) :\n\n def __init__ ( self, startStep, endStep, shapeMod ) :\n\n #print \" - xf interp - %s\" % (animMod.mod.name)\n super(ModInterp, self).__init__(startStep, endStep)\n\n #self.startMod = start\n #self.endMod = end\n self.mod = shapeMod\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if stepAt == self.start :\n\n print \" - update shape start\"\n print self.mod.shape\n\n\n super(ModInterp, self).step(stepAt)\n f = self.f\n # linear interpolation\n f1, f2 = (1.0-f), f\n\n # cubic interpolation\n f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f\n\n\n\n\n\nclass HideRess (Frames) :\n\n def __init__ ( self, startStep, endStep, amod, ress=None ) :\n\n #print \" - xf interp - %s\" % (animMod.mod.name)\n super(HideRess, self).__init__(startStep, endStep)\n\n #self.startMod = start\n #self.endMod = end\n self.amod = amod\n if ress == None :\n self.ress = range ( 0, len(self.amod.mod.residues) )\n else :\n self.ress = ress\n\n\n def getResMap ( self, mol ) :\n rmap = {}\n for res in mol.residues:\n if res.id.chainId in rmap :\n rmap[res.id.chainId][res.id.position] = res\n else :\n rmap[res.id.chainId] = {}\n rmap[res.id.chainId][res.id.position] = res\n return rmap\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if stepAt == self.start :\n\n #print \" - setting ribbon off for %d res\" % len(self.amod.mod.residues)\n for res in self.amod.mod.residues :\n #nres.ribbonDisplay, nres.ribbonDrawMode = False, 2\n res.ribbonDisplay = False\n\n for at in res.atoms :\n at.display = False\n\n self.lastResShown = 0\n\n\n super(HideRess, self).step(stepAt)\n f = self.f\n # linear interpolation\n f1, f2 = (1.0-f), f\n\n if 0 :\n nRes = float ( len( self.ress ) )\n toResI = int( numpy.round(nRes * f) )\n\n for ri in self.ress [self.lastResShown : toResI] :\n self.amod.mod.residues[ri].ribbonDisplay = True\n #res.ribbonDisplay = True\n\n self.lastResShown = toResI\n #self.mod.UpdateDisp ()\n\n\n\nclass ShowRess (Frames) :\n\n def __init__ ( self, startStep, endStep, amod, ress=None ) :\n\n #print \" - xf 
interp - %s\" % (animMod.mod.name)\n super(ShowRess, self).__init__(startStep, endStep)\n\n #self.startMod = start\n #self.endMod = end\n self.amod = amod\n if ress == None :\n self.ress = range ( 0, len(self.amod.mod.residues) )\n else :\n self.ress = ress\n\n self.lastResShown = 0\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n super(ShowRess, self).step(stepAt)\n f = self.f\n # linear interpolation\n f1, f2 = (1.0-f), f\n\n # cubic interpolation\n #f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f\n\n nRes = float ( len( self.ress ) )\n toResI = int( numpy.round(nRes * f) )\n\n for ri in self.ress [self.lastResShown : toResI] :\n res = self.amod.mod.residues[ri]\n res.ribbonDisplay = True\n\n self.lastResShown = toResI\n #self.mod.UpdateDisp ()\n\n\n\nclass ShowRessAts (Frames) :\n\n def __init__ ( self, startStep, endStep, amod, ress=None ) :\n super(ShowRessAts, self).__init__(startStep, endStep)\n\n self.amod = amod\n if ress == None :\n self.ress = range ( 0, len(self.amod.mod.residues) )\n else :\n self.ress = ress\n\n self.lastResShown = 0\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if 0 and stepAt == self.start :\n for res in self.amod.mod.residues :\n res.ribbonDisplay = False\n for at in res.atoms :\n at.display = False\n\n super(ShowRessAts, self).step(stepAt)\n f = self.f\n # linear interpolation\n f1, f2 = (1.0-f), f\n\n # cubic interpolation\n #f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f\n\n nRes = float ( len( self.ress ) )\n toResI = int( numpy.round(nRes * f) )\n\n for ri in self.ress [self.lastResShown : toResI] :\n res = self.amod.mod.residues[ri]\n res.ribbonDisplay = True\n for at in res.atoms :\n at.display = True\n\n self.lastResShown = toResI\n\n\n\nclass ShowAts (Frames) :\n\n def __init__ ( self, startStep, amod, ress ) :\n\n #print \" - xf interp - %s\" % (animMod.mod.name)\n super(ShowAts, self).__init__(startStep, startStep)\n\n #self.startMod = start\n #self.endMod = end\n self.amod = amod\n self.ress = ress\n\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if stepAt == self.start :\n\n ac = { 'O' : chimera.MaterialColor( .9, .2, .2, 1.0 ),\n 'C' : chimera.MaterialColor( .7, .7, .7, 1.0 ),\n 'N' : chimera.MaterialColor( .2, .2, .9, 1.0 ),\n 'H' : chimera.MaterialColor( 1, 1, 1, 1.0 ),\n ' ' : chimera.MaterialColor( .2, .2, .2, 1.0 ),\n }\n\n rmap = {}\n for res in self.amod.mod.residues :\n rmap[res.id.position] = res\n\n for ri in self.ress :\n res = rmap[ri]\n for at in res.atoms :\n at.drawMode = at.EndCap\n at.display = True\n try :\n at.color = ac[at.name[0]]\n except :\n at.color = ac[\" \"]\n\n\n\nclass ShowSideChains (Frames) :\n\n def __init__ ( self, startStep, selStr ) :\n\n super(ShowSideChains, self).__init__(startStep, startStep)\n\n self.selStr = selStr\n\n\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if stepAt == self.start :\n\n ac = { 'O' : chimera.MaterialColor( .9, .2, .2, 1.0 ),\n 'C' : chimera.MaterialColor( .7, .7, .7, 1.0 ),\n 'N' : chimera.MaterialColor( .2, .2, .9, 1.0 ),\n 'H' : chimera.MaterialColor( 1, 1, 1, 1.0 ),\n ' ' : chimera.MaterialColor( .2, .2, .2, 1.0 ),\n }\n\n sel = chimera.selection.OSLSelection ( self.selStr )\n for r in sel.residues() :\n for at in r.atoms :\n at.drawMode = at.EndCap\n at.display = True\n try :\n at.color = ac[at.name[0]]\n except :\n at.color = ac[\" \"]\n\n\nclass HideSideChains (Frames) :\n\n def 
__init__ ( self, startStep, selStr ) :\n\n #print \" - xf interp - %s\" % (animMod.mod.name)\n super(HideSideChains, self).__init__(startStep, startStep)\n\n self.selStr = selStr\n\n\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if stepAt == self.start :\n\n ac = { 'O' : chimera.MaterialColor( .9, .2, .2, 1.0 ),\n 'C' : chimera.MaterialColor( .7, .7, .7, 1.0 ),\n 'N' : chimera.MaterialColor( .2, .2, .9, 1.0 ),\n 'H' : chimera.MaterialColor( 1, 1, 1, 1.0 ),\n ' ' : chimera.MaterialColor( .2, .2, .2, 1.0 ),\n }\n\n sel = chimera.selection.OSLSelection ( self.selStr )\n for r in sel.residues() :\n for at in r.atoms :\n at.display = False\n\n\n\n\n\nclass HideAts (Frames) :\n\n def __init__ ( self, startStep, amod, ress ) :\n\n #print \" - xf interp - %s\" % (animMod.mod.name)\n super(HideAts, self).__init__(startStep, startStep)\n\n #self.startMod = start\n #self.endMod = end\n self.amod = amod\n self.ress = ress\n\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n if stepAt == self.start :\n\n rmap = {}\n for res in self.amod.mod.residues :\n rmap[res.id.position] = res\n\n for ri in self.ress :\n res = rmap[ri]\n for at in res.atoms :\n at.display = False\n\n\n\n\n\n\n# Rotate models around an axis\n\n\nclass Rotate (Frames) :\n\n def __init__ ( self, startStep, endStep, animMods, refMod, ctrPt, axis, totDeg, itype=\"cubic\" ) :\n # startStep : starting time step\n # endStep: ending time step\n # animMods: which models to animate\n # refMod: reference model; the transform of this model at the starting\n # time step will be used to determine center of rotation\n # ctrPt: center of rotation; refMod transform will be applied to this\n # axis: axis of rotation\n # totDeg: how many degrees to rotate in total\n # itype: interpolation type - either cubic or linear\n\n super(Rotate, self).__init__(startStep, endStep)\n\n self.animMods = animMods\n self.totDeg = totDeg\n self.axis = chimera.Vector ( axis[0], axis[1], axis[2] )\n self.ctrPt = chimera.Point ( ctrPt[0], ctrPt[1], ctrPt[2] )\n self.itype = itype\n self.refMod = refMod\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n from quaternion import Quaternion\n\n if stepAt == self.start :\n self.ctrPtWC = self.refMod.mod.openState.xform.apply ( self.ctrPt )\n for amod in self.animMods :\n amod.xf0 = amod.mod.openState.xform\n\n #super(Frames, self).step(stepAt)\n super(Rotate, self).step(stepAt)\n f = self.f\n f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f # cubic interpolation\n if not \"cubic\" == self.itype :\n f1, f2 = (1.0-f), f # linear interpolation\n\n deg = self.totDeg * f2\n #print \" - at deg \", deg\n\n for amod in self.animMods :\n\n xf = chimera.Xform ( amod.xf0 )\n xf.premultiply ( chimera.Xform.translation(self.ctrPtWC.toVector() * -1.0) )\n xf.premultiply ( chimera.Xform.rotation ( self.axis, deg ) )\n xf.premultiply ( chimera.Xform.translation(self.ctrPtWC.toVector()) )\n\n amod.mod.openState.xform = xf\n\n if hasattr (amod.mod, 'surfMods') :\n for cid, mod in amod.mod.surfMods.iteritems() :\n mod.openState.xform = xf\n\n\n\n\n\n\nclass Rock (Frames) :\n\n def __init__ ( self, startStep, endStep, animMods, comMod, COM, axis, totDeg, numCycles, itype=\"cubic\" ) :\n print \" - rock M\"\n print \" axis: \", axis\n print \" totDeg: \", totDeg\n print \" numCyc: \", numCycles\n super(Rock, self).__init__(startStep, endStep)\n\n self.animMods = animMods\n self.totDeg = float ( totDeg )\n 
self.N = float ( numCycles )\n self.axis = chimera.Vector ( axis[0], axis[1], axis[2] )\n self.comMod = comMod\n self.comlc = chimera.Point ( COM[0], COM[1], COM[2] )\n self.itype = itype\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n from quaternion import Quaternion\n\n if stepAt == self.start :\n #print \" - rotate M - first step\",\n self.comwc = self.comMod.mod.openState.xform.apply ( self.comlc )\n for amod in self.animMods :\n amod.xf0 = amod.mod.openState.xform\n amod.q0 = Quaternion ()\n amod.q0.fromXform ( amod.xf0 )\n amod.comlc = chimera.Point ( amod.COM[0], amod.COM[1], amod.COM[2] )\n amod.comwcv = amod.xf0.apply ( amod.comlc ).toVector() - self.comwc.toVector()\n\n #super(Frames, self).step(stepAt)\n\n super(Rock, self).step(stepAt)\n f = self.f\n f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f # cubic interpolation\n if not \"cubic\" == self.itype :\n f1, f2 = (1.0-f), f # linear interpolation\n\n #deg = self.totDeg * f2\n deg = self.totDeg * numpy.sin ( f2 * 2.0 * numpy.pi * self.N )\n #print \" - at deg \", deg\n\n for amod in self.animMods :\n\n xf_to_0 = chimera.Xform.translation ( amod.comlc.toVector() * -1.0 )\n xf_to_P = chimera.Xform.translation ( amod.comwcv )\n xf_rot0 = amod.q0.Xform ()\n xf_rotR = chimera.Xform.rotation ( self.axis, deg )\n xf_to_pos = chimera.Xform.translation ( self.comwc.toVector() )\n\n #xf_to_pos.multiply ( xf_to_com )\n xf_to_pos.multiply ( xf_rotR )\n xf_to_pos.multiply ( xf_to_P )\n xf_to_pos.multiply ( xf_rot0 )\n xf_to_pos.multiply ( xf_to_0 )\n\n amod.mod.openState.xform = xf_to_pos\n\n if hasattr (amod.mod, 'surfMods') :\n for cid, mod in amod.mod.surfMods.iteritems() :\n mod.openState.xform = xf_to_pos\n\n\n\n\nclass RockAts (Frames) :\n\n def __init__ ( self, startStep, endStep, animMods, comMod, selStr, axis, totDeg, numCycles, itype=\"cubic\" ) :\n super(RockAts, self).__init__(startStep, endStep)\n #print \" - rock Ats\"\n #print \" axis: \", axis\n #print \" totDeg: \", totDeg\n #print \" numCyc: \", numCycles\n\n self.animMods = animMods\n self.totDeg = float ( totDeg )\n self.N = float ( numCycles )\n self.axis = chimera.Vector ( axis[0], axis[1], axis[2] )\n self.comMod = comMod\n #self.comlc = chimera.Point ( COM[0], COM[1], COM[2] )\n self.selStr = selStr\n self.itype = itype\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n from quaternion import Quaternion\n\n if stepAt == self.start :\n sel = chimera.selection.OSLSelection ( self.selStr )\n\n from _multiscale import get_atom_coordinates\n points = get_atom_coordinates ( sel.atoms(), transformed = True )\n COM, U, S, V = prAxes ( points )\n comp = chimera.Point ( COM[0], COM[1], COM[2] )\n\n #print \" - rock Ats - first step - %s, \" % self.selStr, COM\n\n self.comwc = comp # self.comMod.mod.openState.xform.apply ( comp )\n for amod in self.animMods :\n amod.xf0 = amod.mod.openState.xform\n amod.q0 = Quaternion ()\n amod.q0.fromXform ( amod.xf0 )\n amod.comlc = chimera.Point ( amod.COM[0], amod.COM[1], amod.COM[2] )\n amod.comwcv = amod.xf0.apply ( amod.comlc ).toVector() - self.comwc.toVector()\n\n #super(Frames, self).step(stepAt)\n\n super(RockAts, self).step(stepAt)\n f = self.f\n f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f # cubic interpolation\n if not \"cubic\" == self.itype :\n f1, f2 = (1.0-f), f # linear interpolation\n\n #deg = self.totDeg * f2\n deg = self.totDeg * numpy.sin ( f2 * 2.0 * numpy.pi * self.N )\n #print \" - at deg \", deg\n\n for amod in 
self.animMods :\n\n xf_to_0 = chimera.Xform.translation ( amod.comlc.toVector() * -1.0 )\n xf_to_P = chimera.Xform.translation ( amod.comwcv )\n xf_rot0 = amod.q0.Xform ()\n xf_rotR = chimera.Xform.rotation ( self.axis, deg )\n xf_to_pos = chimera.Xform.translation ( self.comwc.toVector() )\n\n #xf_to_pos.multiply ( xf_to_com )\n xf_to_pos.multiply ( xf_rotR )\n xf_to_pos.multiply ( xf_to_P )\n xf_to_pos.multiply ( xf_rot0 )\n xf_to_pos.multiply ( xf_to_0 )\n\n amod.mod.openState.xform = xf_to_pos\n\n if hasattr (amod.mod, 'surfMods') :\n for cid, mod in amod.mod.surfMods.iteritems() :\n mod.openState.xform = xf_to_pos\n\n\n\n\n\n\nclass Scale (Frames) :\n\n def __init__ ( self, startStep, endStep, targetScale ) :\n\n print \" - xf scale\"\n super(Scale, self).__init__(startStep, endStep)\n\n self.targetScale = targetScale\n self.scaleSign = 1.0\n self.scaleIncr = targetScale / (endStep - startStep)\n\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n print \" - scale step %f\" % self.targetScale\n #super(Frames, self).step(stepAt)\n\n cmd = \"scale %f\" % self.targetScale\n chimera.runCommand ( cmd )\n\n\n\n\nclass Cycle (Frames) :\n\n def __init__ ( self, startStep, endStep, animMods, dstep ) :\n print \" - Cycle M\"\n print \" dstep: \", dstep\n print \" num models: \", len(animMods)\n super(Cycle, self).__init__(startStep, endStep)\n\n self.animMods = animMods\n self.dstep = dstep\n self.atMod = 0\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n super(Cycle, self).step(stepAt)\n #f = self.f\n #rangef = float (self.end - self.start)\n\n import numpy\n modf = numpy.floor ( float(self.atMod) / float(self.dstep) )\n modi = int ( modf ) % len(self.animMods)\n\n #print \" step %d (%d -> %d) d %d, modi: %d\" % (stepAt, self.start, self.end, self.dstep, modi)\n\n for ai, amod in enumerate (self.animMods) :\n\n if ai == modi :\n amod.mod.display = True\n else :\n amod.mod.display = False\n\n self.atMod += 1\n\n\n\n# chimera.runCommand ( \"clip hither -2\" )\n\nclass Clip (Frames) :\n\n def __init__ ( self, startStep, endStep, totalC ) :\n print \" - Clip\"\n print \" totalC: \", totalC\n super(Clip, self).__init__(startStep, endStep)\n\n self.totalC = totalC\n self.dc = float(totalC) / float(endStep-startStep+1)\n self.at = 0\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n super(Clip, self).step(stepAt)\n self.at += self.dc\n\n #print \" clip step %d (%d -> %d) d %.5f, total %d\" % (stepAt, self.start, self.end, self.dc, self.totalC)\n print \" clip %d - %d/%d\" % (stepAt, self.at, self.totalC)\n chimera.runCommand ( \"clip hither %.5f\" % self.dc )\n\n\n\nclass ClipOff (Frames) :\n\n def __init__ ( self, startStep ) :\n print \" - Clip Off\"\n super(ClipOff, self).__init__(startStep, startStep)\n\n self.done = False\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n super(ClipOff, self).step(stepAt)\n\n if self.done :\n return\n\n print \" clip off step %d (%d -> %d)\" % (stepAt, self.start, self.end)\n chimera.runCommand ( \"clip off\" )\n self.done = True\n\n\n\n\nclass VolMorph (Frames) :\n\n # er_dna\n\n def __init__ ( self, startStep, endStep, startM, endM ) :\n print \" - morph %s, %.1f -> %s, %.1f\" % ( startM.mod.name, startM.mod.surface_levels[0], endM.mod.name, endM.mod.surface_levels[0] )\n super(VolMorph, self).__init__(startStep, endStep)\n self.startM = startM\n self.endM = endM\n self.df_v = 
None\n\n\n def step ( self, stepAt ) :\n\n if stepAt < self.start or stepAt > self.end :\n return\n\n from quaternion import Quaternion\n\n\n if stepAt == self.start :\n print \" - morph - first step\"\n\n startM = self.startM.mod\n endM = self.endM.mod\n\n self.start_mat = startM.data.full_matrix()\n self.startSurfaceLevel = startM.surface_levels[0]\n f_mask = numpy.where ( self.start_mat > startM.surface_levels[0], numpy.ones_like(self.start_mat), numpy.zeros_like(self.start_mat) )\n self.startVol = numpy.sum ( f_mask ) * startM.data.step[0] * startM.data.step[1] * startM.data.step[2]\n print \" - start %s thr %.3f vol: %.3f\" % (startM.name, startM.surface_levels[0], self.startVol)\n print startM.surface_levels\n\n self.startColor = numpy.array ( startM.surfacePieces[0].color )\n #print \" - start color: \", self.startColor\n\n self.end_mat = endM.data.full_matrix().copy()\n self.endSurfaceLevel = endM.surface_levels[0]\n f_mask = numpy.where ( self.end_mat > endM.surface_levels[0], numpy.ones_like(self.end_mat), numpy.zeros_like(self.end_mat) )\n self.endVol = numpy.sum ( f_mask ) * endM.data.step[0] * endM.data.step[1] * endM.data.step[2]\n print \" - end %s thr %.3f vol: %.3f\" % (endM.name, endM.surface_levels[0], self.endVol)\n print endM.surface_levels\n\n self.endColor = numpy.array ( endM.surfacePieces[0].color )\n #print \" - end color: \", self.endColor\n\n fmap = endM\n dmap = startM\n import _contour\n n1, n2, n3 = fmap.data.size[0], fmap.data.size[1], fmap.data.size[2]\n f_points = VolumeData.grid_indices( (n1,n2,n3), numpy.single ) # i,j,k indices\n _contour.affine_transform_vertices( f_points, fmap.data.ijk_to_xyz_transform )\n\n d_vals = dmap.interpolated_values ( f_points, fmap.openState.xform )\n self.start_mat = d_vals.reshape( (n3,n2,n1) )\n\n #f_mask = numpy.where ( self.end_mat > endM.surface_levels[0], numpy.ones_like(self.end_mat), numpy.zeros_like(self.end_mat) )\n #self.endVol = numpy.sum ( f_mask ) * endM.data.step[0] * endM.data.step[1] * endM.data.step[2]\n #print \" - end vol after interp: %.3f\" % self.endVol\n\n self.endM.mod.morphMod = endM.writable_copy(require_copy = True, name = endM.name+'__morph')\n startM.display = False\n endM.display = False\n\n\n #try :\n # chimera.openModels.close ( self.df_v )\n #except :\n # pass\n\n super(VolMorph, self).step(stepAt)\n f = self.f\n #f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f # cubic interpolation\n #if not \"cubic\" == self.itype :\n f1, f2 = (1.0-f), f # linear interpolation\n #print f1, f2\n\n df_mat = self.start_mat * f1 + self.end_mat * f2\n\n #M = self.df_v.data.full_matrix()\n #M[:,:,:] = df_mat[:,:,:]\n #self.df_v.data.values_changed()\n\n morphMod = self.endM.mod.morphMod\n\n morphMod.data.full_matrix()[:,:,:] = df_mat[:,:,:]\n morphMod.data.values_changed()\n\n #sf = self.df_v.surface_level_for_enclosed_volume ( self.endVol )\n sf = self.startSurfaceLevel * f1 + self.endSurfaceLevel * f2\n #print \" - new surf level for end vol: %.2f\" % sf\n morphMod.surface_levels = [sf]\n\n col = self.startColor * f1 + self.endColor * f2\n\n ro = VolumeViewer.volume.Rendering_Options()\n morphMod.update_surface ( False, ro )\n for sp in morphMod.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 :\n sp.display = False\n if len(v) == 0 and len(t) == 0 :\n sp.display = False\n else :\n sp.color = (col[0],col[1],col[2],col[3])\n\n\n if stepAt == self.end :\n #print \" - last step!\"\n self.endM.mod.display = True\n chimera.openModels.close ( [self.endM.mod.morphMod] )\n del 
self.endM.mod.morphMod\n\n\n# the active movie as global to avoid passing this around\nbioMovieActiveMovie = None\n\ndef AddAction (action) :\n global bioMovieActiveMovie\n if bioMovieActiveMovie == None :\n print \"No movie created, hence action was not added to any movie\"\n print \" - create a movie first, e.g. movie = biomovie.Movie(biomovie_dialog) \"\n else :\n bioMovieActiveMovie.add ( action )\n\n\nclass Movie :\n\n\n def __init__ (self, dlg ) :\n self.anims = []\n self.start = 10000000\n self.end = 0\n self.dlg = dlg\n self.keys = {}\n\n global bioMovieActiveMovie\n bioMovieActiveMovie = self\n\n\n def add ( self, anim ) :\n if anim.start < self.start :\n self.start = anim.start\n if anim.end > self.end :\n self.end = anim.end\n self.anims.append ( anim )\n\n\n def addKey ( self, framei, text ) :\n self.keys[framei] = text\n\n\n def make (self, saveMovie = True) :\n\n saveMovie = self.dlg.makeMovie.get()\n stop = self.dlg.stopMovie.get()\n\n if saveMovie :\n self.dlg.ClearFrames ()\n\n fri = 0\n\n for i in range ( self.start, self.end+1 ) :\n\n saveMovie = self.dlg.makeMovie.get()\n stop = self.dlg.stopMovie.get()\n #print \"%d - \" % i\n\n if stop :\n print \"Stopped\"\n break\n\n for anim in self.anims :\n anim.step ( i )\n\n\n chimera.viewer.postRedisplay()\n self.dlg.toplevel_widget.update_idletasks ()\n\n\n if i in self.keys :\n self.dlg.SaveKeyFrame ( i, self.keys[i] )\n\n if i % 30 == 0 :\n print \"%d/%d\" % (i+1,self.end+1)\n else :\n print \".\",\n\n if saveMovie :\n self.dlg.SaveFrame ( fri, 1 )\n fri += 1\n\n if saveMovie and not self.dlg.stopMovie.get() :\n self.dlg.MakeMovie ( self.dlg.movieName.get() )\n\n\n def run (self) :\n self.make ( False )\n\n\n\n\n\n\nclass AnimatableModel :\n\n def __init__ (self) :\n self.mod = None\n\n\n def FromMod ( self, fm ) :\n\n self.mod = fm\n self.xf0 = fm.openState.xform\n self.xf = fm.openState.xform\n #print \"New AM:\", fm.name\n\n if type(fm) == VolumeViewer.volume.Volume :\n self.type = \"map\"\n elif type(fm) == chimera.Molecule :\n self.type = \"mol\"\n\n if 0 and hasattr ( self.mod, \"COM\" ) :\n self.COM = self.mod.COM\n self.comPt = chimera.Point ( self.COM[0], self.COM[1], self.COM[2] )\n self.comVec = self.comp.toVector ()\n self.comPt_wc = self.mod.openState.xform.apply ( self.comPt )\n print \" - \", self.comPt_wc\n\n\n def FromMap ( self ) :\n\n self.pts, self.weights = map_points ( self.mod )\n if len(self.pts) == 0 :\n self.pts, self.weights = map_points ( self.mod, False )\n #print len(self.pts)\n self.COM, self.U, self.S, self.V = prAxes ( self.pts )\n #print \" - \" + self.mod.name + \", COM : \", self.COM\n\n self.comPt = chimera.Point ( self.COM[0], self.COM[1], self.COM[2] )\n self.comVec = self.comPt.toVector ()\n\n self.comPt_wc = self.mod.openState.xform.apply ( self.comPt )\n self.COMWC = [self.comPt_wc[0], self.comPt_wc[1], self.comPt_wc[2]]\n\n return self\n\n\n\n\n def FromMol (self) :\n\n #if hasattr ( self, \"COM\" ) :\n # return\n\n sel = chimera.selection.OSLSelection ( \"#%d\" % self.mod.id )\n atoms = sel.atoms()\n\n #print self.mod.name, \" - \", len(atoms), \"atoms\"\n\n from _multiscale import get_atom_coordinates\n points = get_atom_coordinates ( atoms, transformed = False )\n self.COM, self.U, self.S, self.V = prAxes ( points )\n\n self.comPt = chimera.Point ( self.COM[0], self.COM[1], self.COM[2] )\n self.comVec = self.comPt.toVector ()\n self.comPt_wc = self.mod.openState.xform.apply ( self.comPt )\n\n self.dispMode = [\"ribbon\"]\n\n # print \" - com: \", self.comp\n\n return 
self\n\n\n\n\n def FromSurf (self) :\n\n print self.mod.name, \" - from surf\"\n\n self.COM = numpy.array ( [ 0,0,0 ], numpy.float32 )\n N = 0.0;\n rad = 0.0;\n\n for sp in self.mod.surfacePieces :\n for p in sp.geometry[0] :\n self.COM = self.COM + p;\n N = N + 1.0;\n r = numpy.sqrt ( (p**2).sum() )\n if r > rad :\n rad = r\n\n self.COM = self.COM / N;\n self.comPt = chimera.Point ( self.COM[0], self.COM[1], self.COM[2] )\n self.comVec = self.comPt.toVector ()\n # print \" - com: \", self.comp\n\n return self\n\n\n\n\n def Rotate00 ( self, deg=5.0, center = None, axis = [0,0,1] ) :\n\n if ( center == None ) :\n center = self.COM\n\n rxf = chimera.Xform.rotation ( chimera.Vector(axis[0],axis[1],axis[2]), deg )\n txf0 = chimera.Xform.translation ( chimera.Vector(-center[0],-center[1],-center[2]) )\n txf = chimera.Xform.translation ( chimera.Vector(center[0],center[1],center[2]) )\n\n self.xf.multiply ( txf )\n self.xf.multiply ( rxf )\n self.xf.multiply ( txf0 )\n self.mod.openState.xform = self.xf\n\n\n def Show ( self ) :\n\n self.mod.display = True\n\n if hasattr (self.mod, 'surfMods') :\n #self.mod.display = False\n self.mod.display = True\n for cid, mod in self.mod.surfMods.iteritems() :\n try :\n self.mod.surfMods[cid].display = True\n except :\n pass\n\n\n def Hide ( self ) :\n\n self.mod.display = False\n\n if hasattr (self.mod, 'surfMods') :\n for cid, mod in self.mod.surfMods.iteritems() :\n try :\n self.mod.surfMods[cid].display = False\n except :\n pass\n\n\n\n def SetSurfThr ( self, thr ) :\n\n if self.mod.display == False :\n return\n\n if type ( self.mod ) != VolumeViewer.volume.Volume :\n return\n\n self.SetModThr ( self.mod, thr )\n if hasattr ( self, 'toAlpha' ) :\n self.SetAlpha ( self.toAlpha )\n\n\n\n def SetSurfColor ( self, r, g, b, a ) :\n\n self.SetModSurfColor ( self.mod, (r,g,b,a) )\n\n\n\n def SetModThr ( self, mod, thr ) :\n\n if mod.display == False :\n return\n\n if type ( mod ) != VolumeViewer.volume.Volume :\n return\n\n mod.region = ( mod.region[0], mod.region[1], [1,1,1] )\n mod.surface_levels[0] = thr\n\n ro = VolumeViewer.volume.Rendering_Options()\n ro.smoothing_factor = .2\n ro.smoothing_iterations = 5\n ro.surface_smoothing = True\n\n mod.update_surface ( False, ro )\n\n for sp in mod.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 :\n sp.display = False\n\n\n def SetModSurfColor ( self, mod, clr ) :\n\n for sp in mod.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 :\n sp.display = False\n else :\n sp.color = clr\n\n\n def SetMapDisplay ( disp ) :\n\n for sp in mod.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 :\n sp.display = False\n else :\n if disp == \"mesh\" :\n sp.displayStyle = sp.Mesh\n sp.lineThickness = 2.0\n else :\n sp.displayStyle = sp.Solid\n\n\n\n\n def SetAlpha ( self, a ) :\n\n import Segger\n import Segger.regions\n\n if type(self.mod) == Segger.regions.Segmentation :\n print \"seg set a\"\n\n for r in self.mod.regions :\n if r.has_surface():\n cr,cg,cb = r.surface_piece.color[:3] #r.color[:3]\n r.surface_piece.color = ( cr, cg, cb, a )\n\n elif type(self.mod) == VolumeViewer.volume.Volume :\n\n for sp in self.mod.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 :\n sp.display = False\n else :\n c = sp.color\n sp.color = ( c[0], c[1], c[2], a )\n #sp.vertexColors = None\n\n if hasattr ( sp, \"vertexColors\" ) and sp.vertexColors != None :\n vcolors = []\n for vc in sp.vertexColors :\n vcolors.append ( (vc[0], vc[1], vc[2], a) )\n\n 
sp.vertexColors = vcolors\n\n elif type(self.mod) == chimera.Molecule :\n if hasattr (self.mod, 'surfMods') :\n #print \" - set a: \", self.mod.name, a\n for cid, mod in self.mod.surfMods.iteritems() :\n #chimera.openModels.close ( [mod] )\n color = self.colors[cid]\n self.SetModSurfColor ( self.mod.surfMods[cid], (color[0], color[1], color[2], a) )\n self.alphaAt = a\n #print cid,\n #print \"\"\n\n elif type(self.mod) == _surface.SurfaceModel :\n for sp in self.mod.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 :\n sp.display = False\n else :\n c = sp.color\n sp.color = ( c[0], c[1], c[2], a )\n\n\n def SetXform ( self, f ) :\n\n from quaternion import Quaternion\n t0 = self.xf0.getTranslation ()\n q0 = Quaternion ()\n q0.fromXform ( self.xf0 )\n\n t1 = self.xf.getTranslation ()\n q1 = Quaternion ()\n q1.fromXform ( self.xf )\n\n\n # linear interpolation\n f1, f2 = (1.0-f), f\n\n # cubic interpolation\n f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f\n\n pos = t0 * f1 + t1 * f2\n s = q0.s * f1 + q1.s * f2\n v = q0.v * f1 + q1.v * f2\n\n Q = Quaternion ( s, v )\n Q.normalize()\n\n xf = Q.Xform ()\n # print \"- com pos: \", pos\n\n #tr0 = chimera.Xform.translation ( -sm.COM )\n tr = chimera.Xform.translation ( pos )\n\n #xf.multiply ( tr0 )\n xf.premultiply ( tr )\n\n #if rxf1.ref_mod :\n # xf.premultiply ( rxf1.ref_mod.openState.xform )\n\n self.mod.openState.xform = xf\n\n\n def UpdateDisp (self) :\n\n if not hasattr(self, 'dispMode') or self.dispMode == None :\n return\n\n if type( self.mod ) == chimera.Molecule :\n\n if self.dispMode[0] == \"ribbon\" :\n\n #self.mod.display = True\n\n if hasattr (self.mod, 'surfMods') :\n for cid, smod in self.mod.surfMods.iteritems() :\n chimera.openModels.close ( [smod] )\n\n self.mod.surfMods = {}\n\n elif self.dispMode[0] == \"surf\" :\n\n #self.mod.display = False\n #self.mod.display = True\n\n if hasattr (self.mod, 'surfMods') :\n for cid, smod in self.mod.surfMods.iteritems() :\n chimera.openModels.close ( [smod] )\n\n self.mod.surfMods = {}\n\n if hasattr(self, 'colors') :\n for cid, color in self.colors.iteritems() :\n\n modName = self.mod.name + \" -surface.for.chain- \" + cid\n closeMods = []\n for m in chimera.openModels.list() :\n if m.name == modName :\n closeMods.append ( m )\n break\n if len(closeMods) > 0 :\n chimera.openModels.close ( closeMods )\n\n res, step, thr = self.dispMode[1], self.dispMode[2], self.dispMode[3]\n self.mod.surfMods[cid] = self.GenStrucMap ( cid, step, res )\n self.mod.surfMods[cid].name = modName\n self.SetModThr ( self.mod.surfMods[cid], thr )\n self.alphaAt = color[3]\n self.SetModSurfColor ( self.mod.surfMods[cid], (color[0], color[1], color[2], self.alphaAt) )\n #print \" %d\" % len(self.mod.surfMods[cid].surfacePieces),\n #print \".\"\n\n\n\n def GenStrucMap ( self, cid, step, res ) :\n\n #cmd = \"molmap #%s %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( mol.id, res, step )\n cmd = \"molmap #%s:.%s %f gridSpacing %f replace false\" % ( self.mod.id, cid, res, step )\n #print \" -\", cmd\n chimera.runCommand ( cmd )\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n #print \" - found\", mod.name\n mv = mod\n break\n\n if mv == None :\n print \"- molmap not found...\"\n return None\n\n return mv\n\n\n\n\n\n# calculates 'principal axes' of a set of points\ndef prAxes ( points ) :\n\n com = numpy.sum(points, axis=0) / len(points)\n C = chimera.Vector ( com[0], com[1], 
com[2] )\n\n comv = numpy.ones_like ( points ) * com\n points = points - comv\n\n i = numpy.matrix ( [[1,0,0], [0,1,0], [0,0,1]] )\n ii = i * numpy.sum ( numpy.multiply ( points, points ) )\n p_t = numpy.transpose(points)\n td = numpy.tensordot ( points, p_t, axes=[0,1] )\n\n I0 = ii - td\n\n try :\n U, S, V = numpy.linalg.svd( I0 )\n except :\n print \"- error computing SVD - prob. singular matrix\"\n return []\n\n #U[0,0] = U[0,0] * -1.0\n #U[1,0] = U[1,0] * -1.0\n #U[2,0] = U[2,0] * -1.0\n\n #U[0,2] = U[0,2] * -1.0\n #U[1,2] = U[1,2] * -1.0\n #U[2,2] = U[2,2] * -1.0\n\n return [C, U, S, V]\n\n\n\n# returns grid points in the map above a given threshold value\ndef map_points (fmap, useThreshold = True):\n\n from _contour import affine_transform_vertices as transform_vertices\n\n mat = fmap.data.full_matrix()\n threshold = fmap.surface_levels[0]\n\n if useThreshold == False :\n #threshold = -1e9\n threshold = 1e-5\n #print \" - not using threshold\"\n\n import _volume\n points = _volume.high_indices(mat, threshold)\n fpoints = points.astype(numpy.single)\n fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]\n\n nz = numpy.nonzero( fpoint_weights )[0]\n if len(nz) < len (fpoint_weights) :\n fpoints = numpy.take( fpoints, nz, axis=0 )\n fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)\n\n transform_vertices( fpoints, fmap.data.ijk_to_xyz_transform )\n\n if 0 : print \"FitPoints from %s with threshold %.4f, %d nonzero\" % (\n fmap.name, threshold, len(nz) )\n\n return fpoints, fpoint_weights\n\n\n\n\n\n# ---------------------------------------------------------------------------------------------------------\n\n\ndef get_dialog () :\n\n from chimera import dialogs\n d = dialogs.find ( \"BioMovie\", create=False )\n return d\n\n\n\ndef close_dialog ():\n\n\tfrom chimera import dialogs\n\td = dialogs.find ( \"BioMovie\", create=False )\n\n\tif d :\n\t\tprint \" - found dialog\"\n\t\td.toplevel_widget.update_idletasks ()\n\t\td.Close()\n\t\td.toplevel_widget.update_idletasks ()\n\telse :\n\t\tprint \" - did not find dialog\"\n\n\n\ndef show_dialog ():\n\n close_dialog()\n\n from chimera import dialogs\n dialogs.register (\"BioMovie\", BioMovie, replace = True)\n d = dialogs.find ( \"BioMovie\", create=True )\n\n # Avoid transient dialog resizing when created and mapped for first time.\n d.toplevel_widget.update_idletasks ()\n d.enter()\n\n return d\n\n\n\ndef getMod ( name ) :\n for mol in chimera.openModels.list () :\n if mol.name == name :\n return mol\n return None\n\ndef getModById ( id ) :\n for mol in chimera.openModels.list () :\n try : mol.id\n except : continue\n if mol.id == id :\n return mol\n return None\n\ndef visMods () :\n for mol in chimera.openModels.list () :\n try :\n mol.shown()\n except :\n continue\n if mol.shown() == True :\n print mol.id, \" \", mol.name\n\n return None\n", "id": "11758504", "language": "Python", "matching_score": 7.104434967041016, "max_stars_count": 1, "path": "biomovie/biomovie.py" }, { "content": "\n# Copyright (c) 2020 <NAME> - <EMAIL>\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission 
notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport chimera\nimport os\nimport os.path\nimport Tkinter\nfrom CGLtk import Hybrid\nimport VolumeData\nimport _multiscale\nimport MultiScale.surface\nimport _surface\nimport numpy\nimport _contour\nimport Matrix\nimport VolumeViewer\nfrom sys import stderr\nfrom time import clock\n\nfrom axes import prAxes\nimport regions; reload (regions)\nimport graph; reload(graph)\nfrom Segger import dev_menus, timing, seggerVersion, showDevTools\n\n#dev_menus = False\n#showDevTools = True\n\n\nOML = chimera.openModels.list\n\nREG_OPACITY = 0.45\n\n\ndef umsg ( txt ) :\n print txt\n status ( txt )\n\ndef status ( txt ) :\n txt = txt.rstrip('\\n')\n msg.configure(text = txt)\n msg.update_idletasks()\n\n\nclass Volume_Segmentation_Dialog ( chimera.baseDialog.ModelessDialog ):\n\n title = \"Segger (v\" + seggerVersion + \")\"\n name = \"segment map\"\n #buttons = ('Segment', 'Group', 'Ungroup', 'Options', 'Shortcuts', \"Tools\", \"Log\", \"Close\")\n buttons = ('Group', 'Ungroup', 'Options', 'Shortcuts', \"Tools\")\n help = 'https://github.com/gregdp/segger'\n\n def fillInUI(self, parent):\n\n self.group_mouse_mode = None\n\n tw = parent.winfo_toplevel()\n self.toplevel_widget = tw\n tw.withdraw()\n\n parent.columnconfigure(0, weight = 1)\n\n row = 1\n\n menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)\n tw.config(menu = menubar)\n\n file_menu_entries = (\n ('Open segmentation...', self.OpenSegmentation),\n ('Save segmentation', self.SaveSegmentation),\n ('Save segmentation as...', self.SaveSegmentationAs),\n (\"Save selected regions to .mrc file...\", self.WriteSelRegionsMRCFile),\n (\"Save all regions to .mrc file...\", self.WriteAllRegionsMRCFile),\n (\"Save each region to .mrc file...\", self.WriteEachRegionMRCFile),\n (\"Close segmentation\", self.CloseSeg),\n (\"Close all segmentations except displayed\", self.CloseHiddenSeg),\n (\"Close all segmentations\", self.CloseAll),\n (\"Associate Selected\", self.Associate),\n )\n\n fmenu = Hybrid.cascade_menu(menubar, 'File', file_menu_entries)\n\n import attributes\n regions_menu_entries = (\n 'separator',\n (\"Show all\", self.RegSurfsShowAll),\n (\"Show only selected\", self.RegSurfsShowOnlySelected),\n (\"Show adjacent\", self.RegSurfsShowAdjacent),\n ('Show grouping', self.ShowUngroupedSurfaces),\n ('Unshow grouping', self.ShowGroupSurfaces),\n (\"Hide\", self.RegSurfsHide),\n (\"Make transparent\", self.RegSurfsTransparent),\n (\"Make opaque\", self.RegSurfsOpaque),\n ('Color density map', self.ColorDensity),\n 'separator',\n ('Select groups', self.SelectGroups),\n ('Select boundary regions', self.SelectBoundaryRegions),\n (\"Invert selection\", self.Invert),\n (\"Regions overlapping current selection\", self.Overlapping),\n 'separator',\n #(\"Group selected\", self.JoinSelRegs),\n #(\"Ungroup selected\", self.UngroupSelRegs),\n #(\"Smooth and group\", self.SmoothAndGroupOneStep),\n (\"Delete selected regions\", self.DelSelRegs),\n (\"Delete all except selected\", 
self.DelExcSelRegs),\n \"separator\",\n (\"Enclosed volume\", self.RegionsVolume),\n (\"Mean and SD\", self.RegionMeanAndSD),\n #(\"Mask map with selected\", self.MaskMapWRegions),\n #(\"Mask another map with selected (shrink map)\", self.MaskAnotherMapWRegionsShrink),\n #(\"Mask another map with selected (keep map dimensions)\", self.MaskAnotherMapWRegions),\n #(\"Extract densities...\", self.ExtractDensities),\n (\"Subtract selected from map\", self.SubtractRegionsFromMap),\n (\"Show axes for selected\", self.ShowRegionAxesSelected),\n (\"Hide all axes\", self.HideRegionAxes),\n \"separator\",\n (\"Attributes table...\", attributes.show_region_attributes_dialog),\n (\"How many sub-regions\", self.ShowNumSubRegs)\n )\n\n self.regsVisMode = Tkinter.StringVar()\n self.regsVisMode.set ( 'Voxel_Surfaces' )\n\n rmenu = Hybrid.cascade_menu(menubar, 'Regions', regions_menu_entries)\n\n if dev_menus:\n rmenu.add_separator()\n for lbl, var, val, cmd in (\n (\"Surfaces around voxels\", self.regsVisMode,\n 'Voxel_Surfaces', self.RegsDispUpdate),\n (\"Map iso-surfaces\", self.regsVisMode,\n 'Iso_Surfaces', self.RegsDispUpdate),\n #(\" - delete files\", self.deleteCloseFiles, 1, None),\n ):\n rmenu.add_radiobutton(label = lbl, variable = var, value = val,\n command = cmd)\n #rmenu.add_separator()\n #for lbl, cmd in (#(\"Adjacency graph\",self.RegionsAdjGraph),\n #(\"Group connected\", self.GroupConnectedRegs),\n #(\"Select non-placed\", self.SelectNonPlacedRegions),\n #(\"Apply threshold\", self.RegsDispThr),\n #(\"Group connected\", self.GroupConnectedRegs),\n #(\"Group by contacts\", self.GroupByContacts),\n #(\"Group using all fits\", self.GroupUsingFits),\n #(\"Ungroup ALL\", self.UngroupAllRegs),\n #(\"Reduce map\", self.ReduceMap),\n #(\"Close\", self.CloseRegions),\n #):\n #rmenu.add_command(label = lbl, command = cmd)\n #self.deleteCloseFiles = Tkinter.IntVar()\n #self.deleteCloseFiles.set ( 1 )\n\n #rmenu.add_separator()\n #for lbl, cmd in (\n # (\"Mask map with selected (Cube Result)\", self.MaskMapWRegionsCube),\n # (\"Extract map cube around selected\", self.ExtractMapWRegionsCube),\n # ):\n # rmenu.add_command(label = lbl, command = cmd)\n\n if dev_menus:\n graph_menu_entries = (\n #('Save graph', self.SaveGraph),\n #('Load graph', self.LoadGraph),\n #'separator',\n ('Create graph with uniform link radii', self.Graph),\n #(' - Use maximum density between regions', self.GraphMaxD),\n #(' - Use average density between regions', self.GraphAvgD),\n (' - Use number of contacting voxel pairs', self.GraphN),\n 'separator',\n ('Close graph', self.CloseGraph),\n #('Break selected links', graph.break_selected_links),\n #('Link selected', graph.link_selected),\n #('Group regions using skeleton', self.GroupBySkeleton),\n #('Join selected', self.GroupBySkeleton)\n )\n smenu = Hybrid.cascade_menu(menubar, 'Graph',\n graph_menu_entries)\n\n\n if 1 :\n\n #menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)\n #tw.config(menu = menubar)\n\n file_menu_entries = (\n ('Extract', self.ExtractDensities),\n ('SegFit', self.FitDialog),\n ('rSeg - Radial Segmentation', self.RSeg),\n (\"iSeg - Icosahedral Segmentation\", self.ISeg),\n (\"ProMod - Probabilistic Modeling\", self.ProMod)\n #(\"BioMovie\", self.BioMovie)\n )\n\n fmenu = Hybrid.cascade_menu(menubar, 'Tools', file_menu_entries)\n\n\n self.UseAllMods = Tkinter.IntVar()\n self.UseAllMods.set ( 0 )\n\n from chimera.tkgui import aquaMenuBar\n aquaMenuBar(menubar, parent, row = 0, columnspan=3)\n\n #umsg ( '')\n f = Tkinter.Frame(parent)\n 
f.grid(column=0, row=row, sticky='ew')\n row += 1\n\n l = Tkinter.Label(f, text=' Map:')\n l.grid(column=0, row=0, sticky='w')\n\n self.cur_dmap = None\n self.dmap = Tkinter.StringVar(parent)\n\n self.mb = Tkinter.Menubutton ( f, textvariable=self.dmap, relief=Tkinter.RAISED )\n self.mb.grid (column=1, row=0, sticky='we', padx=2)\n self.mb.menu = Tkinter.Menu ( self.mb, tearoff=0, postcommand=self.MapMenu )\n self.mb[\"menu\"] = self.mb.menu\n\n if 1:\n b = Tkinter.Button(f, text=\"Center\", command=self.MapCOM)\n b.grid (column=6, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Segment\", command=self.Segment)\n b.grid (column=7, row=0, sticky='w', padx=0)\n\n\n f = Tkinter.Frame(parent)\n f.grid(column=0, row=row, sticky='ew')\n row += 1\n\n l = Tkinter.Label(f, text=' Seg:')\n l.grid(column=0, row=0, sticky='w')\n\n self.cur_seg = None\n self.regions_file = Tkinter.StringVar(parent)\n\n rmb = Tkinter.Menubutton ( f, textvariable=self.regions_file,\n relief=Tkinter.RAISED )\n rmb.grid (column=1, row=0, sticky='we', padx=2)\n rmb.menu = Tkinter.Menu ( rmb, tearoff=0,\n postcommand=self.FillSegmentationMenu )\n self.mbSegmentationMenu = rmb.menu\n rmb[\"menu\"] = rmb.menu\n\n rc = Tkinter.Label(f, text='')\n rc.grid(column=2, row=0, sticky='w')\n self.regionCount = rc\n\n #b = Tkinter.Button(f, text=\"Group\", command=self.Group)\n #b.grid (column=7, row=0, sticky='w', padx=0)\n\n #b = Tkinter.Button(f, text=\"Ungroup\", command=self.Ungroup)\n #b.grid (column=8, row=0, sticky='w', padx=0)\n\n\n\n\n if dev_menus:\n cp = Hybrid.Popup_Panel(parent)\n cpf = cp.frame\n cpf.grid(row = row, column = 0, sticky = 'news')\n cpf.grid_remove()\n cpf.columnconfigure(0, weight=1)\n self.contactsPanel = cp.panel_shown_variable\n row += 1\n orow = 0\n\n #cb = cp.make_close_button(cpf)\n #cb.grid(row = orow, column = 0, sticky = 'e')\n\n l = Tkinter.Label(cpf, text='Contact Grouping', font = 'TkCaptionFont')\n l.grid(column=0, row=orow, sticky='w', pady=5)\n orow += 1\n\n s = Hybrid.Scale(cpf, 'Coloring level ', 1, 100, 1, 100)\n s.frame.grid(row = orow, column = 0, sticky = 'ew')\n orow += 1\n self.colorLevel = s\n s.callback(self.SetColorLevel)\n\n b = Tkinter.Button(cpf, text = 'Set Grouping',\n command = self.SetContactGrouping)\n b.grid(row = orow, column = 0, sticky = 'w')\n orow += 1\n\n\n\n # --- Options Frame ----------------------------------------------------------------------\n\n op = Hybrid.Popup_Panel(parent)\n opf = op.frame\n opf.grid(row = row, column = 0, sticky = 'news')\n opf.grid_remove()\n opf.columnconfigure(0, weight=1)\n self.optionsPanel = op.panel_shown_variable\n row += 1\n orow = 0\n\n dummyFrame = Tkinter.Frame(opf, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=orow,column=0,columnspan=7, pady=1, sticky='we')\n orow += 1\n\n #cb = op.make_close_button(opf)\n #cb.grid(row = orow, column = 1, sticky = 'e')\n\n #l = Tkinter.Label(opf, text='Segmenting Options', font = 'TkCaptionFont')\n #l.grid(column=0, row=orow, sticky='w', pady=5)\n #orow += 1\n\n sopt = Tkinter.Frame(opf)\n sopt.grid(column=0, row=orow, sticky='ew', padx=10)\n orow += 1\n\n sorow = 0\n\n\n if 1 :\n f = Tkinter.Frame(sopt)\n f.grid(column=0, row=sorow, sticky='w')\n sorow += 1\n\n l = Tkinter.Label(f, text='Display at most')\n l.grid(column=0, row=0, sticky='w')\n\n self.maxNumRegions = Tkinter.StringVar(sopt)\n self.maxNumRegions.set ( '6000' )\n e = Tkinter.Entry(f, width=5, textvariable=self.maxNumRegions)\n e.grid(column=1, row=0, sticky='w', 
padx=2)\n e.bind('<KeyPress-Return>', self.NewMaxRegions)\n\n l = Tkinter.Label(f, text='regions, granularity')\n l.grid(column=2, row=0, sticky='w')\n\n\n self.surfaceGranularity = Tkinter.StringVar(sopt)\n self.surfaceGranularity.set ( '1' )\n e = Tkinter.Entry(f, width=5, textvariable=self.surfaceGranularity)\n e.bind('<KeyPress-Return>', self.NewSurfaceResolution)\n e.grid(column=3, row=0, sticky='w', padx=2)\n\n l = Tkinter.Label(f, text='voxels')\n l.grid(column=4, row=0, sticky='w')\n\n\n\n if 1 :\n\n f = Tkinter.Frame(sopt)\n f.grid(column=0, row=sorow, sticky='w')\n sorow += 1\n\n l = Tkinter.Label(f, text=\"Keep regions having >\")\n l.grid(column=0, row=0, sticky='w')\n\n self.minRegionSize = Tkinter.StringVar(parent)\n self.minRegionSize.set ( '1' )\n e = Tkinter.Entry(f, width=5, textvariable=self.minRegionSize)\n e.grid(column=1, row=0, sticky='w', padx=2)\n e.bind('<KeyPress-Return>', lambda e: self.RemoveSmallRegions())\n\n l = Tkinter.Label(f, text='voxels')\n l.grid(column=2, row=0, sticky='w')\n\n l = Tkinter.Label(f, text=', ')\n l.grid(column=4, row=0, sticky='w')\n\n self.minContactSize = Tkinter.StringVar(parent)\n self.minContactSize.set ( '0' )\n e = Tkinter.Entry(f, width=5, textvariable=self.minContactSize)\n e.grid(column=5, row=0, sticky='w', padx=2)\n e.bind('<KeyPress-Return>', lambda e: self.RemoveContactRegions())\n\n l = Tkinter.Label(f, text='contact voxels')\n l.grid(column=6, row=0, sticky='w')\n\n\n if 1 :\n\n f = Tkinter.Frame(sopt)\n f.grid(column=0, row=sorow, sticky='w')\n sorow += 1\n\n self.groupMode = Tkinter.StringVar()\n self.groupMode.set ( 'cons' )\n\n\n c = Tkinter.Radiobutton(f, text=\"Group by smoothing\", variable=self.groupMode, value = 'smooth')\n c.grid (column=0, row=0, sticky='w')\n\n self.numSteps = Tkinter.StringVar(sopt)\n self.numSteps.set ( '4' )\n e = Tkinter.Entry(f, width=5, textvariable=self.numSteps)\n e.grid(column=1, row=0, sticky='w', padx=2)\n\n l = Tkinter.Label(f, text='steps size')\n l.grid(column=2, row=0, sticky='w')\n\n self.stepSize = ss = Tkinter.StringVar(sopt)\n ss.set('1')\n e = Tkinter.Entry(f, width=2, textvariable=self.stepSize)\n e.grid(column=3, row=0, sticky='w', padx=2)\n\n l = Tkinter.Label(f, text=', stop at')\n l.grid(column=4, row=0, sticky='w')\n\n self.targNRegions = Tkinter.StringVar(sopt)\n self.targNRegions.set ( '1' )\n e = Tkinter.Entry(f, width=2, textvariable=self.targNRegions)\n e.grid(column=5, row=0, sticky='w', padx=2)\n\n l = Tkinter.Label(f, text='regions')\n l.grid(column=6, row=0, sticky='w')\n\n\n if 1 :\n\n f = Tkinter.Frame(sopt)\n f.grid(column=0, row=sorow, sticky='w')\n sorow += 1\n\n c = Tkinter.Radiobutton(f, text=\"Group by connections\", variable=self.groupMode, value = 'cons')\n c.grid (column=0, row=0, sticky='w')\n\n self.numStepsCon = Tkinter.StringVar(sopt)\n self.numStepsCon.set ( '0' )\n e = Tkinter.Entry(f, width=5, textvariable=self.numStepsCon)\n e.grid(column=1, row=0, sticky='w', padx=2)\n\n l = Tkinter.Label(f, text='steps, stop at')\n l.grid(column=4, row=0, sticky='w')\n\n self.targNRegionsCon = Tkinter.StringVar(sopt)\n self.targNRegionsCon.set ( '1' )\n e = Tkinter.Entry(f, width=2, textvariable=self.targNRegionsCon)\n e.grid(column=5, row=0, sticky='w', padx=2)\n\n l = Tkinter.Label(f, text='regions')\n l.grid(column=6, row=0, sticky='w')\n\n #oft = Hybrid.Checkbutton(f, 'only visible', False )\n #oft.button.grid(column=7, row=0, sticky='w')\n #self.groupByConsOnlyVis = oft.variable\n\n\n if 0:\n f = Tkinter.Frame(sopt)\n f.grid(column=0, row=sorow, 
sticky='w')\n sorow += 1\n\n l = Tkinter.Label(f, text='Minimum connecting voxels ')\n l.grid(column=0, row=0, sticky='w')\n\n self.minConnection = Tkinter.StringVar(sopt)\n self.minConnection.set ( '0' )\n e = Tkinter.Entry(f, width=5, textvariable=self.minConnection)\n e.grid(column=1, row=0, sticky='w', padx=2)\n\n\n mmf = Tkinter.Frame(sopt)\n mmf.grid(row = sorow, column = 0, sticky = 'ew')\n sorow += 1\n\n mg = Hybrid.Checkbutton(mmf, 'Group with mouse ', False)\n mg.button.grid(row = 0, column = 0, sticky = 'w')\n self.mouse_group = mg.variable\n mg.callback(self.mouse_group_cb)\n\n mgb = Hybrid.Option_Menu(mmf, '', 'button 1', 'button 2',\n 'button 3', 'ctrl button 1',\n 'ctrl button 2', 'ctrl button 3')\n mgb.variable.set('button 3')\n mgb.frame.grid(row = 0, column = 1, sticky = 'w')\n mgb.add_callback(self.mouse_group_button_cb)\n self.mouse_group_button = mgb\n\n\n\n if 1 :\n f = Tkinter.Frame(sopt)\n f.grid(column=0, row=sorow, sticky='w')\n\n oft = Hybrid.Checkbutton(f, '', False )\n #oft.button.grid(row = 0, column = 0, sticky = 'w')\n self.useSymmetry = oft.variable\n oft.button.grid(column=0, row=0, sticky='w')\n\n l = Tkinter.Label(f, text='Symm:')\n l.grid(column=1, row=0, sticky='w')\n\n self.symmetryString = Tkinter.StringVar(f)\n e = Tkinter.Entry(f, width=7, textvariable=self.symmetryString)\n e.grid(column=2, row=0, sticky='w', padx=0)\n\n b = Tkinter.Button(f, text=\"Get\", command=self.DetectSym)\n b.grid (column=3, row=0, sticky='w', padx=0)\n\n b = Tkinter.Button(f, text=\"Sel>\", command=self.ShowSelSymm)\n b.grid (column=4, row=0, sticky='w', padx=0)\n\n l = Tkinter.Label(f, text='Color:')\n l.grid(column=5, row=0, sticky='w')\n\n b = Tkinter.Button(f, text=\"Same\", command=self.ColorSymmSame)\n b.grid (column=6, row=0, sticky='w', padx=0)\n\n b = Tkinter.Button(f, text=\"Diff\", command=self.ColorSymmDiff)\n b.grid (column=7, row=0, sticky='w', padx=0)\n\n #sorow += 1\n\n\n\n # --- Shortcuts Frame ----------------------------------------------------------------------\n\n sc = Hybrid.Popup_Panel(parent)\n scf = sc.frame\n scf.grid(row = row, column = 0, sticky = 'news')\n scf.grid_remove()\n scf.columnconfigure(0, weight=1)\n self.shortcutsPanelShownVar = sc.panel_shown_variable\n row += 1\n orow = 0\n\n dummyFrame = Tkinter.Frame(scf, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=orow,column=0,columnspan=3, pady=1, sticky='we')\n orow += 1\n\n #cb = sc.make_close_button(scf)\n #cb.grid(row = orow, column = 1, sticky = 'e')\n\n #l = Tkinter.Label(scf, text='Shortcuts - Regions', font = 'TkCaptionFont')\n #l.grid(column=0, row=orow, sticky='w', pady=5)\n #orow += 1\n\n sopt = Tkinter.Frame(scf)\n sopt.grid(column=0, row=orow, sticky='ew', padx=10)\n orow += 1\n sorow = 0\n\n\n f = Tkinter.Frame(sopt)\n f.grid(column=0, row=sorow, sticky='w')\n sorow += 1\n if 1 :\n l = Tkinter.Label(f, text='Show: ', width=7, anchor=Tkinter.E)\n l.grid(column=0, row=0, sticky='w')\n\n b = Tkinter.Button(f, text=\"None\", command=self.RegSurfsShowNone)\n b.grid (column=1, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"All\", command=self.RegSurfsShowAll)\n b.grid (column=2, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Sel\", command=self.RegSurfsShowOnlySelected)\n b.grid (column=3, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Adj\", command=self.RegSurfsShowAdjacent)\n b.grid (column=4, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Not-\", command=self.RegSurfsShowNotGrouped)\n 
b.grid (column=5, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Grouped\", command=self.RegSurfsShowGrouped)\n b.grid (column=6, row=0, sticky='w', padx=2)\n\n\n if 0 :\n b = Tkinter.Button(f, text=\"Axes\", command=self.ShowRegionAxesSelected)\n b.grid (column=4, row=0, sticky='w', padx=2)\n\n self.axesFactor = Tkinter.StringVar(f)\n self.axesFactor.set ( \"3\" )\n e = Tkinter.Entry(f, width=4, textvariable=self.axesFactor)\n e.grid(column=5, row=0, sticky='w', padx=2)\n\n\n f = Tkinter.Frame(sopt)\n f.grid(column=0, row=sorow, sticky='w')\n sorow += 1\n if 1 :\n b = Tkinter.Label(f, text=\"Select: \", width=7, anchor=Tkinter.E)\n b.grid (column=0, row=0, sticky='w', padx=0)\n\n self.overlappingPercentage = Tkinter.StringVar(f)\n self.overlappingPercentage.set ( \"50\" )\n #e = Tkinter.Entry(f, width=4, textvariable=self.overlappingPercentage)\n #e.grid(column=1, row=0, sticky='w', padx=2)\n #l = Tkinter.Label(f, text=\"%\")\n #l.grid(column=2, row=0, sticky='w')\n\n\n b = Tkinter.Button(f, text=\"All\", command=self.SelectAllRegions)\n b.grid (column=1, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Over Sel\", command=self.Overlapping)\n b.grid (column=2, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Flip\", command=self.Invert)\n b.grid (column=3, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Not-\", command=self.SelectNotGrouped)\n b.grid (column=4, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Grouped\", command=self.SelectGrouped)\n b.grid (column=5, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Vis\", command=self.SelectVisible)\n b.grid (column=6, row=0, sticky='w', padx=2)\n\n\n f = Tkinter.Frame(sopt)\n f.grid(column=0, row=sorow, sticky='w')\n sorow += 1\n if 1 :\n l = Tkinter.Label(f, text='Selected: ', width=7, anchor=Tkinter.E)\n l.grid(column=0, row=0)\n\n #b = Tkinter.Button(f, text=\"Group\", command=self.Group)\n #b.grid (column=1, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(f, text=\"Ungroup\", command=self.Ungroup)\n #b.grid (column=2, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"H\", command=self.RegSurfsHide)\n b.grid (column=1, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"S\", command=self.RegSurfsShow)\n b.grid (column=2, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Del\", command=self.DelSelRegs)\n b.grid (column=3, row=0, sticky='w', padx=2)\n\n\n #f = Tkinter.Frame(sopt)\n #f.grid(column=0, row=sorow, sticky='w')\n #sorow += 1\n #if 1 :\n #l = Tkinter.Label(f, text=' ', width=15)\n #l.grid(column=0, row=0, sticky='w')\n\n b = Tkinter.Button(f, text=\"Tr\", command=self.RegSurfsTransparent)\n b.grid (column=4, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Opq\", command=self.RegSurfsOpaque)\n b.grid (column=5, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(f, text=\"Mesh\", command=self.RegSurfsMesh)\n #b.grid (column=6, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Un-\", command=self.Ungroup)\n b.grid (column=6, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Group\", command=self.Group)\n b.grid (column=7, row=0, sticky='w', padx=2)\n\n\n #b = Tkinter.Button(f, text=\"Invert Selection\", command=self.Invert)\n #b.grid (column=3, row=0, sticky='w', padx=2)\n\n\n\n # ---------- end of shortcuts frame ------------------------------------------------------\n\n\n\n # --- Tools Frame ----------------------------------------------------------------------\n\n sc = Hybrid.Popup_Panel(parent)\n 
scf = sc.frame\n scf.grid(row = row, column = 0, sticky = 'news')\n scf.grid_remove()\n scf.columnconfigure(0, weight=1)\n self.toolsPanelShownVar = sc.panel_shown_variable\n row += 1\n orow = 0\n\n dummyFrame = Tkinter.Frame(scf, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=orow,column=0,columnspan=7, pady=1, sticky='we')\n orow += 1\n\n #cb = sc.make_close_button(scf)\n #cb.grid(row = orow, column = 1, sticky = 'e')\n\n #l = Tkinter.Label(scf, text=' Tools', font = 'TkCaptionFont')\n #l.grid(column=0, row=orow, sticky='w', pady=1)\n #orow += 1\n\n sopt = scf\n #sopt = Tkinter.Frame(scf)\n #sopt.grid(column=0, row=orow, sticky='ew', padx=10)\n #orow += 1\n sorow = orow\n\n\n if 1 :\n # flat, groove, raised, ridge, solid, or sunken\n dummyFrame = Tkinter.Frame(sopt, relief='flat', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=sorow,column=0,columnspan=3, pady=3, sticky='we')\n #sorow += 1\n\n f = Tkinter.Frame(sopt)\n f.grid(column=0, row=sorow, sticky='w')\n sorow += 1\n\n l = Tkinter.Label(f, text=' ', width=1, anchor=Tkinter.E)\n l.grid(column=0, row=0)\n\n b = Tkinter.Button(f, text=\"Extract\", command=self.ExtractDensities)\n b.grid (column=1, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"SegFit\", command=self.FitDialog)\n b.grid (column=2, row=0, sticky='w', padx=2)\n\n if dev_menus :\n b = Tkinter.Button(f, text=\"SegMod\", command=self.SegMod)\n b.grid (column=3, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"SWIM\", command=self.SWIM)\n b.grid (column=4, row=0, sticky='w', padx=2)\n\n if not dev_menus :\n #b = Tkinter.Button(f, text=\"rSeg\", command=self.RSeg)\n #b.grid (column=5, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"iSeg\", command=self.ISeg)\n b.grid (column=6, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(f, text=\"SegLoop\", command=self.SegLoop)\n #b.grid (column=5, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"ProMod\", command=self.ProMod)\n b.grid (column=7, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(f, text=\"ModelZ\", command=self.ModelZ)\n #b.grid (column=7, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"MapQ\", command=self.MapQ)\n b.grid (column=8, row=0, sticky='w', padx=2)\n\n if dev_menus :\n b = Tkinter.Button(f, text=\"BioMovie\", command=self.BioMovie)\n b.grid (column=9, row=0, sticky='w', padx=2)\n\n\n #b = Tkinter.Button(f, text=\"Frk\", command=self.Frankensteinify)\n #b.grid (column=9, row=0, sticky='w', padx=2)\n\n\n\n if showDevTools or dev_menus :\n f = Tkinter.Frame(sopt)\n f.grid(column=0, row=sorow, sticky='w')\n sorow += 1\n\n l = Tkinter.Label(f, text=' ', width=1, anchor=Tkinter.E)\n l.grid(column=0, row=0)\n\n #b = Tkinter.Button(f, text=\"GeoSeg\", command=self.GeoSegDialog)\n #b.grid (column=1, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(f, text=\"SSE\", command=self.SSE)\n #b.grid (column=2, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(f, text=\"Animate\", command=self.Animate)\n #b.grid (column=3, row=0, sticky='w', padx=2)\n\n\n b = Tkinter.Button(f, text=\"FlexFit\", command=self.FlexFit)\n b.grid (column=5, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"Geo\", command=self.GeoSeg)\n b.grid (column=6, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"MDFF\", command=self.MDFF)\n b.grid (column=9, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(f, text=\"Pi\", command=self.PiFold)\n #b.grid (column=10, row=0, sticky='w', padx=2)\n\n\n #f = 
Tkinter.Frame(sopt)\n #f.grid(column=0, row=sorow, sticky='w')\n #sorow += 1\n\n #l = Tkinter.Label(f, text=' ', width=1, anchor=Tkinter.E)\n #l.grid(column=0, row=0)\n\n b = Tkinter.Button(f, text=\"Mod\", command=self.SegMod0)\n b.grid (column=10, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"NA\", command=self.SegNA)\n b.grid (column=11, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(f, text=\"Ar\", command=self.Ar)\n #b.grid (column=3, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(f, text=\"VR\", command=self.Vr)\n b.grid (column=12, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(f, text=\"Mono\", command=self.CamMono)\n #b.grid (column=13, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(f, text=\"SBS\", command=self.CamSBS)\n #b.grid (column=14, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(f, text=\"Spr\", command=self.Spr)\n #b.grid (column=7, row=0, sticky='w', padx=2)\n\n\n # ---------- end of tools frame ------------------------------------------------------\n\n\n dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=row,column=0,columnspan=3, pady=7, sticky='we')\n\n\n row += 1\n\n f = Tkinter.Frame(parent)\n f.grid(column=0, row=row, sticky='ew')\n row += 1\n\n l = Tkinter.Label(f, text='To cite Segger or learn more about it press the Help button', fg=\"blue\")\n l.grid(column=0, row=0, sticky='w')\n\n dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=row,column=0,columnspan=3, pady=3, sticky='we')\n row += 1\n\n global msg\n msg = Tkinter.Label(parent, width = 20, anchor = 'w', justify = 'left', fg=\"red\")\n msg.grid(column=0, row=row, sticky='ew')\n self.msg = msg\n\n umsg ( 'Select an open density map in the field above and press Segment!' 
)\n row += 1\n\n vlist = VolumeViewer.volume_list()\n if vlist:\n self.SetMapMenu(vlist[0])\n\n for m in regions.segmentations() :\n\n v = m.volume_data()\n if v and m.display :\n self.SetCurrentSegmentation ( m )\n try : self.SetMapMenu ( v )\n except : pass\n\n chimera.openModels.addRemoveHandler(self.ModelClosed, None)\n\n if dev_menus :\n self.optionsPanel.set(True)\n self.shortcutsPanelShownVar.set(True)\n self.toolsPanelShownVar.set(True)\n\n\n def SetColorLevel(self):\n\n smod = self.CurrentSegmentation()\n if smod is None:\n return\n\n s = self.colorLevel\n lev = int(s.value())\n\n regions = [r for r in smod.all_regions()\n if hasattr(r, 'color_level') and r.color_level >= lev and\n (r.preg is None or r.preg.color_level < lev)]\n smod.color_density(regions)\n return\n\n # TODO: Unused code adjusts region surface colors.\n\n if not hasattr(smod, 'contact_grouping'):\n cg = smod.contact_grouping = regions.contact_grouping(smod)\n smod.region_count = len(smod.childless_regions())\n cg = smod.contact_grouping\n range = (smod.region_count - len(cg), smod.region_count)\n if s.range() != range:\n s.set_range(range[0], range[1], step = 1)\n p = max(0, smod.region_count - int(s.value()))\n\n cs, pairs = regions.connected_subsets(cg[:p])\n\n # Reset colors\n for r in smod.regions:\n sp = r.surface_piece\n if sp:\n sp.color = r.color\n\n # Color groups\n for rlist in cs:\n r0 = rlist[0]\n sp0 = r0.surface_piece\n if sp0:\n c = sp0.color\n for r in rlist[1:]:\n sp = r.surface_piece\n if sp:\n sp.color = c\n\n def SetColorLevelRange(self):\n\n smod = self.CurrentSegmentation()\n if smod is None:\n return\n\n clevels = [r.color_level for r in smod.all_regions()\n if hasattr(r, 'color_level')]\n clmin = min(clevels)\n clmax = max(clevels)\n cl = self.colorLevel\n cl.set_range(clmin, clmax, step = 1)\n cl.set_value(clmin)\n\n def SetContactGrouping(self):\n\n smod = self.CurrentSegmentation()\n if smod is None:\n return\n\n s = self.colorLevel\n lev = int(s.value())\n\n regions = [r for r in smod.all_regions()\n if hasattr(r, 'color_level') and r.color_level < lev]\n smod.remove_regions(regions, update_surfaces = True)\n self.RegsDispUpdate()\n\n\n def ColorDensity(self):\n\n smod = self.CurrentSegmentation()\n if smod is None:\n return\n\n smod.color_density()\n\n\n\n\n def Options(self) :\n self.optionsPanel.set (not self.optionsPanel.get())\n\n def Shortcuts (self) :\n print \"shortcuts\"\n self.shortcutsPanelShownVar.set ( not self.shortcutsPanelShownVar.get() )\n\n def Tools (self) :\n print \"tools\"\n self.toolsPanelShownVar.set ( not self.toolsPanelShownVar.get() )\n\n def Log ( self ) :\n import Idle\n Idle.start_shell()\n\n\n def RSeg ( self ) :\n import Segger.rseg_dialog\n reload ( Segger.rseg_dialog )\n Segger.rseg_dialog.show_dialog()\n\n def ISeg ( self ) :\n import Segger.iseg_dialog\n reload ( Segger.iseg_dialog )\n Segger.iseg_dialog.show_dialog()\n\n def SSE ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.sse_dialog\n reload ( Segger.sse_dialog )\n Segger.sse_dialog.show_sse_dialog()\n\n def SegLoop ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.segloop_dialog\n reload ( Segger.segloop_dialog )\n Segger.segloop_dialog.show_dialog()\n\n def SegMod0 ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.segmod_dialog\n reload ( Segger.segmod_dialog )\n Segger.segmod_dialog.show_dialog()\n\n def SegNA ( self ) :\n # self.ssePanelShownVar.set ( not 
self.ssePanelShownVar.get() )\n import Segger.segna_dialog\n reload ( Segger.segna_dialog )\n Segger.segna_dialog.show_dialog()\n\n\n def Ar ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.ar_dialog\n reload ( Segger.ar_dialog )\n Segger.ar_dialog.show_dialog()\n\n\n def Spr ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.spr_dialog\n reload ( Segger.spr_dialog )\n Segger.spr_dialog.show_dialog()\n\n\n def Vr ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.vr_dialog\n reload ( Segger.vr_dialog )\n Segger.vr_dialog.show_dialog()\n\n\n def CamMono ( self ) :\n chimera.viewer.camera.setMode ( \"mono\" )\n\n def CamSBS ( self ) :\n chimera.viewer.camera.setMode ( \"DTI side-by-side stereo\" )\n\n\n def ProMod ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.promod_dialog\n reload ( Segger.promod_dialog )\n Segger.promod_dialog.show_dialog()\n\n\n def ModelZ ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.modelz\n reload ( Segger.modelz )\n Segger.modelz.show_dialog()\n\n def MapQ ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.mapq\n reload ( Segger.mapq )\n Segger.mapq.show_dialog()\n\n\n\n def BioMovie ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.biomovie\n reload ( Segger.biomovie )\n Segger.biomovie.show_dialog()\n\n\n def SWIM ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.SWIM\n reload ( Segger.SWIM )\n Segger.SWIM.show_dialog()\n\n def SegMod ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.SegMod\n reload ( Segger.SegMod )\n Segger.SegMod.show_dialog()\n\n def MDFF ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.mdff_dialog\n reload ( Segger.mdff_dialog )\n Segger.mdff_dialog.show_dialog()\n\n def PiFold ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.PiFold\n reload ( Segger.PiFold )\n Segger.PiFold.show_dialog()\n\n def Animate ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.animate_dialog\n reload ( Segger.animate_dialog )\n Segger.animate_dialog.close_animate_dialog ()\n Segger.animate_dialog.show_dialog()\n\n def FlexFit ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.flexfit_dialog\n reload ( Segger.flexfit_dialog )\n Segger.flexfit_dialog.show_dialog()\n\n\n def Tomolog ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.tomolog_dialog\n reload ( Segger.tomolog_dialog )\n Segger.tomolog_dialog.show_dialog()\n\n def GeoSeg ( self ) :\n # self.ssePanelShownVar.set ( not self.ssePanelShownVar.get() )\n import Segger.geoseg_dialog\n reload ( Segger.geoseg_dialog )\n Segger.geoseg_dialog.show_dialog()\n\n\n def MapCOM ( self ) :\n\n dmap = self.SegmentationMap()\n\n import axes\n pts, weights = axes.map_points ( dmap )\n if len(pts) == 0 :\n print \" - no pts at this threshold?\"\n return\n\n COM, U, S, V = axes.prAxes ( pts )\n print \"com:\", COM\n\n #chimera.viewer.camera.center = chimera.Point ( COM[0], COM[1], COM[2] )\n #xf = chimera.Xform.translation ( chimera.Vector( -COM[0], -COM[1], -COM[2] ) )\n #dmap.openState.xform = xf\n\n p = chimera.Point ( COM[0], 
COM[1], COM[2] )\n chimera.openModels.cofr = dmap.openState.xform.apply ( p )\n\n moveCam = 1\n if moveCam :\n p0 = numpy.array ( chimera.viewer.camera.center )\n p1 = numpy.array ( chimera.openModels.cofr )\n for i in range (10) :\n f = float(i) / 9.0\n f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f\n P = p0 * f1 + p1 * f2\n chimera.viewer.camera.center = (P[0],P[1],P[2])\n print \".\",\n print \"\"\n\n\n\n\n def OpenSegmentation(self):\n\n dmap = self.SegmentationMap()\n dir = os.path.dirname(dmap.data.path) if dmap else None\n import segfile\n segfile.show_open_dialog(dir, self.OpenSegFiles)\n\n\n\n def OpenSegFiles(self, paths_and_types, open = True):\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Loading segmentation', modal = True)\n smods = []\n try:\n import segfile\n reload (segfile)\n for path, ftype in paths_and_types:\n if ftype == 'Segmentation':\n try:\n smod = segfile.read_segmentation(path, open, task)\n except CancelOperation:\n break\n elif ftype == 'Old regions file':\n dmap = self.SegmentationMap()\n if dmap is None:\n from chimera.replyobj import error\n from os.path import basename\n error('Segmentation map must be open before opening old-style segmentation file\\n\\n\\t%s\\n\\nbecause file does not contain grid size and spacing.' % basename(path))\n return\n import regionsfile\n smod = regionsfile.ReadRegionsFile ( path, dmap )\n smods.append(smod)\n\n if len(smods) == 0:\n umsg ( \"No segmentation was loaded.\" )\n return\n\n for smod in smods:\n smod.open_map()\n\n # TODO: Can't control whether marker model is opened.\n smod = smods[-1]\n self.SetCurrentSegmentation(smod)\n v = smod.volume_data()\n if v:\n self.SetMapMenu(v)\n else :\n umsg ( \"Volume data not found\" )\n try:\n self.RegsDispUpdate (task)\n except CancelOperation:\n pass\n\n finally:\n task.finished()\n\n for s in smods:\n mname = os.path.basename(getattr(s, 'map_path', 'unknown'))\n umsg('Opened segmentation %s of map %s, grid size (%d,%d,%d)'\n % ((s.name, mname) + tuple(s.grid_size())))\n\n\n return smods\n\n\n def SaveSegmentation(self):\n\n smod = self.CurrentSegmentation()\n if smod:\n\n map = smod.volume_data()\n if map == None :\n umsg ( \"Map not found - please associate a map first\" )\n return\n\n if hasattr(smod, 'path') and smod.path:\n import segfile\n segfile.write_segmentation(smod, smod.path)\n umsg ( \"Saved\" )\n\n else:\n self.SaveSegmentationAs()\n else :\n umsg ( \"No segmentation selected\" )\n\n\n\n def SaveSegmentationAs(self):\n\n smod = self.CurrentSegmentation()\n if smod:\n import segfile\n segfile.show_save_dialog(smod, self.path_changed_cb)\n\n def path_changed_cb(self, seg):\n\n if seg is self.CurrentSegmentation():\n seg.name = os.path.basename(seg.path)\n self.regions_file.set(seg.name)\n\n def ModelClosed(self, trigger, n, mlist):\n\n # Clear menus that are showing closed models.\n if self.cur_dmap in mlist:\n self.SetMapMenu(None)\n if self.cur_seg in mlist:\n self.cur_seg = None\n self.regions_file.set('')\n\n def MapMenu ( self ) :\n\n self.mb.menu.delete ( 0, 'end' ) # Clear menu\n from VolumeViewer import Volume\n mlist = OML(modelTypes = [Volume])\n for m in mlist :\n self.mb.menu.add_radiobutton ( label=\"%s (%d)\"%(m.name, m.id), variable=self.dmap,\n command=lambda m=m: self.MapSelected(m) )\n\n def SetMapMenu (self, dmap):\n\n if dmap == None :\n self.dmap.set('')\n else :\n self.dmap.set( \"%s (%d)\" % (dmap.name, dmap.id) )\n self.cur_dmap = dmap\n #print \"Set map menu to \", dmap.name\n\n def MapSelected ( self, dmap ) :\n\n 
self.cur_dmap = dmap\n #if dmap:\n # dmap.display = True\n\n def SegmentationMap(self):\n\n return self.cur_dmap\n\n def FillSegmentationMenu ( self ) :\n\n menu = self.mbSegmentationMenu\n menu.delete ( 0, 'end' ) # Clear menu\n\n #open_names = [(m.name, m) for m in regions.segmentations()]\n open_names = []\n for m in chimera.openModels.list() :\n #print m.name\n if os.path.splitext (m.name)[1] == \".seg\" :\n open_names.append ( (m.name, m) )\n\n\n if len(open_names ) > 0 :\n menu.add_radiobutton ( label=\"Open regions files:\" )\n menu.add_separator()\n open_names.sort()\n for name, smod in open_names:\n menu.add_radiobutton (label= \"%s (%d)\"%(name, smod.id), variable=self.regions_file,\n command=lambda smod=smod: self.RFileSelected(smod) )\n\n smm = self.SegmentationMap()\n if smm == None :\n self.SetCurrentSegmentation(None)\n self.SetMapMenu(None)\n return\n\n path = os.path.dirname ( smm.data.path ) + os.path.sep\n bname = os.path.splitext ( smm.name ) [0]\n\n files = os.listdir ( path );\n names_in_path = []\n for f in files :\n if f.find ( bname ) < 0 or f.find('.seg') < 0 : continue\n if f.find ( \".txt\" ) >= 0 : continue\n if f.find ( \".mrc\" ) >= 0 : continue\n if not f in open_names :\n names_in_path.append ( f )\n\n if len ( names_in_path ) == 0 : return\n\n if len(open_names ) > 0 :\n menu.add_separator()\n\n menu.add_radiobutton ( label=\"In %s:\" % path )\n menu.add_separator()\n\n for f in names_in_path :\n menu.add_radiobutton (\n label=f, variable=self.regions_file, command=self.RFileSelected )\n\n\n\n def RFileSelected ( self, rmod = None ) :\n\n if rmod is None:\n mm = self.SegmentationMap()\n if mm == None :\n print self.dmap.get(), \"not open\";\n return\n path = os.path.dirname(mm.data.path) + os.path.sep\n rfile = self.regions_file.get()\n print \" - opening seg file \" + rfile + \" for map \" + mm.name\n rmod = self.OpenSegFiles ( [(path + rfile, 'Segmentation')] )[-1]\n else:\n rmod.display = True\n if rmod.adj_graph : rmod.adj_graph.show_model(True)\n self.ReportRegionCount(rmod)\n\t\t\t# hiding all other segmentations can be annoying sometimes\n if 1 :\n\t for m in regions.segmentations():\n\t if m != rmod:\n\t m.display = False\n\t if m.adj_graph : m.adj_graph.show_model(False)\n rfile = rmod.name\n\n self.cur_seg = rmod\n self.SetMapMenu(rmod.volume_data())\n\n umsg ( \"Showing %s - %d regions, %d surfaces\" %\n (rfile, len(rmod.regions), len(rmod.surfacePieces)) )\n\n\n def CurrentSegmentation ( self, warn = True ):\n\n if warn and self.cur_seg is None:\n umsg ( \"No segmentation chosen\" )\n return self.cur_seg\n\n def SetCurrentSegmentation ( self, smod ):\n\n self.cur_seg = smod\n self.regions_file.set(smod.name if smod else '')\n if smod:\n self.SetMapMenu(smod.volume_data())\n\n def NewSurfaceResolution( self, event = None ):\n\n smod = self.CurrentSegmentation()\n if smod is None:\n return\n\n self.SetSurfaceGranularity(smod)\n\n def SetSurfaceGranularity ( self, smod ):\n\n g = self.surfaceGranularity.get()\n try:\n res = float(g)\n except:\n umsg ('Surface granularity \"%s\" is not a number' % g)\n return\n\n if res <= 0:\n return\n\n if smod.regions:\n from chimera import tasks, CancelOperation\n task = tasks.Task('Changing surface resolution', modal = True)\n try:\n smod.change_surface_resolution(res, task)\n except CancelOperation:\n pass\n finally:\n task.finished()\n else:\n smod.change_surface_resolution(res)\n\n\n def NewMaxRegions ( self, event = None ):\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Redisplaying 
regions', modal = True)\n try:\n self.RegsDispUpdate (task)\n except CancelOperation:\n pass\n finally:\n task.finished()\n\n\n def CloseHiddenSeg ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n for m in regions.segmentations () :\n if not m.display:\n umsg ( \"Closed %s\" % m.name )\n m.close()\n\n\n\n def SaveRegsToMRC ( self, regs, dmap, path = None ) :\n\n segs = set([r.segmentation for r in regs])\n for s in segs:\n if tuple(s.grid_size()) != tuple(dmap.data.size):\n from chimera import replyobj\n replyobj.error('Cannot mask map.\\n\\n'\n 'Map %s grid size (%d,%d,%d) does not match '\n 'segmentation %s grid size (%d,%d,%d).'\n % ((dmap.name,) + tuple(dmap.data.size) +\n (s.name,) + tuple(s.grid_size())))\n return\n\n if path is None:\n # Show file chooser dialog.\n fprefix = os.path.splitext(dmap.name)[0]\n if len(regs) == 1 :\n fname = fprefix + \"_region_%d.mrc\" % regs[0].rid\n else :\n fname = fprefix + \"_%d_regions.mrc\" % len(regs)\n dir = os.path.dirname ( dmap.data.path )\n import OpenSave\n d = OpenSave.SaveModal ( title = \"Save Masked Map\",\n initialdir = dir, initialfile = fname,\n filters = [('MRC map', '*.mrc', '.mrc')] )\n paths_and_types = d.run ( self.toplevel_widget )\n if paths_and_types:\n path = paths_and_types[0][0]\n else:\n return\n\n (li,lj,lk), (hi,hj,hk) = regions.region_bounds(regs)\n\n bound = 2\n li = li - bound; lj = lj - bound; lk = lk - bound\n hi = hi + bound; hj = hj + bound; hk = hk + bound\n\n n1 = hi - li + 1\n n2 = hj - lj + 1\n n3 = hk - lk + 1\n\n print \"Bounds - %d %d %d --> %d %d %d --> %d %d %d\" % ( li, lj, lk, hi, hj, hk, n1,n2,n3 )\n\n umsg ( \"Saving %d regions to mrc file...\" % len(regs) )\n\n nmat = numpy.zeros ( (n3,n2,n1), numpy.float32 )\n dmat = dmap.full_matrix()\n\n #regs_name = \"\"\n for reg in regs :\n p = reg.points()\n i,j,k = p[:,0],p[:,1],p[:,2]\n nmat[k-lk,j-lj,i-li] = dmat[k,j,i]\n\n O = dmap.data.origin\n print \"origin:\", O\n nO = ( O[0] + float(li) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + float(lk) * dmap.data.step[2] )\n\n print \"new origin:\", nO\n\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, dmap.data.step, dmap.data.cell_angles )\n try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n nv.name = os.path.basename ( path )\n\n nv.openState.xform = dmap.openState.xform\n\n nv.write_file ( path, \"mrc\" )\n\n if [s.volume_data() for s in segs] == [dmap]:\n umsg ( \"Wrote %s\" % ( nv.name, ) )\n else:\n umsg ( \"Masked map %s, wrote %s\" % ( dmap.name, nv.name ) )\n\n\n\n def WriteSelRegionsMRCFile ( self ) :\n\n if len(self.dmap.get()) == 0 : umsg (\"Please select a map from which density will be taken\" ); return\n dmap = self.SegmentationMap()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n umsg ( \"Please select one ore more regions to save to .mrc file\" )\n return\n\n self.SaveRegsToMRC ( regs, dmap )\n\n\n def WriteAllRegionsMRCFile ( self ) :\n\n if len(self.dmap.get()) == 0 : umsg (\"Please select a map from which density will be taken\" ); return\n dmap = self.SegmentationMap()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n regs = [ sp.region for sp in smod.surfacePieces\n if hasattr(sp, 'region')]\n\n self.SaveRegsToMRC ( 
regs, dmap )\n\n\n\n\n def WriteEachRegionMRCFile ( self ) :\n\n if len(self.dmap.get()) == 0 : umsg (\"Please select a map from which density will be taken\" ); return\n dmap = self.SegmentationMap()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n regs = smod.regions\n\n # Choose file path.\n dir = os.path.dirname ( dmap.data.path )\n fprefix = os.path.splitext ( dmap.name ) [0]\n fname = fprefix + \"_region_%d.mrc\"\n import OpenSave\n d = OpenSave.SaveModal ( title = \"Save Masked Map\",\n initialdir = dir, initialfile = fname,\n filters = [('MRC map', '*.mrc', '.mrc')] )\n paths_and_types = d.run ( self.toplevel_widget )\n if paths_and_types:\n path = paths_and_types[0][0]\n else:\n return\n if not '%d' in path:\n umsg ( \"Must include '%d' in map file name for region number\" )\n return\n\n print \"Saving each of %d regions to .mrc files\" % len(regs)\n\n for reg in regs :\n self.SaveRegsToMRC ( [reg], dmap, path % (reg.rid,) )\n\n\n\n def MaskMapWRegions ( self ) :\n\n if len(self.dmap.get()) == 0 : umsg (\"Please select a map from which density will be taken\" ); return\n dmap = self.SegmentationMap()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n umsg ( \"Please select one ore more regions\" )\n return\n\n nv = regions.mask_volume(regs, dmap)\n if nv is None:\n umsg ('Map size %d,%d,%d is incompatible with mask size %d,%d,%d'\n % (tuple(dmap.data.size) + tuple(smod.grid_size())))\n return\n\n return nv\n\n\n def MaskAnotherMapWRegions ( self ) :\n\n if len(self.dmap.get()) == 0 : umsg (\"Please select a map from which density will be taken\" ); return\n dmap = self.SegmentationMap()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n umsg ( \"Please select one ore more regions\" )\n return\n\n points = regs[0].points().astype ( numpy.float32 )\n for r in regs[1:] :\n npoints = r.points().astype ( numpy.float32 )\n points = numpy.concatenate ( [points, npoints], axis=0 )\n\n _contour.affine_transform_vertices ( points, smod.seg_map.data.ijk_to_xyz_transform )\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( smod.openState.xform ) )\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n\n sg = VolumeData.zone_masked_grid_data ( dmap.data, points, smod.seg_map.data.step[0] )\n\n try : gv = VolumeViewer.volume.add_data_set ( sg, None )\n except : gv = VolumeViewer.volume.volume_from_grid_data ( sg )\n gv.openState.xform = dmap.openState.xform\n #chimera.openModels.add ( [gv] )\n gv.name = \"Masked\"\n\n\n def ExtractDensities ( self ) :\n\n import Segger.extract_region_dialog\n reload ( Segger.extract_region_dialog )\n\n Segger.extract_region_dialog.show_extract_region_dialog()\n\n\n def Frankensteinify ( self ) :\n\n import Segger.frankensteinify\n reload ( Segger.frankensteinify )\n\n Segger.frankensteinify.show_dialog()\n\n\n def FitDialog ( self ) :\n\n import Segger.fit_dialog\n Segger.fit_dialog.close_fit_segments_dialog();\n reload(Segger.fit_dialog);\n Segger.fit_dialog.new_fit_segments_dialog()\n\n\n def GeoSegDialog ( self ) :\n\n import Segger.geoseg;\n 
reload(Segger.geoseg);\n Segger.geoseg.show_dialog();\n\n\n def MaskAnotherMapWRegionsShrink ( self ) :\n\n if len(self.dmap.get()) == 0 : umsg (\"Please select a map from which density will be taken\" ); return\n dmap = self.SegmentationMap()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n umsg ( \"Please select one ore more regions\" )\n return\n\n points = regs[0].points().astype ( numpy.float32 )\n for r in regs[1:] :\n npoints = r.points().astype ( numpy.float32 )\n points = numpy.concatenate ( [points, npoints], axis=0 )\n\n _contour.affine_transform_vertices ( points, smod.seg_map.data.ijk_to_xyz_transform )\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( smod.openState.xform ) )\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n\n sg = VolumeData.zone_masked_grid_data ( dmap.data, points, smod.seg_map.data.step[0] )\n regsm = sg.matrix()\n\n nze = numpy.nonzero ( regsm )\n\n print nze\n\n li = numpy.min ( nze[0] )\n lj = numpy.min ( nze[1] )\n lk = numpy.min ( nze[2] )\n\n hi = numpy.max ( nze[0] )\n hj = numpy.max ( nze[1] )\n hk = numpy.max ( nze[2] )\n\n bound = 2\n li = li - bound; lj = lj - bound; lk = lk - bound\n hi = hi + bound; hj = hj + bound; hk = hk + bound\n\n n1 = hi - li + 1\n n2 = hj - lj + 1\n n3 = hk - lk + 1\n\n print \"Bounds - %d %d %d --> %d %d %d --> %d %d %d\" % ( li, lj, lk, hi, hj, hk, n1,n2,n3 )\n\n umsg ( \"Saving %d regions to mrc file...\" % len(regs) )\n\n nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )\n #dmat = dmap.full_matrix()\n\n print \"map grid dim: \", numpy.shape ( dmap.full_matrix() )\n print \"masked grid dim: \", numpy.shape ( regsm )\n print \"new map grid dim: \", numpy.shape ( nmat )\n\n\n #regs_name = \"\"\n for ii in range ( len(nze[0]) ) :\n i,j,k = nze[0][ii], nze[1][ii], nze[2][ii]\n #nmat[k-lk,j-lj,i-li] = regsm[k,j,i]\n nmat[i-li,j-lj,k-lk] = regsm[i,j,k]\n\n O = dmap.data.origin\n print \"origin:\", O\n nO = ( O[0] + float(lk) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + float(li) * dmap.data.step[2] )\n\n print \"new origin:\", nO\n\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, dmap.data.step, dmap.data.cell_angles )\n try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n nv.name = \"Masked\"\n\n nv.openState.xform = dmap.openState.xform\n\n\n def MaskMapWRegionsCube ( self ) :\n\n # thsi is useful for input to EMAN fitting procedures which\n # requires a cube map\n\n if len(self.dmap.get()) == 0 : umsg (\"Please select a map from which density will be taken\" ); return\n dmap = self.SegmentationMap()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n umsg ( \"Please select one ore more regions\" )\n return\n\n if 0 :\n points = regs[0].points().astype ( numpy.float32 )\n for r in regs[1:] :\n npoints = r.points().astype ( numpy.float32 )\n points = numpy.concatenate ( [points, npoints], axis=0 )\n\n for rri, reg in enumerate ( regs ) :\n\n print \" ---- Region %d/%d ---- \" % (rri+1, len(regs))\n\n points = reg.points().astype ( numpy.float32 )\n\n _contour.affine_transform_vertices ( points, smod.seg_map.data.ijk_to_xyz_transform )\n 
_contour.affine_transform_vertices ( points, Matrix.xform_matrix( smod.openState.xform ) )\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n\n sg = VolumeData.zone_masked_grid_data ( dmap.data, points, smod.seg_map.data.step[0] )\n regsm = sg.matrix()\n\n nze = numpy.nonzero ( regsm )\n\n # print nze\n\n li = numpy.min ( nze[0] )\n lj = numpy.min ( nze[1] )\n lk = numpy.min ( nze[2] )\n\n hi = numpy.max ( nze[0] )\n hj = numpy.max ( nze[1] )\n hk = numpy.max ( nze[2] )\n\n ci = int ( numpy.ceil ( (hi + li) / 2 ) )\n cj = int ( numpy.ceil ( (hj + lj) / 2 ) )\n ck = int ( numpy.ceil ( (hk + lk) / 2 ) )\n\n n1 = hi - li + 1\n n2 = hj - lj + 1\n n3 = hk - lk + 1\n\n n = 120 # max ( n1, n2, n3 ) + 4\n n2 = int ( numpy.ceil ( n / 2 ) )\n\n li = ci - n2; lj = cj - n2; lk = ck - n2\n hi = ci + n2; hj = cj + n2; hk = ck + n2\n\n print \"Bounds - %d %d %d --> %d %d %d --> %d %d %d (%d)\" % ( li, lj, lk, hi, hj, hk, n1, n2, n3, n )\n\n umsg ( \"Saving %d regions to mrc file...\" % len(regs) )\n\n nmat = numpy.zeros ( (n,n,n), numpy.float32 )\n #dmat = dmap.full_matrix()\n\n print \"map grid dim: \", numpy.shape ( dmap.full_matrix() )\n print \"masked grid dim: \", numpy.shape ( regsm )\n print \"new map grid dim: \", numpy.shape ( nmat )\n\n\n #regs_name = \"\"\n for ii in range ( len(nze[0]) ) :\n i,j,k = nze[0][ii], nze[1][ii], nze[2][ii]\n mapVal = regsm[i,j,k]\n nmat[i-li,j-lj,k-lk] = mapVal\n\n O = dmap.data.origin\n print \"origin:\", O\n if 1 :\n nO = ( O[0] + float(lk) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + float(li) * dmap.data.step[2] )\n print \"new origin:\", nO\n else :\n nO = ( -float(n2) * dmap.data.step[0],\n -float(n2) * dmap.data.step[1],\n -float(n2) * dmap.data.step[2] )\n print \"new origin:\", nO\n\n\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, dmap.data.step, dmap.data.cell_angles )\n try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n suff = \"_CubeRid%d.mrc\" % reg.rid\n\n import os.path\n nv.name = os.path.splitext (dmap.name) [0] + suff\n nv.openState.xform = dmap.openState.xform\n\n path = os.path.splitext (dmap.data.path) [0] + suff\n nv.write_file ( path, \"mrc\" )\n\n\n def ExtractMapWRegionsCube ( self ) :\n\n if len(self.dmap.get()) == 0 : umsg (\"Please select a map from which density will be taken\" ); return\n dmap = self.SegmentationMap()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n umsg ( \"Please select one ore more regions\" )\n return\n\n if 0 :\n points = regs[0].points().astype ( numpy.float32 )\n for r in regs[1:] :\n npoints = r.points().astype ( numpy.float32 )\n points = numpy.concatenate ( [points, npoints], axis=0 )\n\n for rri, reg in enumerate ( regs ) :\n\n (li,lj,lk), (hi,hj,hk) = regions.region_bounds( [reg] )\n\n ci = int ( numpy.ceil ( (hi + li) / 2 ) )\n cj = int ( numpy.ceil ( (hj + lj) / 2 ) )\n ck = int ( numpy.ceil ( (hk + lk) / 2 ) )\n\n n1 = hi - li + 1\n n2 = hj - lj + 1\n n3 = hk - lk + 1\n\n n = 62 # max ( n1, n2, n3 ) + 4\n n2 = int ( numpy.ceil ( n / 2 ) )\n\n li = ci - n2; lj = cj - n2; lk = ck - n2\n hi = ci + n2; hj = cj + n2; hk = ck + n2\n\n #bound = 2\n #li = li - bound; lj = lj - bound; lk = lk - bound\n #hi = hi + bound; hj = hj + bound; hk = hk + bound\n\n print \"Bounds - %d %d %d --> %d %d %d --> %d %d %d, 
%d\" % ( li, lj, lk, hi, hj, hk, n1,n2,n3, n )\n\n umsg ( \"Saving %d regions to mrc file...\" % len(regs) )\n\n #nmat = numpy.zeros ( (n3,n2,n1), numpy.float32 )\n nmat = numpy.zeros ( (n,n,n), numpy.float32 )\n dmat = dmap.full_matrix()\n\n #regs_name = \"\"\n for i in range ( li, hi ) :\n for j in range ( lj, hj ) :\n for k in range ( lk, hk ) :\n try :\n nmat[k-lk,j-lj,i-li] = dmat[k,j,i]\n except :\n pass\n\n O = dmap.data.origin\n print \"origin:\", O\n nO = O\n if 1 :\n nO = ( O[0] + float(li) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + float(lk) * dmap.data.step[2] )\n else :\n nO = ( -float(n2) * dmap.data.step[0],\n -float(n2) * dmap.data.step[1],\n -float(n2) * dmap.data.step[2] )\n print \"new origin:\", nO\n\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, dmap.data.step, dmap.data.cell_angles )\n try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n suff = \"_MC_%d_%d_%d_%d.mrc\" % (li, lj, lk, n)\n\n import os.path\n nv.name = os.path.splitext (dmap.name) [0] + suff\n nv.openState.xform = dmap.openState.xform\n\n path = os.path.splitext (dmap.data.path) [0] + suff\n nv.write_file ( path, \"mrc\" )\n\n\n def SubtractRegionsFromMap ( self ) :\n\n if len(self.dmap.get()) == 0 : umsg (\"Please select a map from which density will be taken\" ); return\n dmap = self.SegmentationMap()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n umsg ( \"Please select one ore more regions\" )\n return\n\n nv = regions.remove_mask_volume(regs, dmap)\n if nv is None:\n umsg ('Map size %d,%d,%d is incompatible with mask size %d,%d,%d'\n % (tuple(dmap.data.size) + tuple(smod.grid_size())))\n return\n\n return nv\n\n\n\n def DetectSym ( self ) :\n\n self.syms = None\n\n dmap = segmentation_map()\n\n if dmap == None:\n umsg ( \"Please select a map in the Segment Map dialog\" )\n return []\n\n print \"Symmetry for\", dmap.name\n\n from Measure.symmetry import find_point_symmetry\n\n syms, msg = find_point_symmetry ( dmap, nMax=8 )\n\n if syms is None :\n umsg ( \"No symmetry detected for %s\" % dmap.name )\n self.symmetryString.set ( \"No symmetry detected\" )\n return []\n\n umsg ( msg )\n start = msg.find(': ')+2\n end = msg.find (', center')\n self.symmetryString.set ( msg [start : end] )\n\n for i, sym in enumerate ( syms ) :\n #print i, \" -> \", sym\n pass\n\n from Measure.symmetry import centers_and_points\n\n centers, xyz, w = centers_and_points(dmap)\n print \"Centers: \", centers\n tcenters = numpy.array(centers, numpy.float32)\n Matrix.transform_points ( tcenters, dmap.data.xyz_to_ijk_transform )\n\n\n print \"TCenters: \", tcenters\n\n self.syms = syms\n self.scenters = tcenters\n return syms\n\n\n\n def SetSymColors ( self, regions ) :\n\n if not hasattr (self, 'syms') :\n return\n\n from random import random as rand\n\n # set same color for each symm matrix\n if not hasattr ( self, 'sym_colors' ) or self.sym_type != self.symmetryString.get() :\n print \" - making symm colors...\"\n self.sym_colors = {}\n self.sym_type = self.symmetryString.get()\n for si, smat in enumerate ( self.syms ) :\n self.sym_colors[si] = ( rand(), rand(), rand(), 1 )\n\n\n # in case region color were changed by user:\n if regions :\n clr = None\n for reg in regions :\n if reg.surface_piece.vertexColors is not None :\n reg.surface_piece.vertexColors = None\n clr = 
reg.surface_piece.color\n reg.set_color ( clr )\n if clr :\n for reg in regions :\n reg.set_color ( clr )\n self.sym_colors[0] = regions[0].color\n\n\n\n def ShowSelSymm ( self ) :\n\n dmap = segmentation_map()\n\n if dmap == None:\n umsg ( \"Please select a map...\" )\n return\n\n smod = self.CurrentSegmentation()\n if smod is None :\n umsg ( \"Select a segmentation...\" )\n return\n\n regions = smod.selected_regions()\n if len(regions)==0 :\n umsg ( \"Select one or more regions...\" )\n return\n\n if not hasattr(self,'syms') or self.syms == None :\n umsg ( \"No symmetry? Press Detect first...\" )\n return\n\n\n syms = self.syms\n centers = self.scenters\n\n print \"Showing %d symmetric copies...\" % len(syms)\n print \"Centers:\", centers\n\n com = centers[0]\n t_0_com = ( (1.0,0.0,0.0,-com[0]),\n (0.0,1.0,0.0,-com[1]),\n (0.0,0.0,1.0,-com[2]) )\n t_to_com = ( (1.0,0.0,0.0,com[0]),\n (0.0,1.0,0.0,com[1]),\n (0.0,0.0,1.0,com[2]) )\n\n ptf = smod.point_transform()\n #print ptf\n\n surf = _surface.SurfaceModel ()\n rname = \"\"\n\n self.SetSymColors (regions)\n\n #for si, smat in enumerate ( syms [1 : ] ) :\n for si, smat in enumerate ( syms ) :\n\n clr = self.sym_colors[si]\n\n rname = \"\"\n for i, reg in enumerate (regions) :\n\n tf = Matrix.multiply_matrices( t_to_com, smat, t_0_com )\n\n points = numpy.array ( reg.points(), numpy.float32 )\n _contour.affine_transform_vertices ( points, tf )\n #print points\n\n from MultiScale.surface import surface_points\n vertices, triangles, normals = \\\n surface_points ( points,\n resolution = smod.surface_resolution,\n density_threshold = 0.1,\n smoothing_factor = .25,\n smoothing_iterations = 5 )\n\n _contour.affine_transform_vertices ( vertices, ptf )\n\n nsp = surf.addPiece ( vertices, triangles, clr )\n nsp.oslName = \"Reg_%d_sym_%d\" % (reg.rid, si)\n\n rname = rname + \"_%d\" % reg.rid\n\n\n\n nn = os.path.splitext(dmap.name)[0]\n\n surf.name = nn + \"_sym_%s\" % self.symmetryString.get() + rname\n chimera.openModels.add ( [surf] )\n\n\n def ColorSymmSame ( self ) :\n\n print \"Color symm:\"\n\n smod = self.CurrentSegmentation()\n if smod is None :\n umsg ( \"Please segment first...\" )\n return\n\n if not hasattr(self,'syms') or self.syms == None :\n umsg ( \"No symmetry? Press Detect first...\" )\n return\n\n regions = smod.selected_regions()\n if len(regions)==0 :\n print \" - using all regions\"\n regions = None\n else :\n print \" - using %d selected regions\" % len(regions)\n\n syms = self.syms\n centers = self.scenters\n\n if 1 :\n from chimera import tasks, CancelOperation\n task = tasks.Task('Coloring symmetries', modal = True)\n try:\n smod.find_sym_regions2 ( [centers, syms], symColors=None, regs=regions, task=None )\n except CancelOperation:\n umsg('Cancelled coloring symmetries')\n finally:\n task.finished()\n\n else :\n #smod.calculate_watershed_regions ( mm, thrD, csyms, task )\n smod.find_sym_regions2 ( [centers, syms], symColors=None, regs=regions, task=None )\n\n\n\n def ColorSymmDiff ( self ) :\n\n print \"Color symm:\"\n\n smod = self.CurrentSegmentation()\n if smod is None :\n umsg ( \"Please segment first...\" )\n return\n\n if not hasattr(self,'syms') or self.syms == None :\n umsg ( \"No symmetry? 
Press Detect first...\" )\n return\n\n regions = smod.selected_regions()\n if len(regions)==0 :\n print \" - using all regions\"\n regions = None\n else :\n print \" - using %d selected regions\" % len(regions)\n\n syms = self.syms\n centers = self.scenters\n\n self.SetSymColors (regions)\n\n\n if 1 :\n from chimera import tasks, CancelOperation\n task = tasks.Task('Coloring symmetries', modal = True)\n try:\n #smod = self.SegmentAndGroup(show, group, task)\n smod.find_sym_regions2 ( [centers, syms], symColors=self.sym_colors, regs=regions, task=task )\n except CancelOperation:\n umsg('Cancelled coloring symmetries')\n finally:\n task.finished()\n\n else :\n #smod.calculate_watershed_regions ( mm, thrD, csyms, task )\n smod.find_sym_regions2 ( [centers, syms], symColors=self.sym_colors, regs=regions, task=None )\n\n\n def RegsDispUpdate ( self, task = None ) :\n\n smod = self.CurrentSegmentation()\n if smod is None :\n print \" - regs disp update - no smod\"\n return\n\n if smod.volume_data() is None:\n print \" - regs disp update - no smod\"\n smod.set_volume_data(self.SegmentationMap())\n\n maxnr = self.MaximumRegionsToDisplay()\n\n if maxnr > 0 and len(smod.regions) >= maxnr :\n umsg('Only showing %d of %d regions.' % (maxnr, len(smod.regions)))\n\n #smod.display_regions(self.regsVisMode.get(), maxnr, task)\n\n try :\n minSize = int ( self.minRegionSize.get() )\n except :\n umsg ( \"Non-num for min regions size\" )\n return\n\n smod.display_regions(style = 'Voxel_Surfaces', max_reg = maxnr, minSize=minSize, task = task, bForce=False)\n\n if maxnr >= len(smod.regions):\n umsg ( \"Showing %d region surfaces, min size:%d\" % (len(smod.regions), minSize) )\n else:\n umsg ( \"Showing %d of %d region surfaces, min size:%d\" % (maxnr, len(smod.regions), minSize) )\n\n self.ReportRegionCount(smod)\n\n def MaximumRegionsToDisplay ( self ) :\n\n try:\n maxnr = int(self.maxNumRegions.get())\n except:\n maxnr = 0\n return maxnr\n\n def ReportRegionCount ( self, smod ):\n\n if smod is None:\n s = ''\n else:\n s = \"%s regions\" % \"{:,}\".format( len(smod.regions) )\n self.regionCount[\"text\"] = s\n\n\n def RegsDispThr ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n print \"%s - thresholding %d regions\" % ( smod.name, len(smod.regions) )\n\n dmap = self.SegmentationMap()\n if dmap == None : print \"Map %s not open\" % self.dmap.get(); return\n print \" - using map:\", dmap.name\n dthr = dmap.surface_levels[0]\n\n for r in smod.regions : r.remove_surface()\n\n maxnr = self.MaximumRegionsToDisplay()\n for reg in smod.regions :\n\n if maxnr > 0 and len(smod.surfacePieces) >= maxnr :\n umsg('Only showing %d of %d regions.' 
%\n (len(smod.surfacePieces), len(smod.regions)))\n break\n reg.make_surface()\n\n\n def RegsPrint ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n for reg in smod.regions :\n\n print \"%d - %d %d %d\" % ( reg.rid, reg.max_point[0], reg.max_point[1], reg.map_point[2] )\n\n\n def ReduceMap ( self ) :\n\n if len(self.dmap.get()) == 0 : umsg (\"Please select a map first\" ); return\n mm = self.SegmentationMap()\n if mm == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n path = os.path.dirname ( mm.data.path ) + os.path.sep\n mname = os.path.splitext ( mm.name )[0]\n\n d = mm.data\n m2 = d.matrix ( ijk_step=(2,2,2) )\n step2 = ( d.step[0]*2.0, d.step[1]*2.0, d.step[2]*2.0 )\n ld = VolumeData.Array_Grid_Data(m2, d.origin, step2, d.cell_angles, d.rotation,\n name = mname + '_s2.mrc')\n\n gv = VolumeViewer.volume.add_data_set ( ld, None )\n gv.name = mname + '_s2.mrc'\n\n print \"writing\", path + gv.name\n mod.write_file ( path + gv.name, \"mrc\" )\n\n\n def GetUseSymmetry ( self ) :\n\n csyms = None\n err_msg = None\n\n if self.useSymmetry.get () :\n\n sstring = self.symmetryString.get ()\n if len ( sstring ) == 0 :\n umsg (\"Detecting symmetry...\")\n self.DetectSym ()\n sstring = self.symmetryString.get ()\n\n if len ( sstring ) == 0 :\n umsg ( \"Enter a symmetry string, e.g. D8\" )\n return [None, \"No symmetry specified or found\"]\n\n print \"Using symmetry:\", sstring\n\n from Measure.symmetry import centers_and_points\n\n dmap = segmentation_map()\n centers, xyz, w = centers_and_points(dmap)\n #print \"Centers: \", centers\n tcenters = numpy.array(centers, numpy.float32)\n Matrix.transform_points ( tcenters, dmap.data.xyz_to_ijk_transform )\n #print \"Centers in ijk coords: \", tcenters\n\n import Symmetry\n\n if sstring[0] == \"D\" :\n print \"Dihedral syms\"\n syms = Symmetry.dihedral_symmetry_matrices ( int(sstring[1]) )\n csyms = [tcenters, syms]\n\n elif sstring[0] == \"C\" :\n print \"Cyclic syms\"\n syms = Symmetry.cyclic_symmetry_matrices ( int(sstring[1]) )\n csyms = [tcenters, syms]\n\n else :\n err_msg = \"Symmetry string not recognized\"\n\n\n return [ csyms, err_msg ]\n\n\n\n def Segment ( self, show = True, group = True ) :\n\n smod = self.CurrentSegmentation(warn = False)\n if smod :\n chimera.openModels.close ( [smod] )\n\n if self.cur_dmap :\n mname, mext = os.path.splitext ( self.cur_dmap.name )\n print \" - current map: %s\" % self.cur_dmap.name\n remm = []\n for m in chimera.openModels.list() :\n if \".seg\" in m.name and mname in m.name :\n print \" - closing %s\" % m.name\n remm.append ( m )\n if len(remm) > 0 :\n chimera.openModels.close ( remm )\n\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Segmenting %s' % self.dmap.get(), modal = True)\n\n try:\n smod = self.SegmentAndGroup(show, group, task)\n except CancelOperation:\n umsg('Cancelled segmentation')\n return None\n finally:\n task.finished()\n\n return smod\n\n\n\n def SegmentAndGroup ( self, show = True, group = True, task = None ) :\n\n if len(self.dmap.get()) == 0 :\n umsg (\"Select a density map in the Segment map field\" );\n return\n\n mm = self.SegmentationMap()\n if mm == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n thrD = mm.surface_levels[0]\n mm.segmentThreshold = thrD\n print \"\\n___________________________\"\n umsg ( \"Segmenting %s, density threshold %f\" % (mm.name, thrD) )\n\n\n csyms = None\n\n smod = self.CurrentSegmentation(warn = False)\n if smod is None or smod.volume_data() != mm:\n mbase, msuf = 
os.path.splitext ( mm.name )\n msp = msuf.find(' ')\n mend = '' if msp == -1 else msuf[msp:]\n segname = mbase + mend + '.seg'\n smod = regions.Segmentation(segname, mm)\n self.SetSurfaceGranularity(smod)\n self.SetCurrentSegmentation(smod)\n\n if timing: t0 = clock()\n smod.calculate_watershed_regions ( mm, thrD, csyms, task )\n\n if timing: t1 = clock()\n self.RemoveSmallRegions(smod, task)\n self.RemoveContactRegions(smod, task)\n nwr = len(smod.regions)\n\n if timing: t2 = clock()\n\n if group:\n if self.groupMode.get() == 'smooth' :\n self.SmoothAndGroup ( smod, task )\n else :\n self.GroupByCons ( smod, task )\n\n\n # Undisplay other segmentations\n if timing: t3 = clock()\n for m in regions.segmentations() :\n if m != smod:\n m.display = False\n\n self.RegsDispUpdate ( task )\t # Display region surfaces\n# mm.display = False # Undisplay map\n\n if timing :\n t4 = clock()\n print \"Time %.2f sec: watershed %.2f sec, small %.2f, group %.2f sec, display %.2f sec\" % (t4-t0, t1-t0, t2-t1, t3-t2, t4-t3)\n\n umsg ( '%d watershed regions, grouped to %d regions' % ( nwr, len(smod.regions)) )\n\n return smod\n\n\n\n def RemoveSmallRegions(self, smod = None, task = None):\n\n if smod is None:\n smod = self.CurrentSegmentation()\n if smod is None:\n return\n\n mrs = self.minRegionSize.get()\n try:\n minsize = int ( mrs )\n except:\n print 'Minimum region size \"%s\" is not an integer' % mrs\n minsize = 1\n\n if minsize <= 1:\n return\n\n if task is None:\n from chimera import tasks, CancelOperation\n task = tasks.Task('Removing small regions', modal = True)\n try:\n smod.remove_small_regions(minsize, task)\n self.RegsDispUpdate(task)\n except CancelOperation:\n umsg('Cancelled removing small regions')\n return\n finally:\n task.finished()\n else:\n smod.remove_small_regions(minsize, task)\n self.RegsDispUpdate(task)\n\n self.ReportRegionCount(smod)\n\n\n def RemoveContactRegions(self, smod = None, task = None):\n\n if smod is None:\n smod = self.CurrentSegmentation()\n if smod is None:\n return\n\n mrs = self.minContactSize.get()\n try:\n minsize = int ( mrs )\n except:\n print 'Minimum contact size \"%s\" is not an integer' % mrs\n minsize = 1\n\n if minsize <= 0:\n return\n\n if task is None:\n from chimera import tasks, CancelOperation\n task = tasks.Task('Removing contact regions', modal = True)\n try:\n smod.remove_contact_regions(minsize, task)\n self.RegsDispUpdate(task)\n except CancelOperation:\n umsg('Cancelled removing small regions')\n return\n finally:\n task.finished()\n else:\n smod.remove_contact_regions(minsize, task)\n self.RegsDispUpdate(task)\n\n self.ReportRegionCount(smod)\n\n\n\n def GroupConnectedRegs ( self ) :\n\n if 0 :\n min_contact = int(self.minConnection.get())\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n regions = smod.selected_regions()\n if len(regions)==0 :\n regions = smod.regions\n\n smod.group_connected ( regions, min_contact)\n\n self.RegsDispUpdate()\n self.ReportRegionCount(smod)\n if regions:\n from regions import TopParentRegions\n nsurfs = [r.surface_piece for r in TopParentRegions(regions)]\n chimera.selection.setCurrent(nsurfs)\n\n else :\n\n print \" - connection grouping step\"\n\n\n def GroupByContacts ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Contact grouping', modal = True)\n try:\n regions.group_by_contacts(smod, task)\n self.RegsDispUpdate(task)\n except CancelOperation:\n umsg('Cancelled contact grouping')\n 
finally:\n task.finished()\n\n self.SetColorLevelRange()\n self.ReportRegionCount(smod)\n\n self.contactsPanel.set(True)\n\n def CloseAll ( self ) :\n\n umsg ( \"Closing all segmentations.\" )\n\n dmap = self.SegmentationMap()\n if dmap != None :\n dmap.display = True\n\n for m in regions.segmentations ():\n print 'Closed', m.name\n m.close()\n\n self.SetCurrentSegmentation ( None )\n self.ReportRegionCount(None)\n\n\n def Associate ( self ) :\n\n seg = self.CurrentSegmentation ()\n if seg :\n print \" - seg: \", seg.name\n\n if self.cur_dmap :\n print \" - map: \" + self.cur_dmap.name\n seg.set_volume_data ( self.cur_dmap )\n umsg ( \"Map %s is now associated with %s\" % (self.cur_dmap.name, seg.name) )\n else :\n umsg ( \"No map selected\" )\n\n\n def SmoothAndGroup ( self, smod, task = None ) :\n\n try :\n numit = int ( self.numSteps.get() )\n sdev = float ( self.stepSize.get() )\n except :\n umsg ( \"Enter an integer for # smoothing steps, float for step size\" )\n return\n\n try :\n targNRegs = int(self.targNRegions.get())\n except :\n umsg ( \"Enter an integer for target # of regions\" );\n return\n\n\n csyms = None\n if self.useSymmetry.get() :\n print \"Using symmetry...\"\n self.DetectSym ()\n csyms = [self.scenters, self.syms]\n\n\n if targNRegs <= 0 :\n umsg ( \"# of regions\" )\n return\n\n smod.smooth_and_group(numit, sdev, targNRegs, csyms, task)\n\n self.ReportRegionCount(smod)\n\n\n\n\n def GroupByCons ( self, smod, task = None ) :\n\n try :\n numit = int ( self.numStepsCon.get() )\n #sdev = float ( self.stepSize.get() )\n except :\n umsg ( \"Enter an integer for # steps\" )\n return\n\n try :\n targNRegs = int(self.targNRegionsCon.get())\n except :\n umsg ( \"Enter an integer for target # of regions\" );\n return\n\n\n #csyms, sym_err = self.GetUseSymmetry ()\n #if sym_err :\n # umsg ( sym_err )\n # return\n\n csyms = None\n if self.useSymmetry.get() :\n print \"Using symmetry...\"\n self.DetectSym ()\n csyms = [self.scenters, self.syms]\n\n\n if targNRegs <= 0 :\n umsg ( \"Enter an integer > 0 for target # of regions\" )\n return\n\n print \" - grouping %d steps, target %d\" % (numit, targNRegs)\n\n #smod.smooth_and_group(numit, sdev, targNRegs, csyms, task)\n smod.group_connected_n ( numit, targNRegs, None, csyms, task )\n\n\n\n self.ReportRegionCount(smod)\n\n\n\n def GroupByConsOneStep ( self, task = None ) :\n\n smod = self.CurrentSegmentation()\n if smod is None:\n return\n\n if smod.volume_data() is None:\n umsg ('Segmentation map not opened')\n return\n\n if len(smod.regions) <= 1:\n umsg ('%s has %d regions' % (smod.name, len(smod.regions)))\n return\n\n\n csyms = None\n if self.useSymmetry.get() :\n print \"Using symmetry...\"\n self.DetectSym ()\n csyms = [self.scenters, self.syms]\n\n\n regions = None\n if 0 and self.groupByConsOnlyVis.get() :\n regions = smod.visible_regions()\n if len(regions) == 0 :\n umsg (\"Grouping by connections: no visible regions found or they are from a different model\" )\n return\n\n umsg (\"Grouping by connections: applying only to %d regions visible\" % len(regions) )\n\n\n if 1 :\n from chimera import tasks, CancelOperation\n task = tasks.Task('Group by connections', modal = True)\n try :\n newRegs, removedRegs = smod.group_connected_n ( 1, 1, regions, csyms, task )\n #self.RegsDispUpdate ( task )\t # Display region surfaces\n except CancelOperation :\n umsg('Cancelled group by connections')\n return\n finally:\n task.finished()\n else :\n newRegs, removedRegs = smod.group_connected_n ( 1, 1, regions, csyms, task )\n\n for r in 
newRegs : r.make_surface (None, None, smod.regions_scale)\n print \" - removig %d surfs\" % len(removedRegs)\n for r in removedRegs : r.remove_surface()\n\n self.ReportRegionCount(smod)\n\n if smod.adj_graph :\n graph.create_graph ( smod, smod.graph_links )\n\n umsg ( \"Got %d regions after grouping by connections\" % (len(smod.regions)) )\n\n\n def SmoothAndGroupOneStep ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod is None:\n return\n\n if smod.volume_data() is None:\n umsg ('Segmentation map not opened')\n return\n\n if len(smod.regions) <= 1:\n umsg ('%s has %d regions' % (smod.name, len(smod.regions)))\n return\n\n try :\n step = float ( self.stepSize.get() )\n except :\n umsg ( \"Enter <float> for step size\" )\n return\n\n sdev = step + smod.smoothing_level\n\n csyms = None\n if self.useSymmetry.get() :\n print \"Using symmetry...\"\n self.DetectSym ()\n csyms = [self.scenters, self.syms]\n\n\n umsg ( \"Smoothing and grouping, standard deviation %.3g voxels\" % sdev)\n\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Smooth and group', modal = True)\n try:\n for i in range ( 10 ) :\n new_regs = len(smod.smooth_and_group(1, sdev, 1, csyms, task))\n\n # if symmetry is being used we should stop after one step\n # since symmetry can block regions from joining indefinitely\n if new_regs > 0 : break\n\n umsg ('No new groups smoothing %.3g voxels' % sdev)\n sdev += step\n self.RegsDispUpdate ( task )\t # Display region surfaces\n\n except CancelOperation:\n umsg('Cancelled smooth and group')\n return\n finally:\n task.finished()\n\n self.ReportRegionCount(smod)\n\n if smod.adj_graph :\n graph.create_graph ( smod, smod.graph_links )\n\n umsg ( \"Got %d regions after smoothing %.3g voxels.\" %\n (len(smod.regions), sdev) )\n\n\n def Overlapping ( self ) :\n\n dmap = self.SegmentationMap()\n if dmap == None :\n umsg ( \"No map selected\" )\n return\n\n smod = self.CurrentSegmentation()\n if smod == None :\n umsg ( \"No segmentation selected\" )\n return\n\n if len(smod.regions) == 0 :\n umsg ( \"No regions found in %s\" % smod.name )\n return\n\n selatoms = chimera.selection.currentAtoms()\n spoints = None\n\n if len ( selatoms ) > 0 :\n spoints = _multiscale.get_atom_coordinates ( selatoms, transformed = True )\n\n else :\n mods = chimera.selection._currentSelection.models()\n if len(mods) == 1 :\n mod = mods[0]\n print \"Using for selection:\", mod.name\n\n import axes\n spoints, weights = axes.map_points ( mod, True )\n print \" - map - got %d points in contour\" % len (spoints)\n from _contour import affine_transform_vertices as transform_vertices\n transform_vertices( spoints, Matrix.xform_matrix( mod.openState.xform ) )\n else :\n umsg (\"0 or more than 1 model selected\")\n return\n\n\n simap = self.PointIndexesInMap ( spoints, dmap )\n\n umsg ( \"Overlapping %d atoms with %d regions\" % (\n len(selatoms), len(smod.regions) ) )\n\n ovRatio = float ( self.overlappingPercentage.get() ) / 100.0\n print \" - overlap ratio: %f\" % ovRatio\n\n oregs = []\n for ri, r in enumerate ( smod.regions ) :\n ipoints = r.points()\n noverlap = 0\n for i,j,k in ipoints :\n try : simap[i][j][k]\n except: continue\n noverlap += 1\n ov = float ( noverlap ) / float ( len(ipoints) )\n if ov > ovRatio : oregs.append ( r )\n #if noverlap > 0 : oregs.append ( r )\n regions.select_regions ( oregs )\n\n umsg ( \"Selected %d regions\" % ( len(oregs) ) )\n\n\n\n\n def GroupUsingFits ( self ) :\n\n dmap = self.SegmentationMap()\n if dmap == None : print \"Map %s not open\" % 
self.dmap.get(); return\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n if len(smod.regions) == 0 : print \"No regions in\", smod.name; return\n try : dmap.fitted_mols\n except : dmap.fitted_mols = []\n if len(dmap.fitted_mols) == 0 : print \"No fits found for\", dmap.name; return\n\n print \"Grouping %d regions by overlap to %d fitted structures\" % (\n len(smod.regions), len(dmap.fitted_mols) )\n\n dmap.chain_maps = []\n\n for mol in dmap.fitted_mols :\n try : mol.fmap.imap\n except : mol.fmap.imap = self.MapIndexesInMap ( dmap, mol.fmap )\n from random import random as rand\n mol.fmap.surf_color = ( rand(), rand(), rand(), 1 )\n dmap.chain_maps.append ( mol.fmap )\n\n# self.SegAccuracy ( \"_fits_acc\", True )\n\n\n\n def RegSurfsShowNone ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n for reg in smod.regions :\n if reg.surface_piece:\n reg.surface_piece.display = False\n\n\n def RegSurfsShowAll ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Showing all regions', modal = True)\n try:\n self.RegsDispUpdate(task)\n except CancelOperation:\n pass\n finally:\n task.finished()\n\n\n def RegSurfsShowOnlySelected ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n regions.show_only_regions(smod.selected_regions())\n\n\n def RegSurfsHide ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.selected_regions()\n #if len(sregs) == 0 : sregs = smod.all_regions()\n\n for r in sregs : r.hide_surface()\n\n\n def RegSurfsShow ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.selected_regions()\n #if len(sregs) == 0 : sregs = smod.all_regions()\n\n for r in sregs : r.show_surface()\n\n\n\n def RegSurfsShowAdjacent ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.selected_regions()\n if len(sregs) == 0 :\n return\n\n cr = set()\n for r in sregs :\n cr.update(r.contacting_regions())\n\n umsg ( \"Region has %d adjacent regions\" % len(cr) )\n for r in cr :\n r.show_surface()\n\n\n\n\n\n\n def RegSurfsShowNotGrouped ( self ) :\n\n print \"Showing not-grouped regions...\"\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n for reg in smod.regions :\n if len(reg.cregs) == 0 :\n if reg.surface_piece:\n reg.surface_piece.display = True\n else :\n if reg.surface_piece:\n reg.surface_piece.display = False\n\n\n\n def SelectGrouped ( self ) :\n\n print \"Selecting grouped regions...\"\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n surfs = []\n for reg in smod.regions :\n if len(reg.cregs) > 0 :\n if reg.surface_piece:\n surfs.append ( reg.surface_piece )\n\n chimera.selection.clearCurrent ()\n chimera.selection.addCurrent ( surfs )\n\n\n\n def SelectVisible ( self ) :\n\n print \"Selecting visible regions...\"\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n surfs = []\n for reg in smod.regions :\n if reg.surface_piece and reg.surface_piece.display:\n surfs.append ( reg.surface_piece )\n\n chimera.selection.clearCurrent ()\n chimera.selection.addCurrent ( surfs )\n\n\n\n def SelectNotGrouped ( self ) :\n\n print \"Showing not-grouped regions...\"\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n surfs = []\n for reg in smod.regions :\n if len(reg.cregs) == 0 :\n if reg.surface_piece:\n surfs.append ( reg.surface_piece )\n\n 
chimera.selection.clearCurrent ()\n chimera.selection.addCurrent ( surfs )\n\n\n\n def RegSurfsShowGrouped ( self ) :\n\n print \"Showing grouped regions...\"\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.grouped_regions()\n if len(sregs) == 0 :\n umsg ( \"No grouped regions\" )\n return\n\n umsg ( \"Showing %d grouped regions\" % len(sregs) )\n\n regions.show_only_regions(sregs)\n\n\n\n def RegSurfsTransparent ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.selected_regions()\n if len(sregs) == 0 : sregs = smod.all_regions()\n\n for r in sregs :\n if r.has_surface():\n cr,cg,cb = r.surface_piece.color[:3] #r.color[:3]\n r.surface_piece.color = ( cr, cg, cb, REG_OPACITY )\n r.surface_piece.displayStyle = r.surface_piece.Solid\n\n\n def RegSurfsOpaque ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.selected_regions()\n if len(sregs) == 0 : sregs = smod.all_regions()\n\n for r in sregs :\n if r.has_surface():\n cr,cg,cb = r.surface_piece.color[:3] #r.color[:3]\n r.surface_piece.color = ( cr, cg, cb, 1.0 )\n r.surface_piece.displayStyle = r.surface_piece.Solid\n\n\n def RegSurfsMesh ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.selected_regions()\n if len(sregs) == 0 : sregs = smod.all_regions()\n\n for r in sregs :\n if r.has_surface():\n cr,cg,cb = r.surface_piece.color[:3] #r.color[:3]\n r.surface_piece.color = ( cr, cg, cb, 1.0 )\n r.surface_piece.displayStyle = r.surface_piece.Mesh\n r.surface_piece.lineThickness = 1.0\n\n\n\n def SelectAllRegions ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sel_regs = set ( smod.selected_regions() )\n surfs = [r.surface_piece for r in smod.regions\n if 1]\n\n chimera.selection.clearCurrent ()\n chimera.selection.addCurrent ( surfs )\n\n\n def Invert ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sel_regs = set ( smod.selected_regions() )\n surfs = [r.surface_piece for r in smod.regions\n if not r in sel_regs and r.surface_piece]\n\n chimera.selection.clearCurrent ()\n chimera.selection.addCurrent ( surfs )\n\n\n def Group ( self ):\n\n\n if NothingSelected():\n\n if self.groupMode.get() == 'smooth' :\n self.SmoothAndGroupOneStep()\n else :\n self.GroupByConsOneStep()\n\n else:\n self.JoinSelRegs()\n\n\n def JoinSelRegs ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n umsg ( \"No regions selected\" )\n return\n regs = regions.TopParentRegions(regs)\n\n jreg = smod.join_regions ( regs )\n jreg.make_surface(None, None, smod.regions_scale)\n\n if smod.adj_graph :\n graph.create_graph ( smod, smod.graph_links )\n\n chimera.selection.setCurrent([jreg.surface_piece])\n\n self.ReportRegionCount(smod)\n umsg ( \"Grouped %d regions\" % len(regs) )\n\n\n\n\n def DelSelRegs ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod is None :\n umsg ( \"No segmentation selected...\" )\n return\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n umsg ( \"Select one or more regions to delete\" )\n return\n\n smod.remove_regions ( regs, update_surfaces = True, remove_children = True )\n\n self.ReportRegionCount(smod)\n umsg ( \"Deleted %d regions\" % len(regs) )\n\n\n def DelExcSelRegs ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod is None :\n umsg ( \"No segmentation selected...\" )\n return\n\n sel_regs = smod.selected_regions()\n 
if len(sel_regs)==0 :\n umsg ( \"No regions selected...\" )\n return\n\n dregs = [r for r in smod.regions\n if not r in sel_regs]\n\n smod.remove_regions ( dregs, update_surfaces = True, remove_children = True )\n\n self.ReportRegionCount(smod)\n umsg ( \"Deleted %d regions\" % len(dregs) )\n\n\n def Ungroup ( self ):\n\n if NothingSelected():\n self.UngroupLastSmoothing()\n else:\n self.UngroupSelRegs()\n\n\n\n\n def SafeCreateSurfsForRegs ( self, smod, rlist, rregs ) :\n\n maxnr = self.MaximumRegionsToDisplay()\n\n nsurfs = 0\n for r in smod.regions :\n if r.has_surface() :\n nsurfs += 1\n\n print \" - %d surfs have pieces before\" % nsurfs\n\n # surfs that will go away...\n for r in rregs :\n if r.has_surface() :\n nsurfs -= 1\n\n print \" - %d surfs will have pieces after removing selected\" % nsurfs\n\n\n if nsurfs >= maxnr :\n umsg('Ungrouped to %d regions, but did not show their surfaces, see Options' % len(rlist) )\n\n else :\n canshow = maxnr - nsurfs\n\n if canshow < len(rlist) :\n umsg('Ungrouped to %d regions, but did not show all surfaces, see Options' % len(rlist) )\n else :\n umsg('Ungrouped to %d regions' % len(rlist) )\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Adding surfaces', modal = True)\n try:\n for ri, reg in enumerate ( rlist ) :\n if ri >= canshow :\n break\n reg.make_surface(None, None, smod.regions_scale)\n except CancelOperation:\n pass\n finally:\n task.finished()\n\n\n\n def ShowNumSubRegs ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.selected_regions()\n if len(sregs) == 0 :\n umsg ( \"No regions selected\" )\n return\n sregs = regions.TopParentRegions(sregs)\n\n num = 0\n for r in sregs :\n if len(r.cregs) == 0 :\n pass\n else :\n num += len(r.cregs)\n\n umsg ( \"selected regions have %d total sub regions\" % num )\n\n\n\n def UngroupSelRegs ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.selected_regions()\n if len(sregs) == 0 :\n umsg ( \"No regions selected\" )\n return\n sregs = regions.TopParentRegions(sregs)\n\n chimera.selection.clearCurrent ()\n\n [rlist, removedRegs] = smod.ungroup_regions ( sregs )\n self.SafeCreateSurfsForRegs ( smod, rlist, removedRegs )\n for r in removedRegs : r.remove_surface()\n\n print \" - now %d regions\" % len(smod.regions)\n\n if smod.adj_graph :\n graph.create_graph ( smod, smod.graph_links )\n\n chimera.selection.setCurrent ( [r.surface_piece for r in rlist if (hasattr(r,'surface_piece') and r.surface_piece != None)] )\n\n self.ReportRegionCount(smod)\n\n\n\n def UngroupAllRegs ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n rlist = list(smod.regions)\n [rlist2, removedRegs] = smod.ungroup_regions(rlist)\n\n self.SafeCreateSurfsForRegs ( smod, rlist2, removedRegs )\n for r in removedRegs : r.remove_surface()\n\n self.ReportRegionCount(smod)\n\n\n\n def UngroupLastSmoothing ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n levels = [r.smoothing_level for r in smod.regions]\n if len(levels) == 0:\n return\n slev = max(levels)\n\n rlev = [r for r in smod.regions if r.smoothing_level == slev]\n rlist2 = []\n removedRegs = []\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Ungrouping', modal = True)\n try:\n [rlist2, removedRegs] = smod.ungroup_regions(rlev, task)\n except CancelOperation:\n pass\n finally:\n task.finished()\n\n self.SafeCreateSurfsForRegs ( smod, rlist2, removedRegs )\n for r in removedRegs : 
r.remove_surface()\n\n levels = [r.smoothing_level for r in smod.regions]\n smod.smoothing_level = max(levels)\n\n if smod.adj_graph :\n graph.create_graph ( smod, smod.graph_links )\n\n #umsg ( \"Ungrouped to %.3g voxel smoothing, %d regions\" % (smod.smoothing_level, len(smod.regions)) )\n self.ReportRegionCount(smod)\n\n\n def CloseRegions ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n chimera.openModels.remove ( smod )\n self.SetCurrentSegmentation(None)\n self.ReportRegionCount(None)\n\n if smod.adj_graph : smod.adj_graph.close()\n\n\n def CloseSeg ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n smod.close()\n self.SetCurrentSegmentation(None)\n\n\n\n def RegionsVolume ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.selected_regions()\n print \"%d selected regions\" % len(sregs)\n\n if len(sregs) == 0 :\n sregs = smod.regions\n\n if len(sregs) == 0 :\n umsg ( \"No regions found in %s\" % smod.name )\n return\n\n tvol = sum([reg.enclosed_volume() for reg in sregs])\n pcount = sum([reg.point_count() for reg in sregs])\n\n rw = \"region\"\n if len(sregs) > 1 : rw = \"regions\"\n umsg ( \"Volume of %d %s: %.3g Angstroms^3, %d points\" % ( len(sregs), rw, tvol, pcount ) )\n\n def RegionMeanAndSD ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.selected_regions()\n if len(sregs) == 0 :\n umsg ( \"No regions selected in %s\" % smod.name )\n return\n\n v = self.SegmentationMap()\n if v is None:\n v = smod.volume_data()\n if v is None:\n umsg ( 'No map specified' )\n return\n\n means, sdevs = regions.mean_and_sd(sregs, v)\n for r, m, sd in zip(sregs, means, sdevs):\n umsg ( 'Region %d mean %.5g, SD %.5g' % (r.rid, m, sd) )\n\n\n def Graph ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod:\n graph.create_graph(smod,\"uniform\")\n\n def GraphAvgD ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod:\n graph.create_graph(smod,\"avgd\")\n\n def GraphMaxD ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod:\n graph.create_graph(smod,\"maxd\")\n\n def GraphN ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod:\n graph.create_graph(smod,\"N\")\n\n\n def LoadGraph ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod is None:\n graph.open_skeleton(smod)\n\n def SaveGraph ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod:\n graph.read_graph(smod)\n\n def CloseGraph ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod:\n graph.close(smod)\n smod.display = True\n\n def GroupBySkeleton ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod:\n skeleton.group_by_skeleton(smod)\n smod.display = True\n smod.display_regions()\n self.ReportRegionCount(smod)\n\n def RemoveGraphLinks ( self ) :\n\n graph.remove_graph_links()\n\n\n def ShowRegionsAxes ( self, regs ) :\n\n smod = self.CurrentSegmentation()\n if smod is None: return\n\n for r in regs :\n\n sp = r.surface_piece\n try :\n sp.axes.display = True\n chimera.openModels.close ( sp.axes )\n except :\n pass\n\n tpoints = r.map_points()\n sp.COM, sp.U, sp.S, sp.V = prAxes ( tpoints )\n\n com = numpy.sum(tpoints, axis=0) / len(tpoints)\n comv = numpy.ones_like ( tpoints ) * com\n points = tpoints - comv\n\n ppoints = points * sp.U\n sp.Extents = numpy.asarray ( numpy.max ( numpy.abs ( ppoints ), 0 ) )[0]\n\n sp.Extents[0] += 5.0\n sp.Extents[1] += 5.0\n sp.Extents[2] += 5.0\n\n import axes\n reload (axes)\n\n if 0 :\n # for ribosome direction\n 
sp.Extents[1] = sp.Extents[1] * float(self.axesFactor.get())\n\n sp.axes = axes.AxesMod ( sp.COM, sp.U, sp.Extents, 6, 1.0, alignTo = sp.model )\n else :\n sp.axes = axes.AxesMod ( sp.COM, sp.U, sp.Extents, 1.0, 1.1, alignTo = sp.model )\n\n sp.axes.name = \"region_%d_axes\" % r.rid\n\n\n\n def ShowRegionAxesSelected ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n sregs = smod.selected_regions()\n if len(sregs)==0 : print \"no selected regions found\"; return\n\n self.ShowRegionsAxes ( sregs )\n\n\n def HideRegionAxes ( self ) :\n\n print \"hiding axes\"\n for m in OML() :\n t = m.name.split (\"_\")\n if t[0] == \"region\" and t[2] == \"axes\" :\n print \"- removing\", m.name\n chimera.openModels.close( m )\n\n\n\n\n def PointIndexesInMap ( self, points, ref_map ) :\n\n print \"Making map indices for %d points in %s\" % ( len(points), ref_map.name )\n\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix ( ref_map.openState.xform.inverse() ) )\n _contour.affine_transform_vertices ( points, ref_map.data.xyz_to_ijk_transform )\n\n imap = {}\n for fi, fj, fk in points :\n if 0 :\n i, j, k = int(numpy.round(fi)), int(numpy.round(fj)), int(numpy.round(fk))\n try : mi = imap[i]\n except : mi = {}; imap[i] = mi\n try : mij = mi[j]\n except : mij = {}; mi[j] = mij\n mij[k] = 1\n continue\n for i in [ int(numpy.floor(fi)), int(numpy.ceil(fi)) ] :\n for j in [ int(numpy.floor(fj)), int(numpy.ceil(fj)) ] :\n for k in [ int(numpy.floor(fk)), int(numpy.ceil(fk)) ] :\n try : mi = imap[i]\n except : mi = {}; imap[i] = mi\n try : mij = mi[j]\n except : mij = {}; mi[j] = mij\n mij[k] = 1\n\n\n return imap #, C, bRad\n\n\n def MapIndexesInMap ( self, ref_map, mask_map ) :\n\n thr = mask_map.surface_levels[0]\n mm = mask_map.data.matrix()\n mm = numpy.where ( mm > thr, mm, numpy.zeros_like(mm) )\n\n nze = numpy.nonzero ( mm )\n nzs = numpy.array ( [nze[2], nze[1], nze[0]] )\n\n # the copy is needed! 
otherwise the _contour.afine_transform does not work for some reason\n points = numpy.transpose ( nzs ).astype(numpy.float32)\n\n #points = numpy.zeros ( ( len(nze[0]), 3 ), numpy.float32 )\n #for ei, i in enumerate ( nze[0] ) :\n # j = nze[1][ei]\n # k = nze[2][ei]\n # points[ei][0], points[ei][1], points[ei][2] = float (k), float(j), float(i)\n\n #print points[0]\n\n print \"Making map indices for %s in %s\" % ( mask_map.name, ref_map.name )\n print \" - %d points above %.3f\" % ( len(points), thr )\n\n # transform to index reference frame of ref_map\n f1 = mask_map.data.ijk_to_xyz_transform\n f2 = Matrix.xform_matrix ( mask_map.openState.xform )\n f3 = Matrix.xform_matrix ( ref_map.openState.xform.inverse() )\n f4 = ref_map.data.xyz_to_ijk_transform\n\n tf = Matrix.multiply_matrices( f2, f1 )\n tf = Matrix.multiply_matrices( f3, tf )\n tf = Matrix.multiply_matrices( f4, tf )\n _contour.affine_transform_vertices ( points, tf )\n\n #_contour.affine_transform_vertices ( points, f1 )\n #_contour.affine_transform_vertices ( points, f2 )\n #_contour.affine_transform_vertices ( points, f3 )\n\n #print points[0]\n\n #com = numpy.sum (points, axis=0) / len(points)\n #C = chimera.Vector ( com[0], com[1], com[2] )\n #comv = numpy.ones_like ( points ) * com\n #points_v = points - comv\n #bRad = numpy.sqrt ( numpy.max ( numpy.sum ( numpy.square (points_v), 1 ) ) )\n\n # transform points to indexes in reference map\n # _contour.affine_transform_vertices ( points, ref_map.data.xyz_to_ijk_transform )\n\n imap = {}\n for fk, fj, fi in points :\n\n for i in [ int(numpy.floor(fi)), int(numpy.ceil(fi)) ] :\n for j in [ int(numpy.floor(fj)), int(numpy.ceil(fj)) ] :\n for k in [ int(numpy.floor(fk)), int(numpy.ceil(fk)) ] :\n\n try : mi = imap[i]\n except : mi = {}; imap[i] = mi\n try : mij = mi[j]\n except : mij = {}; mi[j] = mij\n mij[k] = 1\n\n\n return imap #, C, bRad\n\n\n def ShowGroupSurfaces ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n sregs = smod.selected_regions()\n regs = set([r.top_parent() for r in sregs])\n if len(regs)==0 :\n regs = smod.regions\n\n for r in regs:\n for c in r.all_children():\n c.remove_surface()\n r.make_surface()\n\n if sregs:\n surfs = [r.surface() for r in regs if r.has_surface()]\n from chimera import selection\n selection.setCurrent(surfs)\n\n\n\n def ShowUngroupedSurfaces ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n sregs = smod.selected_regions()\n regs = set([r.top_parent() for r in sregs])\n if len(regs)==0 :\n regs = smod.regions\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Showing ungrouped surfaces', modal = True)\n try:\n for i, r in enumerate(regs):\n if task and i % 100 == 0:\n task.updateStatus('region %d of %d' % (i, len(regs)))\n if r.has_children():\n r.remove_surface()\n for c in r.childless_regions():\n c.make_surface()\n else:\n r.make_surface()\n except CancelOperation:\n pass\n finally:\n task.finished()\n\n if sregs:\n from regions import all_regions\n surfs = [r.surface() for r in all_regions(regs) if r.has_surface()]\n from chimera import selection\n selection.setCurrent(surfs)\n\n\n def SelectGroups ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n rlist = [r for r in smod.regions if r.has_children()]\n from regions import all_regions\n surfs = [r.surface() for r in all_regions(rlist) if r.has_surface()]\n from chimera import selection\n selection.setCurrent(surfs)\n\n\n\n # Regions that have voxels on the mask boundary.\n def 
SelectBoundaryRegions ( self, pad = 3 ) :\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n m = smod.mask\n if m is None:\n return\n\n import _segment\n b = _segment.region_bounds(m)\n\n rset = set()\n kmax, jmax, imax = [(s-1)-pad for s in m.shape]\n for r in smod.childless_regions():\n i = r.rid\n if (i < len(b) and b[i,6] > 0 and\n (b[i,0] <= pad or b[i,1] <= pad or b[i,2] <= pad or\n b[i,3] >= kmax or b[i,4] >= jmax or b[i,5] >= imax)):\n rset.add(r.top_parent())\n from regions import all_regions, select_regions\n select_regions(all_regions(rset))\n\n\n def SelectNonPlacedRegions ( self ) :\n\n if len(self.dmap.get()) == 0 : umsg (\"Please select a density map\"); return\n dmap = self.SegmentationMap()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n nsel = 0\n chimera.selection.clearCurrent ()\n for sp in smod.surfacePieces :\n try :\n sp.region.placed\n except :\n chimera.selection.addCurrent ( sp )\n nsel = nsel + 1\n\n print \"%d non-placed regions\" % nsel\n\n\n def mouse_group_cb(self):\n\n gmm = self.group_mouse_mode\n if self.mouse_group.get():\n if gmm is None:\n import mousemode\n gmm = mousemode.Group_Connected_Mouse_Mode()\n self.group_mouse_mode = gmm\n button, modifiers = self.mouse_button_spec()\n gmm.bind_mouse_button(button, modifiers)\n elif gmm:\n gmm.unbind_mouse_button()\n\n def mouse_group_button_cb(self):\n\n if self.mouse_group.get() and self.group_mouse_mode:\n button, modifiers = self.mouse_button_spec()\n self.group_mouse_mode.bind_mouse_button(button, modifiers)\n\n def mouse_button_spec(self):\n\n name = self.mouse_group_button.variable.get()\n name_to_bspec = {'button 1':('1', []), 'ctrl button 1':('1', ['Ctrl']),\n 'button 2':('2', []), 'ctrl button 2':('2', ['Ctrl']),\n 'button 3':('3', []), 'ctrl button 3':('3', ['Ctrl'])}\n bspec = name_to_bspec[name]\n return bspec\n\n\ndef NothingSelected():\n\n from chimera import selection\n return selection.currentEmpty()\n\n\ndef volume_segmentation_dialog ( create=False ) :\n\n from chimera import dialogs\n return dialogs.find ( Volume_Segmentation_Dialog.name, create=create )\n\n\ndef show_volume_segmentation_dialog ():\n\n print \"hi\"\n\n from chimera import dialogs\n d = volume_segmentation_dialog ( create = True )\n # Avoid transient dialog resizing when created and mapped for first time.\n #d.toplevel_widget.update_idletasks ()\n #d.enter()\n\n d = dialogs.display(Volume_Segmentation_Dialog.name)\n\n #from Accelerators import add_accelerator\n #add_accelerator('gg', 'Group regions', d.JoinSelRegs )\n #add_accelerator('uu', 'Ungroup regions', d.UngroupSelRegs )\n #add_accelerator('rh', 'Hide regions', d.RegSurfsHide )\n #add_accelerator('rs', 'Show regions', d.RegSurfsShowOnlySelected )\n #add_accelerator('dd', 'Delete regions', d.DelSelRegs )\n #print \"gg - groups regions\"\n #print \"uu - ungroup regions\"\n\n return d\n\n\n\ndef current_segmentation(warn = True):\n\n d = volume_segmentation_dialog()\n if d:\n return d.CurrentSegmentation(warn)\n elif warn:\n umsg ( \"No segmentation opened\" )\n return None\n\ndef segmentation_map():\n\n d = volume_segmentation_dialog()\n if d:\n return d.SegmentationMap()\n return None\n\n\n\n\n# -----------------------------------------------------------------------------\n#\nfrom chimera import dialogs\ndialogs.register (Volume_Segmentation_Dialog.name, Volume_Segmentation_Dialog, replace = True)\n", "id": "1698433", "language": "Python", "matching_score": 
5.524244785308838, "max_stars_count": 6, "path": "Segger/segment_dialog.py" }, { "content": "\n# Copyright (c) 2020 <NAME> - <EMAIL>\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport chimera\nimport os\nimport os.path\nimport Tkinter\nfrom CGLtk import Hybrid\nimport VolumeData\nimport _multiscale\nimport MultiScale.surface\nimport _surface\nimport numpy\nimport _contour\nimport Matrix\nimport VolumeViewer\nfrom sys import stderr\nfrom time import clock\n\nfrom axes import prAxes\nimport regions\nimport graph\nfrom Segger import dev_menus, timing, seggerVersion\n\nOML = chimera.openModels.list\n\nREG_OPACITY = 0.45\n\n\nfrom segment_dialog import current_segmentation, segmentation_map\n\n\n\ndef umsg ( txt ) :\n print txt\n status ( txt )\n\n\ndef status ( txt ) :\n txt = txt.rstrip('\\n')\n msg.configure(text = txt)\n msg.update_idletasks()\n\n\n\n\nclass ProMod_Dialog ( chimera.baseDialog.ModelessDialog ):\n\n title = \"ProMod - Probabilistic Models (Segger v\" + seggerVersion + \")\"\n name = \"segger_promod\"\n buttons = ( \"Close\" )\n help = 'https://github.com/gregdp/segger'\n\n def fillInUI(self, parent):\n\n self.group_mouse_mode = None\n\n tw = parent.winfo_toplevel()\n self.toplevel_widget = tw\n tw.withdraw()\n\n parent.columnconfigure(0, weight = 1)\n\n row = 0\n\n menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)\n tw.config(menu = menubar)\n\n f = Tkinter.Frame(parent)\n f.grid(column=0, row=row, sticky='ew')\n l = Tkinter.Label(f, text=' ')\n l.grid(column=0, row=row, sticky='w')\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \"1. Open all models to be considered, make them visible, hide other models\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \"2. Find (closest-to) average model\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n b = Tkinter.Button(ff, text=\"Find Average Model\", command=self.AvgMod)\n b.grid (column=1, row=0, sticky='w', padx=5, pady=1)\n\n\n self.avgModLabel = Tkinter.Label(ff, text = \" \", anchor = 'w')\n self.avgModLabel.grid(column=2, row=0, sticky='ew', padx=5, pady=1)\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \"3. 
Calculate standard deviations at each residue \", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n b = Tkinter.Button(ff, text=\"Calculate\", command=self.Calc)\n b.grid (column=1, row=0, sticky='w', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" - standard deviations are stored for each residue atom as the b-factor\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" - use Tools -> Depiction -> Render by Attribute to show deviations using\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w')\n if 1 :\n l = Tkinter.Label(ff, text = \" color and/or ribbon thickness. See tutorial by pressing Help below.\", anchor = 'w')\n l.grid(column=0, row=0, sticky='ew', padx=5, pady=1)\n\n\n\n row += 1\n f = Tkinter.Frame(parent)\n f.grid(column=0, row=row, sticky='ew')\n l = Tkinter.Label(f, text=' ')\n l.grid(column=0, row=row, sticky='w')\n\n\n row += 1\n dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=row,column=0,columnspan=7, pady=7, sticky='we')\n\n\n global msg\n row = row + 1\n msg = Tkinter.Label(parent, width = 60, anchor = 'w', justify = 'left', fg=\"red\")\n msg.grid(column=0, row=row, sticky='ew', padx=5, pady=1)\n row += 1\n\n\n\n\n\n\n def Calc ( self ) :\n\n\n if hasattr ( self, 'avgMod' ) and hasattr ( self, 'mods' ) and len(self.mods) > 0 and self.avgMod != None :\n print \"Average model: %s -- %d mods\" % ( self.avgMod.name, len(self.mods) )\n else :\n umsg (\"Find Average Model first.\")\n return\n\n\n avgMod = self.avgMod\n mods = self.mods\n\n umsg ( \"Calculating standard deviations...\" )\n\n vars = []\n\n for ri, avgRes in enumerate ( avgMod.residues ) :\n\n\n status ( \"Res %d/%d\" % (ri+1,len(avgMod.residues)) )\n\n\n for avgAt in avgRes.atoms :\n\n mean = 0.0\n\n for m in mods :\n res = m.residues[ri]\n cat = res.atomsMap[avgAt.name][0]\n v = cat.coord() - avgAt.coord()\n d = v.length * v.length\n mean += d\n\n mean /= len(mods)\n stdev = numpy.sqrt ( mean )\n vars.append ( stdev )\n\n for m in mods :\n res = m.residues[ri]\n cat = res.atomsMap[avgAt.name][0]\n cat.bfactor = stdev\n\n\n umsg ( \"%d models, %d residues - min variance %.2f, max variance %.2f\" % (\n len(mods), len(avgMod.residues), numpy.min(vars), numpy.max(vars) ) )\n\n\n\n def Calc_CA ( self ) :\n\n\n if hasattr ( self, 'avgMod' ) and hasattr ( self, 'mods' ) and len(self.mods) > 0 and self.avgMod != None :\n print \"Average model: %s -- %d mods\" % ( self.avgMod.name, len(self.mods) )\n else :\n umsg (\"Find Average Model first.\")\n return\n\n\n avgMod = self.avgMod\n mods = self.mods\n\n umsg ( \"Calculating standard deviations...\" )\n\n vars = []\n\n for ri, resAvg in enumerate ( avgMod.residues ) :\n try :\n catAvg = resAvg.atomsMap[\"CA\"][0]\n except :\n continue\n\n\n mean = 0.0\n\n for m in mods :\n res = m.residues[ri]\n cat = res.atomsMap[\"CA\"][0]\n v = cat.coord() - catAvg.coord()\n d = v.length * v.length\n mean += d\n\n mean /= len(mods)\n stdev = numpy.sqrt ( mean )\n\n vars.append ( stdev )\n\n for m in mods :\n res = m.residues[ri]\n for at in res.atoms :\n at.bfactor = stdev\n #at.occupancy = stdev\n\n\n umsg ( \"%d models, %d residues - min variance %.2f, 
max variance %.2f\" % (\n len(mods), len(avgMod.residues), numpy.min(vars), numpy.max(vars) ) )\n\n\n\n\n def AvgMod0 ( self ) :\n\n self.avgMod = None\n self.mods = []\n import numpy\n\n for m in chimera.openModels.list() :\n if type (m) == chimera.Molecule and m.display == True:\n self.mods.append ( m )\n\n N = len(self.mods)\n\n if N < 2 :\n umsg ( \"At least 2 models are needed - make sure they are shown\" )\n self.avgModLabel.configure ( text = \"\" )\n return\n\n\n\n mod0 = self.mods[0]\n numRes = len(mod0.residues)\n\n umsg ( \"Finding average of %d mods, %d residues\" % ( len(self.mods), len(mod0.residues) ) )\n\n avgPs = numpy.zeros ( [len(mod0.residues), 3] )\n\n for mod in self.mods :\n #print \" - mod: %s, %d residues\" % ( mod.name, len(mod.residues) )\n\n if numRes <> len(mod.residues) :\n umsg (\"All models should have the same number of residues\")\n self.avgModLabel.configure ( text = \"\" )\n return\n\n for ri, res in enumerate ( mod.residues ) :\n cat = None\n try :\n cat = res.atomsMap[\"CA\"][0]\n except :\n #print \"carbon alpha not found in res \", ri, res.id.position\n #return None\n pass\n\n if cat :\n avgPs[ri] += cat.coord().data()\n\n\n N = float ( len(self.mods) )\n for ri, res in enumerate ( mod0.residues ) :\n avgPs[ri] /= N\n\n #if ri == 0 :\n # print \" r0 avg pos: \", avgPs[ri]\n\n\n\n minDist = -1.0\n minMod = None\n\n for mod in self.mods :\n\n #print \" - mod: %s, %d residues\" % ( mod.name, len(mod.residues) ),\n modDist = 0.0\n\n for ri, res in enumerate ( mod.residues ) :\n try :\n cat = res.atomsMap[\"CA\"][0]\n except :\n #print \"carbon alpha not found in mod %s res \" % mod.name, ri, res.id.position\n #return None\n continue\n\n dv = avgPs[ri] - cat.coord().data()\n modDist += numpy.sum ( dv * dv )\n\n #print \", dist: \", modDist\n\n if minMod == None or modDist < minDist :\n minMod = mod\n minDist = modDist\n\n print \"Avg mod: %s, min dist to avg: %.2f\" % (minMod.name, minDist)\n\n self.avgMod = minMod\n\n self.avgModLabel.configure ( text = \" found: %s\" % minMod.name )\n umsg ( \"Average of %d models is %s\" % (len(self.mods), minMod.name) )\n\n\n return minMod, avgPs\n\n\n\n\n\n def AvgMod ( self ) :\n\n self.avgMod = None\n self.mods = []\n import numpy\n\n for m in chimera.openModels.list() :\n if type (m) == chimera.Molecule and m.display == True:\n self.mods.append ( m )\n\n N = len(self.mods)\n\n if N < 2 :\n umsg ( \"At least 2 models are needed - make sure they are shown\" )\n self.avgModLabel.configure ( text = \"\" )\n return\n\n\n\n mod0 = self.mods[0]\n numRes = len(mod0.residues)\n\n umsg ( \"Finding average of %d mods, %d residues\" % ( len(self.mods), len(mod0.residues) ) )\n print \".\"\n\n #avgPs = numpy.zeros ( [len(mod0.atoms), 3] )\n avg = {}\n\n for mod in self.mods :\n #print \" - mod: %s, %d residues\" % ( mod.name, len(mod.residues) )\n\n for res in mod.residues :\n for at in res.atoms :\n if not res.id.chainId in avg :\n avg[res.id.chainId] = {}\n if not res.id.position in avg[res.id.chainId] :\n avg[res.id.chainId][res.id.position] = {}\n if not at.name in avg[res.id.chainId][res.id.position] :\n avg[res.id.chainId][res.id.position][at.name] = []\n\n avg[res.id.chainId][res.id.position][at.name].append ( numpy.array ( at.coord().data() ) )\n\n\n for ci, rmap in avg.iteritems () :\n for ri, amap in rmap.iteritems () :\n for aname, plist in amap.iteritems () :\n if len(plist) <> len(self.mods) :\n print \" - at %s_%d.%s has only %d/%d pos\" % ( aname, ri, ci, len(plist), len(self.mods) )\n\n avgp = numpy.array ( 
[0,0,0] )\n for p in plist :\n avgp += p\n avgp /= float ( len(plist) )\n\n\n\n minDist = -1.0\n minMod = None\n\n for mod in self.mods :\n\n #print \" - mod: %s, %d residues\" % ( mod.name, len(mod.residues) ),\n modDist = 0.0\n\n for ri, res in enumerate ( mod.residues ) :\n for at in res.atoms :\n avgPos = avg[res.id.chainId][res.id.position][at.name]\n dv = numpy.array ( at.coord().data() ) - avgPos\n modDist += numpy.sum ( dv * dv )\n\n #print \", dist: \", modDist\n\n if minMod == None or modDist < minDist :\n minMod = mod\n minDist = modDist\n\n print \"Avg mod: %s, min dist to avg: %.2f\" % (minMod.name, minDist)\n\n self.avgMod = minMod\n\n self.avgModLabel.configure ( text = \" found: %s\" % minMod.name )\n umsg ( \"Average of %d models is %s\" % (len(self.mods), minMod.name) )\n\n\n return minMod\n\n\n\ndef Bring () :\n\n print \"bring...\"\n fromm, tom = None, None\n for m in chimera.openModels.list() :\n if type (m) == chimera.Molecule and m.display == True:\n if \"promod\" in m.name :\n fromm = m\n else :\n tom = m\n\n print \" - from: %s\" % fromm.name\n print \" - to: %s\" % tom.name\n\n bfs = []\n rid = {}\n for r in fromm.residues :\n rid[r.id.position] = r\n for at in r.atoms :\n bfs.append ( at.bfactor )\n\n print \"devs mean: %.3f\" % numpy.average(bfs)\n print \"devs std: %.3f\" % numpy.std(bfs)\n print \"devs 3sig: %.3f\" % (numpy.average(bfs) + 3.0*numpy.std(bfs))\n\n for r in tom.residues :\n rf = rid[r.id.position]\n for at in r.atoms :\n at.bfactor = rf.atomsMap[at.name][0].bfactor\n\n\ndef show_dialog (closeOld = True):\n\n from chimera import dialogs\n\n d = dialogs.find ( ProMod_Dialog.name, create=False )\n if d :\n if closeOld :\n d.toplevel_widget.update_idletasks ()\n d.Close()\n d.toplevel_widget.update_idletasks ()\n else :\n return d\n\n dialogs.register ( ProMod_Dialog.name, ProMod_Dialog, replace = True)\n\n d = dialogs.find ( ProMod_Dialog.name, create=True )\n # Avoid transient dialog resizing when created and mapped for first time.\n d.toplevel_widget.update_idletasks ()\n d.enter()\n\n return d\n\n\n\n# -----------------------------------------------------------------------------\n#\n", "id": "54104", "language": "Python", "matching_score": 2.192263126373291, "max_stars_count": 6, "path": "Segger/promod_dialog.py" }, { "content": "\n\n# Copyright (c) 2020 <NAME> - <EMAIL>\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\n\nimport numpy\nimport _multiscale\nfrom CGLutil.AdaptiveTree import AdaptiveTree\nimport chimera\nimport FitMap\nimport os\nimport Matrix\nimport VolumeData\nimport VolumeViewer\nimport _contour\nimport _gaussian\n\n\n\nchargedIons = { \"MG\":2, \"NA\":1, \"CL\":-1, \"CA\":2, \"ZN\":2, \"MN\":2, \"FE\":3, \"CO\":2, \"NI\":2 }\n\n\n\n# returns the min and max density value in a map\n\ndef MinMaxD ( dmap ) :\n\n # dmap - the map\n\n M = dmap.data.full_matrix()\n maxM = numpy.max(M)\n minM = numpy.min(M)\n\n maxD = min ( numpy.average(M)+numpy.std(M)*10, maxM )\n minD = max ( numpy.average(M)-numpy.std(M)*1, minM )\n\n # xray\n #maxD = min ( numpy.average(M)+numpy.std(M)*3.5, maxM )\n #minD = max ( numpy.average(M)-numpy.std(M)*0.77, minM )\n\n #print \"%s - %.2f->%.2f, %.2f->%.2f\" % (dmap.name, minD, maxD, minM, maxM )\n #minD = numpy.min(M)\n #minD, maxD = numpy.min(M), numpy.max(M)\n return minD, maxD\n\n\n\n\n\n# attempt to do Q-score with volume-volume CC rather than sphere points\n# works ok, but is not faster - main reason why to try\n# another difference is that with sphere points, the same number of points\n# is used at each distance, so the map values at each radial distance even weigh\n\ndef QscoreM ( atoms, dmap, sigma, agrid=None, allAtTree=None, show=0, log=0, toRAD=2.0, step=0.2, minD=None, maxD=None, useMask=False ) :\n\n xyz = _multiscale.get_atom_coordinates(atoms, transformed = False)\n\n #_contour.affine_transform_vertices ( points1, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n\n li,lj,lk = numpy.min ( xyz, axis=0 ) - (toRAD, toRAD, toRAD)\n hi,hj,hk = numpy.max ( xyz, axis=0 ) + (toRAD, toRAD, toRAD)\n nO = ( li, lj, lk )\n #print nO\n #print \" - bounds - %d %d %d --> %d %d %d --> %d %d %d\" % ( li,lj,lk, hi,hj,hk, d1,d2,d3 )\n\n d1, d2, d3 = hi - li, hj - lj, hk - lk\n nstep = (step, step, step)\n #nstep = (fmap.data.step[0]/2.0, fmap.data.step[1]/2.0, fmap.data.step[2]/2.0 )\n\n nn1 = int ( numpy.ceil ( float(d1) / step) )\n nn2 = int ( numpy.ceil ( float(d2) / step) )\n nn3 = int ( numpy.ceil ( float(d3) / step) )\n\n #print \" - step %.2f, n: %d %d %d\" % (S, nn1, nn2, nn3)\n\n nmat = numpy.zeros ( (nn3,nn2,nn1), numpy.float32 )\n\n ii = 1.0 / step\n ni = -ii\n xyz_to_ijk = ((ii, 0.0, 0.0, ni*nO[0]), (0.0, ii, 0.0, ni*nO[1]), (0.0, 0.0, ii, ni*nO[2]))\n ijk_to_xyz = ((step, 0.0, 0.0, nO[0]), (0.0, step, 0.0, nO[1]), (0.0, 0.0, step, nO[2]))\n\n #print ijk_to_xyz\n\n #ijk[:] = xyz\n weights = [ 1.0 for a in atoms]\n sdevs = [ [sigma, sigma, sigma] for a in atoms ]\n cutoff_range = 5\n\n A, B = maxD - minD, minD\n\n\n #ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )\n #print ndata.xyz_to_ijk_transform\n #print ndata.ijk_to_xyz_transform\n #Matrix.transform_points(xyz, ndata.xyz_to_ijk_transform)\n\n if useMask == False :\n Matrix.transform_points(xyz, xyz_to_ijk)\n _gaussian.sum_of_gaussians(xyz, weights, sdevs, cutoff_range, nmat)\n\n #print \" -gm max %.3f\" % numpy.max ( nmat )\n nmat *= A\n nmat += B\n #print \" -gm max %.3f\" % numpy.max ( nmat )\n\n\n # make smaller atom tree...\n if 1 and allAtTree != None :\n ats_near = []\n for at in atoms :\n anear = allAtTree.searchTree ( at.coord().data(), toRAD*2.0 )\n ats_near.extend ( anear )\n\n points = 
_multiscale.get_atom_coordinates ( ats_near, transformed = False )\n if log :\n print \" - new search tree: %d pts\" % ( len(ats_near) )\n allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)\n\n\n if useMask :\n\n nearAts = []\n if agrid != None :\n for at in atoms :\n nats = agrid.AtsNearPtLocal ( at.coord() )\n for nat, v in nats :\n if at != nat :\n nearAts.append ( nat )\n #print \" - %s, %d.%s - %.3f\" % (nat.name, nat.residue.id.position, nat.residue.id.chainId, v.length)\n\n if allAtTree != None :\n for at in atoms :\n opointsNear = allAtTree.searchTree ( at.coord(), toRAD )\n for nat in opointsNear :\n if nat == at :\n continue\n v = at.coord() - nat.coord()\n if v.length < toRAD :\n nearAts.append (nat)\n\n if len(nearAts) == 0 :\n print \" - no near ats?\"\n\n #print \" - %d near ats\" % len(nearAts)\n\n\n for k in range(nn3) :\n pz = nO[2] + float(k)*step\n\n for j in range(nn2) :\n py = nO[1] + float(j)*step\n\n for i in range(nn1) :\n px = nO[0] + float(i)*step\n\n P = chimera.Point(px, py, pz)\n\n minDToAt = 1e9\n for at in atoms :\n v = at.coord() - P\n if v.length < minDToAt :\n minDToAt = v.length\n\n if minDToAt > toRAD :\n nmat[k,j,i] = B-0.1\n continue\n\n closestToAt = True\n for nat in nearAts :\n v = nat.coord() - P\n if v.length < minDToAt :\n closestToAt = False\n #break\n\n if not closestToAt :\n nmat[k,j,i] = minD-0.1\n else :\n nmat[k,j,i] = A * numpy.exp ( -0.5 * numpy.power(minDToAt/sigma,2) ) + B\n\n\n\n if 0 and agrid :\n nearAts = []\n for at in atoms :\n nats = agrid.AtsNearPtLocal ( at.coord() )\n for nat, v in nats :\n if at != nat :\n print \" - %s, %d.%s - %.3f\" % (nat.name, nat.residue.id.position, nat.residue.id.chainId, v.length)\n nearAts.append ( at )\n\n #print \"%d near ats\" % len(nearAts)\n mat1 = numpy.ones ( (nn1,nn2,nn3), numpy.float32 )\n ndata = VolumeData.Array_Grid_Data ( mat1, nO, nstep, dmap.data.cell_angles )\n points = _multiscale.get_atom_coordinates(nearAts, transformed = False)\n\n mdata = VolumeData.zone_masked_grid_data ( ndata, points, toRAD, invert_mask=False )\n #nmat = mdata.matrix()\n nv = VolumeViewer.volume.volume_from_grid_data ( mdata )\n nv.openState.xform = dmap.openState.xform\n mdata = mask\n\n\n fpoints = VolumeData.grid_indices ( (nn3,nn2,nn1), numpy.single) # i,j,k indices\n _contour.affine_transform_vertices ( fpoints, ijk_to_xyz )\n fpoint_weights = numpy.ravel(nmat).astype(numpy.single)\n\n\n #print \" - %d points\" % len(fpoints)\n ge = numpy.greater_equal(fpoint_weights, B)\n fpoints = numpy.compress(ge, fpoints, 0)\n fpoint_weights = numpy.compress(ge, fpoint_weights)\n #print \" - %d above thr\" % len(fpoint_weights)\n #nz = numpy.nonzero( fpoint_weights )[0]\n #print \" - %d above thr\" % len(nz)\n\n #map_values, outside = VolumeData.interpolate_volume_data(pts, xyz_to_ijk_tf, darray)\n #olap0, cc0, other = overlap_and_correlation ( wts, map_values )\n\n map_values = dmap.interpolated_values ( fpoints, atoms[0].molecule.openState.xform )\n #print map_values\n olap, cc, ccm = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n #print olap, cc, ccm\n\n\n if show :\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )\n nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n nv.openState.xform = dmap.openState.xform\n nv.name = \"bam\"\n\n\n\n\n return ccm\n\n\n\ndef zone_mask ( grid_data, zone_points, zone_radius, invert_mask = False, zone_point_mask_values = None ):\n\n from numpy import single as floatc, array, ndarray, zeros, int8, intc\n\n if not 
isinstance(zone_points, ndarray):\n zone_points = array(zone_points, floatc)\n\n if (not zone_point_mask_values is None and not isinstance(zone_point_mask_values, ndarray)):\n zone_point_mask_values = array(zone_point_mask_values, int8)\n\n shape = tuple(reversed(grid_data.size))\n mask_3d = zeros(shape, int8)\n mask_1d = mask_3d.ravel()\n\n if zone_point_mask_values is None:\n if invert_mask:\n mask_value = 0\n mask_1d[:] = 1\n else:\n mask_value = 1\n\n from VolumeData import grid_indices\n from _contour import affine_transform_vertices\n from _closepoints import find_closest_points, BOXES_METHOD\n\n size_limit = 2 ** 22 # 4 Mvoxels\n if mask_3d.size > size_limit:\n # Calculate plane by plane to save memory with grid point array\n xsize, ysize, zsize = grid_data.size\n grid_points = grid_indices((xsize,ysize,1), floatc)\n affine_transform_vertices(grid_points, grid_data.ijk_to_xyz_transform)\n zstep = [grid_data.ijk_to_xyz_transform[a][2] for a in range(3)]\n for z in range(zsize):\n i1, i2, n1 = find_closest_points(BOXES_METHOD, grid_points, zone_points, zone_radius)\n offset = xsize*ysize*z\n if zone_point_mask_values is None:\n mask_1d[i1 + offset] = mask_value\n else:\n mask_1d[i1 + offset] = zone_point_mask_values[n1]\n grid_points[:,:] += zstep\n else :\n grid_points = grid_indices(grid_data.size, floatc)\n affine_transform_vertices(grid_points, grid_data.ijk_to_xyz_transform)\n i1, i2, n1 = find_closest_points(BOXES_METHOD, grid_points, zone_points, zone_radius)\n if zone_point_mask_values is None:\n mask_1d[i1] = mask_value\n else:\n mask_1d[i1] = zone_point_mask_values[n1]\n\n return mask_3d\n\n\n\n# this method calculates CC between radial points placed around the atoms and the map\n# - two values are returned - CC and CC about the mean - the latter is the Q-score\n\n\ndef Qscore ( atoms, dmap, sigma, allAtTree = None, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0, mol=None ) :\n\n if minD == None or maxD == None :\n minD, maxD = MinMaxD (dmap)\n\n #sigma = 1.0\n\n if len(atoms) == 0 :\n #print \" - no RAD atoms?\"\n return None\n\n from _multiscale import get_atom_coordinates\n pts = get_atom_coordinates(atoms, transformed = False)\n #print \" __%s__ \" % (atoms[0].name), pts[0]\n\n A, B = maxD - minD, minD\n refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B\n #print \" - refg: \", refG\n\n # g_vals should have the reference gaussian...\n g_vals = (numpy.ones ( [len(pts)*numPts,1] ) * refG).astype(numpy.float64, copy=False)\n g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False)\n\n if mol == None :\n mol = atoms[0].molecule\n\n # r_avg holds the average values and number of points at each radial distance\n d_vals = dmap.interpolated_values ( pts, mol.openState.xform ).astype(numpy.float64, copy=False)\n #print pts\n #print d_vals\n d_vals = numpy.repeat ( d_vals, numPts )\n\n avgV = numpy.average ( d_vals )\n r_avg = [ [0,avgV,len(pts)*numPts] ]\n\n d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)\n\n\n # make smaller atom tree...\n if 1 and allAtTree != None :\n ats_near = []\n for at in atoms :\n anear = allAtTree.searchTree ( at.coord().data(), toRAD*2.0 )\n ats_near.extend ( anear )\n\n points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )\n if log :\n print \" - new search tree: %d pts\" % ( len(ats_near) )\n allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)\n\n # check if any atoms are too close; ignore those atoms and give them q=0\n if 0 and allAtTree :\n for at 
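\n\n# --- illustrative sketch (added; not part of the original MapQ code) ---\n# zone_mask above returns an int8 grid (same shape as the map matrix) that is 1 within\n# zone_radius of any zone point. A minimal sketch of masking a map around a set of atoms and\n# showing the result, reusing Array_Grid_Data / volume_from_grid_data as done elsewhere in\n# this file; assumes the model and map share local coordinates (hypothetical function name):\ndef _example_zone_masked_map ( dmap, atoms, radius=3.0 ) :\n    pts = _multiscale.get_atom_coordinates ( atoms, transformed = False )\n    mask = zone_mask ( dmap.data, pts, radius )\n    mdata = VolumeData.Array_Grid_Data ( mask * dmap.data.full_matrix(), dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n    nv = VolumeViewer.volume.volume_from_grid_data ( mdata )\n    nv.openState.xform = dmap.openState.xform\n    nv.name = \"masked \" + dmap.name\n    return nv\n\n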
in atoms :\n anear = allAtTree.searchTree ( at.coord().data(), 2.0 )\n for nat in anear :\n if nat != at :\n v = at.coord() - nat.coord()\n if v.length < 1.0 :\n print \"c\"\n return 0.0\n\n\n #olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n #dRAD, toRAD, RAD = 0.2, 1.8, 0.1\n RAD = dRAD\n i = 1.0\n while RAD < toRAD + 0.01 :\n outRad = RAD*0.9\n outRad2 = outRad * outRad\n #outRad2 = outRad * outRad\n pts = []\n for at in atoms :\n #npts = numPts # 8 # int ( npts )\n npts = int (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts\n #npts = numPts * (RAD*RAD / (dRAD*dRAD))\n #print RAD, dRAD, numPts, \" -> \", npts\n for i in range (0, 50) :\n outPts = SpherePts ( at.coord(), RAD, npts+i*2 )\n at_pts, at_pts_i = [None]*len(outPts), 0\n for pt in outPts :\n vPt = [pt[0], pt[1], pt[2]]\n apt = numpy.array ( vPt )\n if allAtTree != None :\n opointsNear = allAtTree.searchTree ( vPt, outRad )\n\n if 1 :\n foundNearPt = False\n for npt in opointsNear :\n v = apt - npt.coord().data()\n r2 = numpy.sum ( v * v )\n if r2 < outRad2 :\n foundNearPt = True\n break\n if not foundNearPt :\n at_pts[at_pts_i] = vPt\n at_pts_i += 1\n\n else :\n if len(opointsNear) == 0 :\n at_pts[at_pts_i] = vPt\n at_pts_i += 1\n else :\n at_pts[at_pts_i] = vPt\n at_pts_i += 1\n #if log :\n # print \" - %d, %d pts\" % (i, len(at_pts))\n if at_pts_i >= npts : # or show :\n #print \" - %.2f - after %d\" % (RAD, i)\n pts.extend ( at_pts[0:at_pts_i] )\n break\n\n if show :\n pmod = AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, \"RAD points %.1f %s\" % (RAD,atoms[0].name) )\n pmod.openState.xform = atoms[0].molecule.openState.xform\n\n if len (pts) < 1 :\n if log :\n print \" - no points for RAD %.1f - %d.%s - \" % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),\n print \"SC\" if atoms[0].isSC else \"BB\"\n\n r_avg.append ( [RAD,0,0] )\n\n\n else :\n d_vals_n = dmap.interpolated_values ( pts, mol.openState.xform )\n d_vals = numpy.append ( d_vals, d_vals_n )\n avg = numpy.average ( d_vals_n )\n\n #gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n #A, B = GV, 0\n #A, B = GV - minD, minD\n A,B = maxD - minD, minD\n gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B\n g_vals = numpy.append ( g_vals, numpy.ones([len(pts),1]) * gv )\n\n g_vals_avg = numpy.append ( g_vals_avg, gv )\n d_vals_avg = numpy.append ( d_vals_avg, avg )\n\n r_avg.append ( [RAD,avg,len(pts)] )\n\n #if log :\n # print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg, gv, len(pts))\n\n RAD += dRAD\n i+=1\n\n if log and not fitg :\n min, max = r_avg[0][1], r_avg[0][1]\n for RAD, avg, numPts in r_avg :\n if avg < min : min = avg\n if avg > max : max = avg\n A,B = max-min, min\n #A,B = maxD - minD, minD\n #A,B = GV - minD, minD\n for RAD, avg, numPts in r_avg :\n gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B\n #print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg+0.02, gv+0.02, numPts)\n print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg, gv, numPts)\n\n #d_vals = d_vals + 0.02\n #g_vals = g_vals + 0.02\n\n # this is the CC between averaged radial values - not at robust\n if 0 :\n olap, CC, CCmean = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )\n if log :\n print \"olap -avg-: %.3f cc: %.3f, Q: %.3f -- %d\" % (olap, CC, Qs, len(d_vals_avg))\n #print \"%f\\t%f\\t%f\" % (olap, CC, Qs)\n\n olap, CC, CCmean = FitMap.overlap_and_correlation ( d_vals, g_vals )\n # this is the CC between _all_ radial values\n Qs = CCmean\n if log :\n print \"olap --N--: %.3f cc: %.3f, ccmean (Q-score): %.3f -- %d\" % (olap, CC, Qs, 
len(d_vals))\n #print \"%f\\t%f\\t%f\" % (olap, CC, Qs)\n\n\n if fitg :\n if log : print \"fitting gaussian : \"\n #V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))\n V, N = [ [x[0],x[1]] for x in r_avg[0:15] ], float(15)\n\n sdev, A, B = optSGD ( V, 5000, 1.0 )\n sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )\n err = numpy.sqrt(err3(V,sdev,A,B)/N)\n errp = err / r_avg[0][1] * 100.0\n if log : print \" sgd - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)\" % (sdev, A, B, err, errp)\n sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )\n if sdev2 != None :\n sdev, A, B = sdev2, A2, B2\n err = numpy.sqrt(err3(V,sdev,A,B)/N)\n #print \"max:\", r_avg[0][1]\n errp = err / r_avg[0][1] * 100.0\n if log : print \" gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)\" % (sdev, A, B, err, errp)\n\n yds, i = numpy.zeros ( len(r_avg) ), 0\n mx = 0.0\n for x, y, n in r_avg:\n gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n #yds[i] = y - gv\n yds[i] = y\n if y > mx :\n mx = y\n if gv > mx :\n mx = gv\n if log : print \"%.1f\\t%f\\t%f\" % (x, y, gv)\n i += 1\n\n print \"\"\n\n yds, i = numpy.zeros ( len(r_avg) ), 0\n for x, y, n in r_avg:\n gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n #yds[i] = y - gv\n yds[i] = y\n if log : print \"%.1f\\t%f\\t%f\" % (x, y/mx, gv/mx)\n i += 1\n\n return Qs, yds, err\n\n else :\n return Qs\n\n\n\n\n# qscores on a grid\n\ndef QscoreG ( atoms, dmap, sigma, agrid=None, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0, mol=None ) :\n\n\n if minD == None or maxD == None :\n minD, maxD = MinMaxD (dmap)\n\n #sigma = 1.0\n\n if len(atoms) == 0 :\n #print \" - no RAD atoms?\"\n return None\n\n from _multiscale import get_atom_coordinates\n pts = get_atom_coordinates(atoms, transformed = False)\n #print \" __%s__ \" % (atoms[0].name), pts[0]\n\n\n A, B = maxD - minD, minD\n refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B\n #print \" - refg: \", refG\n\n # g_vals should have the reference gaussian...\n g_vals = (numpy.ones ( [len(pts)*numPts,1] ) * refG).astype(numpy.float64, copy=False)\n g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False)\n\n if mol == None :\n mol = atoms[0].molecule\n\n # r_avg holds the average values and number of points at each radial distance\n d_vals = dmap.interpolated_values ( pts, mol.openState.xform ).astype(numpy.float64, copy=False)\n d_vals = numpy.repeat ( d_vals, numPts )\n\n avgV = numpy.average ( d_vals )\n r_avg = [ [0,avgV,len(pts)*numPts] ]\n\n d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)\n\n #olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n #dRAD, toRAD, RAD = 0.2, 1.8, 0.1\n RAD = dRAD\n i = 1.0\n while RAD < toRAD + 0.01 :\n outRad = RAD*0.9\n outRad2 = outRad * outRad\n #outRad2 = outRad * outRad\n pts = []\n for at in atoms :\n #npts = numPts # 8 # int ( npts )\n npts = int (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts\n #npts = numPts * (RAD*RAD / (dRAD*dRAD))\n #print RAD, dRAD, numPts, \" -> \", npts\n for i in range (0, 100) :\n outPts = SpherePts ( at.coord(), RAD, npts+i*2 )\n at_pts, at_pts_i = [None]*len(outPts), 0\n for pt in outPts :\n vPt = [pt[0], pt[1], pt[2]]\n #apt = numpy.array ( vPt )\n P = chimera.Point ( pt[0], pt[1], pt[2] )\n if agrid != None :\n #opointsNear = allAtTree.searchTree ( vPt, outRad )\n nearAts = agrid.AtsNearPtLocal ( P )\n if len(nearAts) <= 1 :\n at_pts[at_pts_i] = vPt\n at_pts_i += 1\n else :\n at_pts[at_pts_i] = vPt\n at_pts_i += 1\n #if log :\n # 
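\n\n# --- illustrative sketch (added; not part of the original MapQ code) ---\n# The Q-score returned by Qscore above is the third value (correlation about the mean) from\n# FitMap.overlap_and_correlation between measured map values and the reference Gaussian values.\n# A tiny synthetic check of that interpretation (hypothetical function name):\ndef _example_q_is_cc_about_mean () :\n    ref = numpy.array ( [1.0, 0.8, 0.5, 0.2, 0.1], numpy.float64 )\n    olap, cc, ccm = FitMap.overlap_and_correlation ( ref, ref )\n    print \"identical profiles -> Q = %.3f\" % ccm   # expect 1.0\n    olap, cc, ccm = FitMap.overlap_and_correlation ( ref, ref[::-1].copy() )\n    print \"reversed profile -> Q = %.3f\" % ccm    # strongly negative\n\n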
print \" - %d, %d pts\" % (i, len(at_pts))\n if at_pts_i >= npts or i >= 95 : # or show :\n pts.extend ( at_pts[0:at_pts_i] )\n break\n\n if show :\n pmod = AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, \"RAD points %.1f %s\" % (RAD,atoms[0].name) )\n pmod.openState.xform = atoms[0].molecule.openState.xform\n\n if len (pts) < 1 :\n if log :\n print \" - no points for RAD %.1f - %d.%s - \" % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),\n print \"SC\" if atoms[0].isSC else \"BB\"\n\n r_avg.append ( [RAD,0,0] )\n\n\n else :\n d_vals_n = dmap.interpolated_values ( pts, mol.openState.xform )\n d_vals = numpy.append ( d_vals, d_vals_n )\n avg = numpy.average ( d_vals_n )\n\n #gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n #A, B = GV, 0\n #A, B = GV - minD, minD\n A,B = maxD - minD, minD\n gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B\n g_vals = numpy.append ( g_vals, numpy.ones([len(pts),1]) * gv )\n\n g_vals_avg = numpy.append ( g_vals_avg, gv )\n d_vals_avg = numpy.append ( d_vals_avg, avg )\n\n r_avg.append ( [RAD,avg,len(pts)] )\n\n #if log :\n # print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg, gv, len(pts))\n\n RAD += dRAD\n i+=1\n\n if log and not fitg :\n min, max = r_avg[0][1], r_avg[0][1]\n for RAD, avg, numPts in r_avg :\n if avg < min : min = avg\n if avg > max : max = avg\n A,B = max-min, min\n #A,B = maxD - minD, minD\n #A,B = GV - minD, minD\n for RAD, avg, numPts in r_avg :\n gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B\n #print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg+0.02, gv+0.02, numPts)\n print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg, gv, numPts)\n\n #d_vals = d_vals + 0.02\n #g_vals = g_vals + 0.02\n\n # this is the CC between averaged radial values - not at robust\n if 0 :\n olap, CC, CCmean = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )\n if log :\n print \"olap -avg-: %.3f cc: %.3f, Q: %.3f -- %d\" % (olap, CC, Qs, len(d_vals_avg))\n #print \"%f\\t%f\\t%f\" % (olap, CC, Qs)\n\n olap, CC, CCmean = FitMap.overlap_and_correlation ( d_vals, g_vals )\n # this is the CC between _all_ radial values\n Qs = CCmean\n if log :\n print \"olap --N--: %.3f cc: %.3f, ccmean (Q-score): %.3f -- %d\" % (olap, CC, Qs, len(d_vals))\n #print \"%f\\t%f\\t%f\" % (olap, CC, Qs)\n\n\n if fitg :\n if log : print \"fitting gaussian : \"\n #V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))\n V, N = [ [x[0],x[1]] for x in r_avg[0:15] ], float(15)\n\n sdev, A, B = optSGD ( V, 5000, 1.0 )\n sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )\n err = numpy.sqrt(err3(V,sdev,A,B)/N)\n errp = err / r_avg[0][1] * 100.0\n if log : print \" sgd - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)\" % (sdev, A, B, err, errp)\n sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )\n if sdev2 != None :\n sdev, A, B = sdev2, A2, B2\n err = numpy.sqrt(err3(V,sdev,A,B)/N)\n #print \"max:\", r_avg[0][1]\n errp = err / r_avg[0][1] * 100.0\n if log : print \" gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)\" % (sdev, A, B, err, errp)\n\n yds, i = numpy.zeros ( len(r_avg) ), 0\n mx = 0.0\n for x, y, n in r_avg:\n gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n #yds[i] = y - gv\n yds[i] = y\n if y > mx :\n mx = y\n if gv > mx :\n mx = gv\n if log : print \"%.1f\\t%f\\t%f\" % (x, y, gv)\n i += 1\n\n print \"\"\n\n yds, i = numpy.zeros ( len(r_avg) ), 0\n for x, y, n in r_avg:\n gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n #yds[i] = y - gv\n yds[i] = y\n if log : print \"%.1f\\t%f\\t%f\" % (x, y/mx, gv/mx)\n i += 1\n\n return Qs, yds, err\n\n else :\n return 
Qs\n\n\n\n\n# this is an older Q-score function which does not try to make sure to use numPts around each atom\n\ndef Qscore_ ( atoms, dmap, sigma, allAtTree = None, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0, mol=None ) :\n\n if minD == None or maxD == None :\n minD, maxD = MinMaxD (dmap)\n\n #sigma = 1.0\n\n if len(atoms) == 0 :\n #print \" - no RAD atoms?\"\n return None\n\n from _multiscale import get_atom_coordinates\n pts = get_atom_coordinates(atoms, transformed = False)\n #print \" __%s__ \" % (atoms[0].name), pts[0]\n\n\n A, B = maxD - minD, minD\n refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B\n #print \" - refg: \", refG\n\n # g_vals should have the reference gaussian...\n g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False)\n\n if mol == None :\n mol = atoms[0].molecule\n\n\n # r_avg holds the average values and number of points at each radial distance\n d_vals = dmap.interpolated_values ( pts, mol.openState.xform ).astype(numpy.float64, copy=False)\n d_vals = numpy.repeat ( d_vals, numPts )\n\n avgV = numpy.average ( d_vals )\n r_avg = [ [0,avgV,len(pts)*numPts] ]\n\n d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)\n\n\n # make smaller atom tree...\n if 1 and allAtTree != None :\n ats_near = []\n for at in atoms :\n anear = allAtTree.searchTree ( at.coord().data(), toRAD*2.0 )\n ats_near.extend ( anear )\n\n points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )\n if log :\n print \" - new search tree: %d pts\" % ( len(ats_near) )\n allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)\n\n\n\n #olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n #dRAD, toRAD, RAD = 0.2, 1.8, 0.1\n RAD = dRAD\n i = 1.0\n while RAD < toRAD + 0.01 :\n outRad = RAD*0.9\n outRad2 = outRad * outRad\n #outRad2 = outRad * outRad\n pts = []\n for at in atoms :\n\n outPts = SpherePts ( at.coord(), RAD, numPts )\n at_pts, at_pts_i = [None]*len(outPts), 0\n\n for pt in outPts :\n vPt = [pt[0], pt[1], pt[2]]\n apt = numpy.array ( vPt )\n if allAtTree != None :\n opointsNear = allAtTree.searchTree ( vPt, outRad )\n\n if 1 :\n foundNearPt = False\n for npt in opointsNear :\n v = apt - npt.coord().data()\n r2 = numpy.sum ( v * v )\n if r2 < outRad2 :\n foundNearPt = True\n break\n if not foundNearPt :\n at_pts[at_pts_i] = vPt\n at_pts_i += 1\n\n else :\n if len(opointsNear) == 0 :\n at_pts[at_pts_i] = vPt\n at_pts_i += 1\n else :\n at_pts[at_pts_i] = vPt\n at_pts_i += 1\n\n pts.extend ( at_pts[0:at_pts_i] )\n\n if show :\n AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, \"RAD points %.1f\" % RAD )\n\n if len (pts) < 1 :\n if 0 and log :\n print \" - no points for RAD %.1f - %d.%s - \" % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),\n print \"SC\" if atoms[0].isSC else \"BB\"\n\n r_avg.append ( [RAD,0,0] )\n\n\n else :\n d_vals_n = dmap.interpolated_values ( pts, mol.openState.xform )\n #d_vals = numpy.append ( d_vals, d_vals_n )\n avg = numpy.average ( d_vals_n )\n\n A,B = maxD - minD, minD\n gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B\n\n g_vals_avg = numpy.append ( g_vals_avg, gv )\n d_vals_avg = numpy.append ( d_vals_avg, avg )\n\n r_avg.append ( [RAD,avg,len(pts)] )\n\n\n RAD += dRAD\n i+=1\n\n if 0 and log :\n min, max = r_avg[0][1], r_avg[0][1]\n for RAD, avg, numPts in r_avg :\n if avg < min : min = avg\n if avg > max : max = avg\n A,B = max-min, min\n A,B = maxD - minD, minD\n #A,B = GV - minD, minD\n for RAD, avg, numPts in r_avg :\n gv 
= A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B\n #print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg+0.02, gv+0.02, numPts)\n print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg, gv, numPts)\n\n #d_vals = d_vals + 0.02\n #g_vals = g_vals + 0.02\n\n olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )\n Qscore = CCm\n if log :\n print \"olap -avg-: %.3f cc: %.3f, ccm (Q-score): %.3f -- %d\" % (olap, CC, CCm, len(d_vals_avg))\n #print \"%f\\t%f\\t%f\" % (olap, CC, CCm)\n\n\n if fitg :\n if log : print \"fitting gaussian : \"\n #V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))\n V, N = [ [x[0],x[1]] for x in r_avg[0:15] ], float(15)\n\n sdev, A, B = optSGD ( V, 5000, 1.0 )\n sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )\n err = numpy.sqrt(err3(V,sdev,A,B)/N)\n if log : print \" sgd - sdev: %.4f, A %.4f, B %.4f, err: %f\" % (sdev, A, B, err)\n sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )\n if sdev2 != None :\n sdev, A, B = sdev2, A2, B2\n err = numpy.sqrt(err3(V,sdev,A,B)/N)\n print \"max:\", r_avg[0][1]\n errp = err / r_avg[0][1] * 100.0\n if log : print \" gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)\" % (sdev, A, B, err, errp)\n\n yds, i = numpy.zeros ( len(r_avg) ), 0\n for x, y, n in r_avg:\n gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n #yds[i] = y - gv\n yds[i] = y\n if log : print \"%.1f\\t%f\\t%f\" % (x, y, gv)\n i += 1\n\n return Qscore, yds, err\n\n else :\n return Qscore\n\n\n\n\ndef QscorePt ( atPt, xfI, dmap, sigma, allAtTree = None, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0 ) :\n\n if minD == None or maxD == None :\n minD, maxD = MinMaxD (dmap)\n\n #xfI = chimera.Xform()\n atPtC = chimera.Point ( *atPt )\n\n A, B = maxD - minD, minD\n refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B\n #print \" - refg: \", refG\n\n # g_vals should have the reference gaussian...\n g_vals = (numpy.ones ( [numPts,1] ) * refG).astype(numpy.float64, copy=False )\n g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False )\n\n # r_avg holds the average values and number of points at each radial distance\n d_vals = dmap.interpolated_values ( [atPt], xfI ).astype(numpy.float64, copy=False)\n d_vals = numpy.repeat ( d_vals, numPts )\n\n avgV = numpy.average ( d_vals )\n r_avg = [ [0,avgV,numPts] ]\n\n d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)\n\n\n # make smaller atom tree...\n if 1 and allAtTree != None :\n ats_near = []\n anear = allAtTree.searchTree ( atPt, toRAD*2.0 )\n ats_near.extend ( anear )\n\n points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )\n if log :\n print \" - new search tree: %d pts\" % ( len(ats_near) )\n allAtTree = AdaptiveTree ( points.tolist(), ats_near, 1.0)\n\n #olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n #dRAD, toRAD, RAD = 0.2, 1.8, 0.1\n RAD = dRAD\n i = 1.0\n while RAD < toRAD + 0.01 :\n outRad = RAD*0.9\n outRad2 = outRad * outRad\n #outRad2 = outRad * outRad\n pts = []\n\n for i in range (0, 100) :\n outPts = SpherePts ( atPtC, RAD, numPts+i*2 )\n at_pts, at_pts_i = [None]*len(outPts), 0\n for pt in outPts :\n vPt = [pt[0], pt[1], pt[2]]\n apt = numpy.array ( vPt )\n if allAtTree != None :\n opointsNear = allAtTree.searchTree ( vPt, outRad )\n foundNearPt = False\n for npt in opointsNear :\n v = apt - npt.coord().data()\n r2 = numpy.sum ( v * v )\n if r2 < outRad2 :\n foundNearPt = True\n break\n if not foundNearPt :\n at_pts[at_pts_i] = vPt\n at_pts_i += 1\n\n else :\n at_pts[at_pts_i] = 
vPt\n at_pts_i += 1\n #if log :\n # print \" - %d, %d pts\" % (i, len(at_pts))\n if at_pts_i >= numPts or i >= 15 : # or show :\n pts.extend ( at_pts[0:at_pts_i] )\n break\n\n if len (pts) < 1 :\n if log :\n print \" - no points for RAD %.1f - %d.%s - \" % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),\n print \"SC\" if atoms[0].isSC else \"BB\"\n\n r_avg.append ( [RAD,0,0] )\n\n\n else :\n d_vals_n = dmap.interpolated_values ( pts, xfI )\n d_vals = numpy.append ( d_vals, d_vals_n )\n avg = numpy.average ( d_vals_n )\n\n #gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n #A, B = GV, 0\n #A, B = GV - minD, minD\n A,B = maxD - minD, minD\n gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B\n g_vals = numpy.append ( g_vals, numpy.ones([len(pts),1]) * gv )\n\n g_vals_avg = numpy.append ( g_vals_avg, gv )\n d_vals_avg = numpy.append ( d_vals_avg, avg )\n\n r_avg.append ( [RAD,avg,len(pts)] )\n\n #if log :\n # print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg, gv, len(pts))\n\n RAD += dRAD\n i+=1\n\n if log and not fitg :\n min, max = r_avg[0][1], r_avg[0][1]\n for RAD, avg, numPts in r_avg :\n if avg < min : min = avg\n if avg > max : max = avg\n A,B = max-min, min\n #A,B = maxD - minD, minD\n #A,B = GV - minD, minD\n for RAD, avg, numPts in r_avg :\n gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B\n #print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg+0.02, gv+0.02, numPts)\n print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg, gv, numPts)\n\n #d_vals = d_vals + 0.02\n #g_vals = g_vals + 0.02\n\n #if log :\n # olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )\n # print \"olap -avg-: %.3f cc: %.3f, ccm: %.3f -- %d\" % (olap, CC, CCm, len(d_vals_avg))\n # #print \"%f\\t%f\\t%f\" % (olap, CC, CCm)\n\n\n olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals, g_vals )\n qscore = CCm\n if log :\n print \"olap --N--: %.3f cc: %.3f, ccm: %.3f -- %d\" % (olap, CC, CCm, len(d_vals))\n #print \"%f\\t%f\\t%f\" % (olap, CC, CCm)\n\n if fitg :\n if log : print \"fitting gaussian : \"\n #V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))\n V, N = [ [x[0],x[1]] for x in r_avg[0:25] ], float(25)\n\n sdev, A, B = optSGD ( V, 5000, 1.0 )\n sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )\n err = numpy.sqrt(err3(V,sdev,A,B)/N)\n if log : print \" sgd - sdev: %.4f, A %.4f, B %.4f, err: %f\" % (sdev, A, B, err)\n sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )\n if sdev2 != None :\n sdev, A, B = sdev2, A2, B2\n err = numpy.sqrt(err3(V,sdev,A,B)/N)\n #print \"max:\", r_avg[0][1]\n errp = err / r_avg[0][1] * 100.0\n if log : print \" gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)\" % (sdev, A, B, err, errp)\n\n yds, i = numpy.zeros ( len(r_avg) ), 0\n for x, y, n in r_avg:\n gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n #yds[i] = y - gv\n yds[i] = y\n if log : print \"%.1f\\t%f\\t%f\" % (x, y, gv)\n i += 1\n\n return qscore, yds, err\n\n else :\n return qscore\n\n\n\n# calculate Q-score given a point (rather than atom), and using a 'points tree' rather than 'atoms tree'\n\ndef QscorePt2 ( atPt, xfI, dmap, sigma, allPtTree = None, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0 ) :\n\n if minD == None or maxD == None :\n minD, maxD = MinMaxD (dmap)\n\n #xfI = chimera.Xform()\n atPtC = chimera.Point ( *atPt )\n #print atPtC\n\n A, B = maxD - minD, minD\n refG = A * numpy.exp ( -0.5 * numpy.power(0.0/sigma,2) ) + B\n #print \" - refg: \", refG\n\n # g_vals should have the reference gaussian...\n g_vals = (numpy.ones ( [numPts,1] ) * 
refG).astype(numpy.float64, copy=False )\n g_vals_avg = numpy.array ( [refG] ).astype(numpy.float64, copy=False )\n\n # r_avg holds the average values and number of points at each radial distance\n d_vals = dmap.interpolated_values ( [atPt], xfI ).astype(numpy.float64, copy=False)\n #print atPt\n #print d_vals\n d_vals = numpy.repeat ( d_vals, numPts )\n\n avgV = numpy.average ( d_vals )\n r_avg = [ [0,avgV,numPts] ]\n\n d_vals_avg = numpy.array ( [avgV] ).astype(numpy.float64, copy=False)\n\n # make smaller atom tree, shaves a few ms off running time for each point\n if 1 and allPtTree != None :\n pts_near = []\n anear = allPtTree.searchTree ( atPt, toRAD*2.0 )\n pts_near.extend ( anear )\n\n #points = _multiscale.get_atom_coordinates ( ats_near, transformed = False )\n if log :\n print \" - new search tree: %d pts\" % ( len(ats_near) )\n allPtTree = AdaptiveTree ( pts_near, pts_near, 1.0)\n\n #olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n #dRAD, toRAD, RAD = 0.2, 1.8, 0.1\n RAD = dRAD\n i = 1.0\n while RAD < toRAD + 0.01 :\n outRad = RAD*0.9\n outRad2 = outRad * outRad\n #outRad2 = outRad * outRad\n pts = []\n\n # try to get at least [numPts] points at [RAD] distance\n # from the atom, that are not closer to other atoms\n for i in range (0, 50) :\n # points on a sphere at radius RAD...\n outPts = SpherePts ( atPtC, RAD, numPts+i*2 )\n at_pts, at_pts_i = [None]*len(outPts), 0\n for pt in outPts :\n vPt = [pt[0], pt[1], pt[2]]\n apt = numpy.array ( vPt )\n if allPtTree != None :\n opointsNear = allPtTree.searchTree ( vPt, outRad )\n foundNearPt = False\n for npt in opointsNear :\n v = apt - npt\n r2 = numpy.sum ( v * v )\n if r2 < outRad2 :\n foundNearPt = True\n break\n if not foundNearPt :\n at_pts[at_pts_i] = vPt\n at_pts_i += 1\n\n else :\n at_pts[at_pts_i] = vPt\n at_pts_i += 1\n #if log :\n # print \" - %d, %d pts\" % (i, len(at_pts))\n if at_pts_i >= numPts : # or show :\n #print \" - %.2f - after %d\" % (RAD, i)\n pts.extend ( at_pts[0:at_pts_i] )\n break\n\n if len (pts) < 1 :\n if log :\n print \" - no points for RAD %.1f - %d.%s - \" % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),\n print \"SC\" if atoms[0].isSC else \"BB\"\n\n r_avg.append ( [RAD,0,0] )\n\n\n else :\n d_vals_n = dmap.interpolated_values ( pts, xfI )\n d_vals = numpy.append ( d_vals, d_vals_n )\n avg = numpy.average ( d_vals_n )\n\n #gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n #A, B = GV, 0\n #A, B = GV - minD, minD\n A,B = maxD - minD, minD\n gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B\n g_vals = numpy.append ( g_vals, numpy.ones([len(pts),1]) * gv )\n\n g_vals_avg = numpy.append ( g_vals_avg, gv )\n d_vals_avg = numpy.append ( d_vals_avg, avg )\n\n r_avg.append ( [RAD,avg,len(pts)] )\n\n #if log :\n # print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg, gv, len(pts))\n\n RAD += dRAD\n i+=1\n\n if log and not fitg :\n min, max = r_avg[0][1], r_avg[0][1]\n for RAD, avg, numPts in r_avg :\n if avg < min : min = avg\n if avg > max : max = avg\n A,B = max-min, min\n #A,B = maxD - minD, minD\n #A,B = GV - minD, minD\n for RAD, avg, numPts in r_avg :\n gv = A * numpy.exp ( -0.5 * numpy.power(RAD/sigma,2) ) + B\n #print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg+0.02, gv+0.02, numPts)\n print \"%.1f\\t%f\\t%f\\t%d\" % (RAD, avg, gv, numPts)\n\n #d_vals = d_vals + 0.02\n #g_vals = g_vals + 0.02\n\n #if log :\n # olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals_avg, g_vals_avg )\n # print \"olap -avg-: %.3f cc: %.3f, ccm: %.3f -- %d\" % (olap, CC, 
CCm, len(d_vals_avg))\n # #print \"%f\\t%f\\t%f\" % (olap, CC, CCm)\n\n olap, CC, CCm = FitMap.overlap_and_correlation ( d_vals, g_vals )\n qscore = CCm\n if log :\n print \"olap --N--: %.3f cc: %.3f, ccm: %.3f -- %d\" % (olap, CC, CCm, len(d_vals))\n #print \"%f\\t%f\\t%f\" % (olap, CC, CCm)\n\n if fitg :\n if log : print \"fitting gaussian : \"\n #V, N = [ [x[0],x[1]] for x in r_avg ], float(len(r_avg))\n V, N = [ [x[0],x[1]] for x in r_avg[0:25] ], float(25)\n\n sdev, A, B = optSGD ( V, 5000, 1.0 )\n sdev, A, B = optSGD ( V, 5000, 0.1, sdev, A, B )\n err = numpy.sqrt(err3(V,sdev,A,B)/N)\n if log : print \" sgd - sdev: %.4f, A %.4f, B %.4f, err: %f\" % (sdev, A, B, err)\n sdev2, A2, B2 = optGN ( V, 0.0001, sdev, A, B )\n if sdev2 != None :\n sdev, A, B = sdev2, A2, B2\n err = numpy.sqrt(err3(V,sdev,A,B)/N)\n #print \"max:\", r_avg[0][1]\n errp = err / r_avg[0][1] * 100.0\n if log : print \" gn - sdev: %.4f, A %.4f, B %.4f, err: %f (%.1f%%)\" % (sdev, A, B, err, errp)\n\n yds, i = numpy.zeros ( len(r_avg) ), 0\n for x, y, n in r_avg:\n gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n #yds[i] = y - gv\n yds[i] = y\n if log : print \"%.1f\\t%f\\t%f\" % (x, y, gv)\n i += 1\n\n return qscore, yds, err\n\n else :\n return qscore\n\n\n\ndef RadAts ( atoms, dmap, allAtTree = None, show=0, log=0, numPts=20, toRAD=2.0, dRAD=0.1 ) :\n\n if len(atoms) == 0 :\n #print \" - no RAD atoms?\"\n return None\n\n #pts = []\n #for at in atoms :\n # p = at.coord()\n # pts.append ( [p[0], p[1], p[2]] )\n\n from _multiscale import get_atom_coordinates\n pts = get_atom_coordinates(atoms, transformed = False)\n\n RD_, X, Y = [], [], []\n d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )\n avg = numpy.average ( d_vals )\n\n RD_.append ( [0,avg] ); X.append (0); Y.append (avg)\n\n\n #dRAD, toRAD, RAD = 0.2, 1.8, 0.1\n RAD = dRAD\n i = 1.0\n while RAD < toRAD + 0.01 :\n outRad = RAD*0.9\n outRad2 = outRad * outRad\n pts = []\n for at in atoms :\n npts = (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts\n npts = int ( npts )\n #print RAD, dRAD, numPts, \" -> \", npts\n outPts = SpherePts ( at.coord(), RAD, npts )\n for pt in outPts :\n ppt = [pt[0], pt[1], pt[2]]\n if allAtTree != None :\n vPt = numpy.array ( ppt )\n opointsNear = allAtTree.searchTree ( ppt, outRad )\n if 1 :\n clash = False\n for p in opointsNear :\n v = vPt - p.coord().data()\n sqSum = numpy.sum ( v * v )\n if sqSum < outRad2 :\n clash = True\n break\n if clash == False :\n pts.append ( ppt )\n\n else :\n if len(opointsNear) == 0 :\n pts.append ( ppt )\n else :\n pts.append ( ppt )\n\n if show :\n AddSpherePts ( pts, (.6,.6,.6,0.4), 0.1, \"RAD points %.1f\" % RAD )\n\n if len (pts) < 1 :\n if log :\n print \" - no points for RAD %.1f - %d.%s - \" % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),\n print \"SC\" if atoms[0].isSC else \"BB\"\n\n else :\n d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )\n avg = numpy.average ( d_vals )\n RD_.append ( [RAD,avg] );\n if log :\n print RAD, avg, len(pts)\n X.append (RAD); Y.append (avg)\n\n RAD += dRAD\n\n #minSd = opt0 ( RD_, 0.1 )\n #if minSd != None :\n # if show :\n # print \" SD0: %.1f\" % minSd\n\n sdev = toRAD\n slope = 0\n\n if RD_[0][1] <= RD_[-1][1] :\n sdev = 10.0\n\n else :\n\n #for i in range ( len(RD_) ) :\n # RD_[i][1] = RD_[i][1] - RD_[-1][1]\n # if log :\n # Y[i] = Y[i] - Y[-1]\n\n\n #import time\n #start = time.time()\n sdev, A, B = optSGD ( RD_, 9000, 0.2 )\n sdev, A, B = optSGD ( RD_, 9000, 0.02, sdev, A, B 
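\n\n# --- illustrative sketch (added; not part of the original MapQ code) ---\n# QscorePt2 above scores an arbitrary xyz position (e.g. a candidate water or ion peak) against\n# the map, using a tree of point coordinates instead of an atom tree - this is what the\n# multi-process path in this file uses. A minimal sketch (hypothetical function name):\ndef _example_q_at_point ( mol, dmap, pt_xyz, sigma=0.6 ) :\n    minD, maxD = MinMaxD ( dmap )\n    ats = [at for at in mol.atoms if at.element.name != \"H\"]\n    allPts = [ [at.coord().x, at.coord().y, at.coord().z] for at in ats ]\n    allPtTree = AdaptiveTree ( allPts, allPts, 1.0 )\n    xfI = dmap.openState.xform\n    q = QscorePt2 ( pt_xyz, xfI, dmap, sigma, allPtTree=allPtTree, minD=minD, maxD=maxD )\n    print \" - Q at point: %.3f\" % q\n    return q\n\n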
)\n sdev, A, B = optSGD ( RD_, 9000, 0.002, sdev, A, B )\n #end = time.time()\n #if log : print \" sgd - sdev: %.4f, A %.4f, B %.4f -- %f\" % (sdev, A, B, (end - start))\n sdev = sdev\n if log : print \" sgd - sdev: %.4f, A %.4f, B %.4f\" % (sdev, A, B)\n\n #start = time.time()\n #sdev, A, B = optGN ( RD_, 0.0001 )\n #print \" gn - sdev: %.4f, A %.4f, B %.4f -- %f\" % (sdev, A, B, (end - start))\n #end = time.time()\n\n if 1 :\n if 0 and sdev != None :\n\n if log :\n print \" gn1 - sdev: %.4f, A %.4f, B %.4f\" % (sdev, A, B)\n\n else :\n sdev, A, B = optSGD ( RD_, 10000, 0.01 )\n\n if log :\n print \" sgd - sdev: %.4f, A %.4f, B %.4f\" % (sdev, A, B)\n\n sdev2, A2, B2 = optGN ( RD_, 0.0001, sdev, A, B )\n if sdev2 != None :\n sdev, A, B = sdev2, A2, B2\n if log :\n print \" gn2 - sdev: %.4f, A %.4f, B %.4f\" % (sdev, A, B)\n #else :\n # return 10.0\n\n\n if log :\n r = numpy.polyfit ( X, Y, 1, rcond=None, full=False, w=None, cov=False)\n print \" sdev: %.4f, A %.4f, B %.4f // slope: %.4f y %.4f\" % (sdev, A, B, r[0], r[1])\n\n #A, B = 0.26+0.08, -0.08\n lastX = 0\n for i in range ( len(RD_) ) :\n x, y = RD_[i]\n gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n gvRef = A * numpy.exp ( -0.5 * numpy.power(x/0.5,2) ) + B\n lv = x * r[0] + r[1]\n print \"%.1f\\t%f\\t%f\\t%f\" % (x, y, gv, gvRef)\n lastX = x\n\n if 1 :\n x = lastX + dRAD\n #while x < min(4 * sdev,50.0) :\n while x < min(10.0,50.0) :\n gv = A * numpy.exp ( -0.5 * numpy.power(x/sdev,2) ) + B\n gvRef = A * numpy.exp ( -0.5 * numpy.power(x/0.5,2) ) + B\n lv = x * r[0] + r[1]\n print \"%.1f\\t\\t%f\\t%f\" % (x, gv, gvRef)\n x += dRAD\n\n\n #return abs(sdev), abs(slope)\n return abs(sdev)\n\n\ndef TimeLeftStr ( atI, totI, totSec ) :\n\n leftTime = \"\"\n leftSec = 0.0\n iPerSec = float(atI) / totSec\n if iPerSec > 0 :\n leftSec = float ( totI - atI ) / iPerSec\n leftHour = numpy.floor ( leftSec / 60.0 / 60.0 )\n leftSec = leftSec - leftHour * 60.0 * 60.0\n leftMin = numpy.floor ( leftSec / 60.0 )\n leftSec = leftSec - leftMin * 60.0\n leftTime = \"%.0f:%.0f:%.0f\" % (leftHour, leftMin, leftSec)\n return leftTime\n return \"\"\n\n\ndef optGN ( V, err, S=None, A=None, B=None ) :\n\n y0 = V[0][1]\n yN = V[-1][1]\n\n if S == None :\n S = 0.5\n A = y0+yN\n B = yN\n\n an = numpy.array ( [A,B,S] )\n #print \" _ -- A %.3f B %.3f s %.3f\" % (A, B, S)\n\n reg = 1.0\n badMatCount = 0\n\n for i in range ( 1000 ) :\n\n J = numpy.zeros ( [len(V),3] )\n e = numpy.zeros ( [len(V),1] )\n\n err0 = 0\n j = 0\n for x,y in V :\n expv = numpy.exp ( -0.5 * numpy.power(x/S,2) )\n v = A * expv + B\n yd = v - y\n err0 += yd * yd\n #print \"%.2f,%.2f/%.2f(%.2f)\" % (x, y, v, yd),\n\n dA = expv\n dB = 1\n dS = A*x*x*numpy.power(S,-3) * expv\n J[j,:] = [dA, dB, dS]\n e[j,0] = yd\n j += 1\n\n Jt = numpy.transpose(J)\n\n try :\n J_ = numpy.dot ( numpy.linalg.inv ( numpy.dot(Jt,J) ), Jt )\n except :\n #print \" - bad matrix?\"\n #print numpy.dot(Jt,J)\n badMatCount += 1\n\n if badMatCount > 3 :\n return None, None, None\n\n from numpy import random as R\n an = numpy.array ( [R.random()*(y0+yN),R.random()*yN,R.random()*10.0] )\n A,B,S = an[0], an[1], an[2]\n #print \" ? 
-- A %.3f B %.3f s %.3f\" % (A, B, S)\n reg = 1.0\n\n continue\n\n ad = numpy.dot ( J_, e )\n ann = an - ( ad[:,0] * reg )\n A,B,S = ann[0], ann[1], ann[2]\n\n err1 = err3 ( V, S, A, B )\n #if err1 > err0 :\n # reg = reg * 0.1\n # if reg < err :\n # break\n #else :\n an = ann\n #print \" %d -- A %.3f B %.3f s %.3f - err %.3f, reg %.5f\" % (i, A, B, S, err1, reg)\n\n if abs(err0 - err1) < err :\n #print \" - done\"\n break\n\n i += 1\n\n return S,A,B\n\n\n\ndef optSGD ( V, N, err, S=None, A=None, B=None ) :\n\n if S == None :\n y0 = V[0][1]\n yN = V[-1][1]\n S = 0.5\n A = y0+yN\n B = yN\n\n from numpy import random\n\n lastE = err3 ( V, S, A, B )\n #while True :\n for i in range(N) :\n\n S_ = S + random.normal ( 0, err ) # mean, sigma\n A_ = A + random.normal ( 0, err ) # mean, sigma\n B_ = B + random.normal ( 0, err ) # mean, sigma\n\n e = err3 ( V, S_, A_, B_ )\n #print \"%d %.2f %f %f %.4f\" % (i, sdAt, e, numpy.log(e), dd)\n if e < lastE :\n S, A, B = S_, A_, B_\n lastE = e\n\n return S,A,B\n\n\ndef err3 ( XYz, sd, A, B ) :\n\n y0 = XYz[0][1]\n err = 0\n #for x,y in XYz[1:] :\n for x,y in XYz :\n yd = y - A * numpy.exp ( -0.5 * numpy.power(x/sd,2) ) - B\n err += yd * yd\n #err /= float(len(XYz))\n return err\n\n\n\ndef err ( XYz, sd ) :\n\n y0 = XYz[0][1]\n err = 0\n for x,y in XYz[1:] :\n yd = y - y0 * numpy.exp ( -0.5 * numpy.power(x/sd,2) )\n err += yd * yd\n #err /= float(len(XYz))\n return err\n\n\ndef opt0 ( RD_, dStep ) :\n\n sd = 0.1\n y0 = RD_[0][1]\n minSd, minErr, N = None, 1e99, float ( len(RD_)-1 )\n while sd < 10.0 :\n\n err = 0\n for x,y in RD_[1:] :\n yd = y - y0 * numpy.exp ( -0.5 * numpy.power(x/sd,2) )\n err += yd * yd\n err /= N\n\n #print err\n\n if err < minErr :\n minErr = err\n minSd = sd\n\n sd += dStep\n\n\ndef opt ( V, maxErr ) :\n\n dd = 1.0\n sdAt = 0.1\n lastE = err ( V, sdAt )\n #while True :\n for i in range(10000) :\n sdAt += dd\n e = err ( V, sdAt )\n #print \"%d %.2f %f %f %.4f\" % (i, sdAt, e, numpy.log(e), dd)\n if e >= lastE :\n dd *= -0.75\n if abs(dd) < maxErr :\n return sdAt\n lastE = e\n return sdAt\n\n\n\n\ndef CalcQForAts ( dmap, mol, ats, sigma=0.6 ) :\n\n minD, maxD = MinMaxD (dmap)\n\n from _multiscale import get_atom_coordinates\n from CGLutil.AdaptiveTree import AdaptiveTree\n\n allAts = [at for at in mol.atoms if not at.element.name == \"H\"]\n points = get_atom_coordinates ( allAts, transformed = False )\n allAtTree = AdaptiveTree ( points.tolist(), allAts, 1.0)\n\n for at in ats :\n at.Q = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, minD=minD, maxD=maxD )\n\n\n\ndef Calc ( chimeraPath, numProc, res=3.0, bfactorF=-1, sigma=0.6 ) :\n\n print \"Calc Q scores\"\n print \" - chimera path: \", chimeraPath\n print \" - num processors: \", numProc\n print \" - resolution: \", res\n print \" - sigma: \", sigma\n if bfactorF > 0 :\n print \" - b-factor: \", bfactorF\n\n\n from VolumeViewer import Volume\n vols = chimera.openModels.list(modelTypes = [Volume])\n if len(vols) == 0 :\n print \" - no volumes loaded\"\n return\n\n dmap = vols[0]\n print \" - volume: %s\" % dmap.name\n\n from chimera import Molecule\n\n mols = chimera.openModels.list(modelTypes = [Molecule])\n if len(mols) == 0 :\n print \" - no molecules loaded\"\n return\n\n for mi, mol in enumerate (mols) :\n\n print \"\"\n print \"Model %d/%d: %s\" % (mi+1, len(mols), mol.name)\n SetBBAts ( mol )\n\n if numProc == 1 :\n CalcQ ( mol, None, dmap, sigma, log=True )\n else :\n CalcQp ( mol, None, dmap, sigma, numProc=numProc, chimeraPath=chimeraPath )\n\n SaveQStats ( mol, 
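\n\n# --- illustrative sketch (added; not part of the original MapQ code) ---\n# optSGD does a random-perturbation search and optGN a Gauss-Newton refinement of (sigma, A, B)\n# for y = A*exp(-0.5*(x/sigma)^2) + B against [x,y] samples, with err3 as the squared-error\n# objective. A small synthetic round-trip check (hypothetical function name):\ndef _example_fit_radial_profile () :\n    trueS, trueA, trueB = 0.6, 1.0, 0.1\n    V, x = [], 0.0\n    while x < 2.01 :\n        V.append ( [x, trueA * numpy.exp ( -0.5 * numpy.power(x/trueS,2) ) + trueB] )\n        x += 0.1\n    S, A, B = optSGD ( V, 5000, 0.1 )\n    S2, A2, B2 = optGN ( V, 0.0001, S, A, B )\n    if S2 != None :\n        S, A, B = S2, A2, B2\n    print \" - fitted sigma %.3f (true %.3f), err %.5f\" % (S, trueS, err3(V, S, A, B))\n\n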
\"All\", dmap, sigma, res )\n\n if bfactorF > 0 :\n minb, maxb = 1.0e9, 0.0\n for at in mol.atoms :\n at.bfactor = bfactorF * (1.0 - at.Q)\n #at.occupancy = 1.0 # max(0,at.Q)\n #dval = self.cur_dmap.interpolated_values ( [ at.coord() ], self.cur_mol.openState.xform ).astype(numpy.float64, copy=False)[0]\n #at.occupancy = (dval - minD) / (maxD - minD)\n minb = min ( minb, at.bfactor )\n maxb = max ( maxb, at.bfactor )\n\n molPath = os.path.splitext(mol.openedAs[0])[0]\n nname = molPath + \"_B%.0f.pdb\" % bfactorF\n print \"Saving pdb with B'-factors, f=%.0f:\" % bfactorF\n print \" -> \", nname\n print \" - bfactor = %.0f*(1-Qscore), range %.2f to %.2f\" % (bfactorF, minb, maxb)\n #print \" - occupancies set to 1\"\n print \"\"\n chimera.PDBio().writePDBfile ( [mol], nname )\n\n\n\n\n# this is the function that the MP version executes once Chimera is opened\n# with partial model and map\n\ndef CalcQForOpenModelsRess () :\n\n from VolumeViewer import Volume\n D = chimera.openModels.list(modelTypes = [Volume])\n dmap = D[0]\n #dmapA = D[1]\n print \" - dmap: %s\" % dmap.data.path\n #print \" - dmapA: %s\" % dmapA.name\n\n tempPath, mapNameExt = os.path.split ( dmap.data.path )\n mapName, mapExt = os.path.splitext ( mapNameExt )\n procI = mapName.split(\"_\")[0]\n print \" - proc: %s\" % procI\n print \" - in path: %s\" % tempPath\n\n # read ids and position of all atoms\n aPosMap = {}\n fina = open ( os.path.join(tempPath, \"all_atoms.txt\") )\n l1 = fina.readline()\n sigma, minD, maxD, numAts = l1.split()\n sigma, minD, maxD, numAts = float(sigma), float(minD), float(maxD), int(numAts)\n #fout.write ( \"Sigma: %.1f, minD: %.3f, maxD: %.3f, numAllAts: %d\\n\" % (sigma, minD, maxD, numAts) )\n print \"Sigma: %.1f, minD: %.3f, maxD: %.3f, numAllAts: %d\\n\" % (sigma, minD, maxD, numAts)\n\n allPts = [None] * numAts # numpy.array.zeros ( [numAts,3] )\n ati = 0\n for l in fina :\n atIdStr, sx, sy, sz = l.split()\n pt = [ float(sx), float(sy), float(sz) ]\n allPts[ati] = [ float(sx), float(sy), float(sz) ]\n aPosMap[atIdStr] = pt\n ati += 1\n fina.close()\n\n if ati != numAts :\n print \" ---!!--- got only %d of %d atoms\" (ati, numAts)\n return\n\n ##ats = [at for at in mol.atoms if not at.element.name == \"H\"]\n ##points = _multiscale.get_atom_coordinates ( allPts, transformed = False )\n ##print \" - search tree: %d/%d ats\" % ( len(ats), len(mol.atoms) )\n allPtTree = AdaptiveTree ( allPts, allPts, 1.0)\n print \" - points tree with %d points\" % len(allPts)\n\n fin = open ( os.path.join ( tempPath, \"%s_atoms.txt\" % procI ) )\n fout = open ( os.path.join ( tempPath, \"%s_out.txt\" % procI ), \"w\" )\n fout_status = os.path.join ( tempPath, \"%s_stat.txt\" % procI )\n\n # get positions of atoms to do in this process\n doAts = []\n for l in fin :\n #atIdStr, sx, sy, sz = l.split()\n atIdStr = l.strip()\n if not atIdStr in aPosMap :\n fout.write ( \"- atid not found: %s\\n\" % atIdStr )\n #at = atids[atIdStr]\n #pt = [ float(sx), float(sy), float(sz) ]\n doAts.append ( [atIdStr, aPosMap[atIdStr]] )\n fin.close()\n\n # write status to a temp file\n fs = open ( fout_status, \"w\" );\n fs.write ( \"at atom %d/%d\" % (0,len(doAts) ) );\n fs.close()\n\n import time\n start = time.time()\n xfI = dmap.openState.xform\n\n i = 1\n for atId, atPt in doAts :\n #print \"%d.%s.%s\" % (r.id.position,r.id.chainId,at.name),\n\n ##qs = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )\n qs = QscorePt2 ( atPt, xfI, dmap, sigma, 
allPtTree=allPtTree, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n fout.write ( \"%s %f\\n\" % (atId, qs) )\n\n if i%10 == 0 :\n\n end = time.time()\n totSec = end - start\n\n leftTime = \"\"\n leftSec = 0.0\n iPerSec = float(i) / totSec\n if iPerSec > 0 :\n leftSec = float ( len(doAts) - i ) / iPerSec\n leftHour = numpy.floor ( leftSec / 60.0 / 60.0 )\n leftSec = leftSec - leftHour * 60.0 * 60.0\n leftMin = numpy.floor ( leftSec / 60.0 )\n leftSec = leftSec - leftMin * 60.0\n leftTime = \"%.0f:%.0f:%.0f\" % (leftHour, leftMin, leftSec)\n\n # update status\n fs = open ( fout_status, \"w\" );\n fs.write ( \"at atom %d/%d - ETA %s\" % (i+1,len(doAts),leftTime) );\n fs.close()\n\n i += 1\n\n fout.close()\n\n fs = open ( fout_status, \"w\" ); fs.write ( \"done\" ); fs.close()\n\n\n\n\ndef CalcQp ( mol, cid, dmap, sigma, useOld=False, log=False, numProc=None, chimeraPath=None ) :\n\n molPath = os.path.splitext(mol.openedAs[0])[0]\n mapName = os.path.splitext(dmap.name)[0]\n mapPath = os.path.split ( dmap.data.path )[0]\n mapBase = os.path.splitext ( dmap.data.path )[0]\n\n if useOld :\n SetBBAts ( mol )\n nname = molPath + \"__Q__\" + mapName + \".pdb\"\n if QsFromPdbFile ( mol, nname ) :\n Qavg = QStats1 ( mol, cid )\n return Qavg\n\n #numProc = 2\n\n if numProc == None :\n import multiprocessing\n numProc = multiprocessing.cpu_count() / 2\n\n print \"Q Scores - p - %d\" % numProc\n print \" - map: %s\" % dmap.name\n print \" - mol: %s, chain: %s\" % (mol.name, cid if cid != None else \"_all_\")\n print \" - sigma: %.2f\" % sigma\n\n minD, maxD = MinMaxD ( dmap )\n print \" - mind: %.3f, maxd: %.3f\" % (minD, maxD)\n\n import time\n start = time.time()\n\n tempPath = mapBase + \"__Q-scores__temp__\"\n print \" - making temp path: %s\" % tempPath\n try :\n os.mkdir(tempPath)\n except :\n print \" - could not make temp path (an old calc may have failed):\"\n print \" : check/remove temp path manually and try again\"\n print \" : or, check write permissions\"\n\n\n allAtsFilePath = os.path.join ( tempPath, \"all_atoms.txt\" )\n\n # write all (non-H) atoms to one file\n allAtoms = [at for at in mol.atoms if not at.element.name == \"H\"]\n fout = open ( allAtsFilePath, \"w\" )\n print \" - all atoms -> %s\" % allAtsFilePath\n fout.write ( \"%.3f %f %f %d\\n\" % (sigma, minD, maxD, len(allAtoms)) )\n for at in allAtoms :\n r = at.residue\n altLoc = '_' if at.altLoc == '' else at.altLoc\n atId = \"%d.%s.%s.%s\" % (r.id.position,r.id.chainId,at.name,altLoc)\n p = at.coord()\n fout.write ( \"%s %f %f %f\\n\" % (atId,p.x,p.y,p.z) )\n fout.close()\n\n # atoms for which to calculate Q-scores\n SetBBAts ( mol )\n ress = []\n atoms = []\n for r in mol.residues :\n if cid == None or cid == \"All\" or r.id.chainId == cid :\n for at in r.atoms :\n if not at.element.name == \"H\" :\n atoms.append ( at )\n\n print \" - atoms to do: %d\" % len(atoms)\n\n import subprocess\n import sys\n\n print \"cmd:\",\n #print sys.argv\n for arg in sys.argv :\n print arg,\n print \"\"\n\n if chimeraPath == None :\n # '/Users/greg/_mol/Chimera.app/Contents/Resources/share/__main__.py'\n chimeraPath = os.path.split ( sys.argv[0] )[0]\n print \"\"\n print \" ------------ \", chimeraPath\n print \"\"\n\n chimeraPath, share = os.path.split ( chimeraPath )\n chimeraPath = os.path.join ( chimeraPath, 'bin' )\n chimeraPath = os.path.join ( chimeraPath, 'chimera' )\n if os.path.isfile ( chimeraPath ) :\n print \" -- on unix/mac\"\n else :\n chimeraPath += \".exe\"\n if os.path.isfile ( chimeraPath ) :\n print 
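\n\n# --- illustrative note (added; not part of the original MapQ code) ---\n# The worker protocol above identifies atoms with the string \"position.chain.name.altLoc\",\n# writing \"_\" for an empty altLoc (e.g. \"114.A.CD1._\"). A small sketch of building and parsing\n# that id; the helper names are hypothetical and parsing assumes atom names contain no dots:\ndef _example_atom_id ( at ) :\n    r = at.residue\n    altLoc = '_' if at.altLoc == '' else at.altLoc\n    return \"%d.%s.%s.%s\" % (r.id.position, r.id.chainId, at.name, altLoc)\n\ndef _example_parse_atom_id ( atId ) :\n    pos, chainId, atName, altLoc = atId.split(\".\")\n    return int(pos), chainId, atName, ('' if altLoc == '_' else altLoc)\n\n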
\" -- on windows\"\n else :\n print \" - chimera path not found...\"\n print chimeraPath\n print \"\"\n return\n\n print \" -- path to Chimera:\", chimeraPath\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n inDir = os.path.split(dir_path)[0]\n print \" -- working dir:\", inDir\n #mapQPPath = os.path.join ( inDir, 'Segger' )\n mapQPPath = os.path.join ( dir_path, 'mapqp.py' )\n print \" -- path to mapQ script:\", mapQPPath\n\n n = len(atoms)\n g = [atoms[(n*c)/numProc:(n*(c+1))/numProc] for c in range(numProc)]\n procs = []\n for mi, atoms1 in enumerate(g) :\n\n ress1 = atoms1[0].residue\n ressN = atoms1[-1].residue\n print \" - %d/%d, %d-%d\" % (mi+1, numProc, ress1.id.position, ressN.id.position)\n\n procAtomsPath = os.path.join ( tempPath, \"%d_atoms.txt\" % mi )\n fout = open ( procAtomsPath, \"w\" )\n for at in atoms1 :\n r = at.residue\n altLoc = '_' if at.altLoc == '' else at.altLoc\n p = at.coord()\n #fout.write ( \"%d.%s.%s.%s %.3f %.3f %.3f\\n\" % (r.id.position,r.id.chainId,at.name,altLoc,p.x,p.y,p.z) )\n fout.write ( \"%d.%s.%s.%s\\n\" % (r.id.position,r.id.chainId,at.name,altLoc) )\n fout.close()\n\n nmap_path = os.path.join ( tempPath, \"%d_map.mrc\" % mi )\n\n if 1 :\n nmap = MaskMapResize ( atoms1, 6, dmap, nmap_path )\n else :\n import shutil\n shutil.copyfile ( dmap.data.path, nmap_path )\n\n #args = [chimeraPath, '--nogui', '--silent', '--nostatus', mol.openedAs[0], nmap_path, mapQPPath]\n #args = [chimeraPath, '--nogui', '--silent', '--nostatus', nmap_path, dmap.data.path, mapQPPath]\n args = [chimeraPath, '--nogui', '--silent', '--nostatus', nmap_path, mapQPPath]\n if mi == 0 :\n print \"running proc:\",\n for arg in args :\n print arg,\n print \"\"\n\n fout = open ( os.path.join(tempPath, \"%d.log\" % mi), \"w\" )\n foute = open ( os.path.join(tempPath, \"%d_err.log\" % mi), \"w\" )\n p = subprocess.Popen(args, stdout=fout, stderr=foute, cwd=inDir)\n procs.append ( [mi, p, fout, foute] )\n\n print \"\"\n print \"Waiting...\",\n for mi, p, fout, foute in procs :\n p.wait()\n fout.close()\n foute.close()\n print \"%d\" % mi,\n print \"\"\n\n atids = {}\n for r in mol.residues :\n for at in r.atoms :\n r = at.residue\n altLoc = '_' if at.altLoc == '' else at.altLoc\n atids[\"%d.%s.%s.%s\" % (r.id.position,r.id.chainId,at.name,altLoc)] = at\n\n print \"\"\n print \"Getting...\",\n for mi, p, fout, foute in procs :\n fin = os.path.join(tempPath, \"%d_out.txt\" % mi)\n #print \" - getting from: \", fin\n fp = open ( fin )\n for l in fp :\n #print \" - \", l\n try :\n atId, Q = l.split()\n except :\n print \" - err line: \", l\n at = atids[atId.strip()]\n #at = r.atomsMap[atName][0]\n at.Q = float(Q)\n #at.CC = float(cc)\n at.bfactor = at.Q\n\n fp.close()\n\n if mi == 0 :\n print \"\"\n print \"\"\n print \"__StdOut for process %d__\" % mi\n foute = open ( os.path.join(tempPath, \"%d.log\" % mi), \"r\" )\n for l in foute :\n print l,\n print \"\"\n foute.close()\n\n\n print \"__StdErr file for process %d__\" % mi\n foute = open ( os.path.join(tempPath, \"%d_err.log\" % mi), \"r\" )\n for l in foute :\n print l,\n print \"\"\n foute.close()\n\n if 1 :\n for mi, p, fout, foute in procs :\n print \"Removing temp files\",\n os.remove ( os.path.join(tempPath, \"%d_out.txt\" % mi) )\n try :\n os.remove ( os.path.join(tempPath, \"%d_stat.txt\" % mi) )\n except :\n print \" - did not find _stat file\"\n pass\n os.remove ( os.path.join(tempPath, \"%d_atoms.txt\" % mi) )\n os.remove ( os.path.join(tempPath, \"%d_map.mrc\" % mi) )\n os.remove ( os.path.join(tempPath, 
\"%d.log\" % mi) )\n os.remove ( os.path.join(tempPath, \"%d_err.log\" % mi) )\n print \"%d\" % mi,\n\n print \"\"\n os.remove ( os.path.join(tempPath, \"all_atoms.txt\") )\n os.rmdir ( tempPath )\n\n\n end = time.time()\n print \"\"\n print \" - done, time: %f\" % ( end-start )\n totSec = end - start\n totMin = numpy.floor ( totSec / 60.0 )\n totSec = totSec - totMin * 60.0\n print \" - done, time: %.0f min, %.1f sec\" % ( totMin, totSec )\n\n SaveQFile ( mol, cid, dmap, sigma )\n Qavg = QStats1 ( mol, cid )\n\n return Qavg\n\n\n\n\ndef QStats1 ( mol, chainId='All', doCalcResQ=True ) :\n\n totQ, totN = 0.0, 0.0\n #QT, QN = { \"Protein\":0.0, \"Nucleic\":0.0, \"Other\":0.0 }, { \"Protein\":0.0, \"Nucleic\":0.0, \"Other\":0.0}\n QT, QN = {}, {}\n QT_, QN_ = {}, {}\n QH, QL = {}, {}\n\n if chainId == None :\n chainId = \"All\"\n\n print \"Q for %d res, chain %s\" % ( len(mol.residues), chainId )\n for r in mol.residues :\n\n if r.id.chainId == chainId or chainId == \"All\" :\n\n if doCalcResQ :\n CalcResQ (r, None, None, useOld=True )\n\n for at in r.atoms :\n if at.element.name == \"H\" :\n continue\n\n if hasattr ( at, \"Q\") :\n totQ += at.Q\n totN += 1.0\n\n tp = \"Other\"\n if at.residue.isProt : tp = \"Protein\"\n elif at.residue.isNA : tp = \"Nucleic\"\n else : tp = at.residue.type\n\n if tp in QT :\n QT[tp] += at.Q; QN[tp] += 1.0;\n QH[tp] = max(QH[tp], at.Q); QL[tp] = min(QL[tp], at.Q)\n else :\n QT[tp] = at.Q; QN[tp] = 1.0\n QH[tp] = at.Q; QL[tp] = at.Q\n\n tps = r.id.chainId + \":\" + tp\n if tps in QT_ :\n QT_[tps] += at.Q; QN_[tps] += 1.0\n else :\n QT_[tps] = at.Q; QN_[tps] = 1.0\n\n\n #for tp in [\"Other\", \"Protein\", \"Nucleic\"] :\n print \"\"\n print \"Chain\\tAvg.Q-score\\tEst.Res.(A)\"\n tpk = QT_.keys()\n tpk.sort()\n for tp in tpk :\n if QN_[tp] > 0 :\n avgQ = QT_[tp]/QN_[tp]\n avgR = 0\n if \"nucleic\" in tp.lower() :\n avgR = (avgQ-1.0673)/-0.1574\n else :\n avgR = (avgQ-1.1244)/-0.1794\n print \" %s\\t%.3f\\t%.2f\" % (tp, avgQ, avgR )\n else :\n print \" %s\\tn/a\" % (tp)\n\n Q__ = { \" protein\":0, \" nucleic\":0, \" water\":0, \" ion\":0 }\n\n #for tp in [\"Other\", \"Protein\", \"Nucleic\"] :\n print \"\"\n print \"Type\\tAvg.Q-score\\tEst.Res.(A)\"\n for tp in QT.keys() :\n if QN[tp] > 0 :\n avgQ = QT[tp]/QN[tp]\n avgR = 0\n if \"nucleic\" in tp.lower() :\n avgR = (avgQ-1.0673)/-0.1574\n Q__[\" nucleic\"] = avgQ\n elif \"protein\" in tp.lower() :\n avgR = (avgQ-1.1244)/-0.1794\n Q__[\" protein\"] = avgQ\n elif \"hoh\" in tp.lower() :\n avgR = (avgQ-1.1244)/-0.1794\n Q__[\" water\"] = avgQ\n elif tp.upper() in chargedIons :\n avgR = (avgQ-1.1244)/-0.1794\n Q__[\" ion\"] = avgQ\n else :\n avgR = (avgQ-1.1244)/-0.1794\n Q__[tp] = avgQ\n print \" %s\\t%.3f\\t%.2f\" % (tp, avgQ, avgR )\n else :\n print \" %s\\tn/a\" % (tp)\n\n print \"\"\n\n for tp in QT.keys() :\n if QN[tp] > 0 :\n print \"\\t%s\" % tp,\n print \"\"\n\n print \"Avg.Q.\",\n for tp in QT.keys() :\n if QN[tp] > 0 :\n avgQ = QT[tp]/QN[tp]\n print \"\\t%.3f\" % avgQ,\n print \"\"\n\n print \"Max.Q.\",\n for tp in QT.keys() :\n if QN[tp] > 0 :\n print \"\\t%.3f\" % QH[tp],\n print \"\"\n\n print \"Min.Q.\",\n for tp in QT.keys() :\n if QN[tp] > 0 :\n print \"\\t%.3f\" % QL[tp],\n print \"\"\n\n print \"\"\n\n #return Q__\n return totQ/totN\n\n\ndef QStatsProt ( mol, dmap, chainId ) :\n\n SetBBAts ( mol )\n\n ress = []\n for r in mol.residues :\n if r.id.chainId == chainId and r.isProt :\n ress.append ( r )\n\n if len(ress) == 0 :\n print \"QstatsProt - no protein residues in chain %s\" % chainId\n 
return\n\n sByType = {}\n rByType = {}\n\n def addType (tp, r, score) :\n if not tp in sByType :\n rByType[tp] = []\n sByType[tp] = []\n rByType[tp].append ( [score, r] )\n sByType[tp].append ( [score] )\n\n\n for r in ress :\n\n if r.isProt and r.type == \"LEU\" :\n avg = (r.atomsMap[\"CD1\"][0].Q + r.atomsMap[\"CD2\"][0].Q)/2.0\n addType ( \"LEU(CD)\", r, avg )\n if r.isProt and r.type == \"LEU\" and r.id.position==114 :\n avg = (r.atomsMap[\"CD1\"][0].Q + r.atomsMap[\"CD2\"][0].Q)/2.0\n addType ( \"LEU_114(CD)\", r, avg )\n\n if r.isProt and r.type == \"VAL\" :\n avg = (r.atomsMap[\"CG1\"][0].Q + r.atomsMap[\"CG2\"][0].Q)/2.0\n addType ( \"VAL(CG)\", r, avg )\n if r.isProt and r.type == \"VAL\" and r.id.position==33 :\n avg = (r.atomsMap[\"CG1\"][0].Q + r.atomsMap[\"CG2\"][0].Q)/2.0\n addType ( \"VAL_33(CG)\", r, avg )\n\n if r.isProt and r.type == \"ARG\" :\n avg = (r.atomsMap[\"NH1\"][0].Q + r.atomsMap[\"NH2\"][0].Q)/2.0\n addType ( \"ARG(NH)\", r, avg )\n if r.isProt and r.type == \"ARG\" and r.id.position==76 :\n avg = (r.atomsMap[\"NH1\"][0].Q + r.atomsMap[\"NH2\"][0].Q)/2.0\n addType ( \"ARG_76(NH)\", r, avg )\n if r.isProt and r.type == \"ARG\" and r.id.position==9 :\n avg = (r.atomsMap[\"NH1\"][0].Q + r.atomsMap[\"NH2\"][0].Q)/2.0\n addType ( \"ARG_9(NH)\", r, avg )\n\n if r.isProt and r.type == \"LYS\" :\n avg = r.atomsMap[\"NZ\"][0].Q\n addType ( \"LYS(NZ)\", r, avg )\n\n if r.isProt and r.type == \"ASP\" :\n avg = (r.atomsMap[\"OD1\"][0].Q + r.atomsMap[\"OD2\"][0].Q)/2.0\n addType ( \"ASP(OD)\", r, avg )\n if r.isProt and r.type == \"ASP\" and r.id.position==42 :\n avg = (r.atomsMap[\"OD1\"][0].Q + r.atomsMap[\"OD2\"][0].Q)/2.0\n addType ( \"ASP_42(OD)\", r, avg )\n if r.isProt and r.type == \"ASP\" and r.id.position==131 :\n avg = (r.atomsMap[\"OD1\"][0].Q + r.atomsMap[\"OD2\"][0].Q)/2.0\n addType ( \"ASP_131(OD)\", r, avg )\n if r.isProt and r.type == \"ASP\" and r.id.position==171 :\n avg = (r.atomsMap[\"OD1\"][0].Q + r.atomsMap[\"OD2\"][0].Q)/2.0\n addType ( \"ASP_171(OD)\", r, avg )\n\n if r.isProt and r.type == \"GLU\" :\n avg = (r.atomsMap[\"OE1\"][0].Q + r.atomsMap[\"OE2\"][0].Q)/2.0\n addType ( \"GLU(OE)\", r, avg )\n if r.isProt and r.type == \"GLU\" and r.id.position==17 :\n avg = (r.atomsMap[\"OE1\"][0].Q + r.atomsMap[\"OE2\"][0].Q)/2.0\n addType ( \"GLU_17(OE)\", r, avg )\n if r.isProt and r.type == \"GLU\" and r.id.position==27 :\n avg = (r.atomsMap[\"OE1\"][0].Q + r.atomsMap[\"OE2\"][0].Q)/2.0\n addType ( \"GLU_27(OE)\", r, avg )\n if r.isProt and r.type == \"GLU\" and r.id.position==67 :\n avg = (r.atomsMap[\"OE1\"][0].Q + r.atomsMap[\"OE2\"][0].Q)/2.0\n addType ( \"GLU_67(OE)\", r, avg )\n if r.isProt and r.type == \"GLU\" and r.id.position==134 :\n avg = (r.atomsMap[\"OE1\"][0].Q + r.atomsMap[\"OE2\"][0].Q)/2.0\n addType ( \"GLU_134(OE)\", r, avg )\n\n\n if r.isProt or r.isNA :\n if r.scQ :\n addType ( r.type, r, r.scQ )\n else :\n addType ( r.type, r, r.Q )\n\n avgs = []\n for rtype, ra in sByType.iteritems () :\n avgs.append ( [numpy.average (ra), rtype, numpy.std (ra)] )\n\n from chimera.resCode import protein3to1\n from chimera.resCode import nucleic3to1\n\n # sort by avg score\n #avgs.sort ( reverse=True, key=lambda x: x[0] )\n\n # sort by residue type\n avgs.sort ( reverse=False, key=lambda x: x[1] )\n\n mapName = os.path.splitext(dmap.name)[0]\n molName = os.path.splitext(mol.name)[0]\n mdir, mpfile = os.path.split(dmap.data.path)\n foname = mdir + \"/\" + mapName + \"__\" + molName + \".txt\"\n\n print \" - scores to: \" + foname\n fp = open 
(foname,\"w\")\n\n for avgScore, rtype, sdev in avgs :\n\n rscores = rByType[rtype]\n if len(rscores) > 0 :\n rscores.sort ( reverse=True, key=lambda x: x[0] )\n hr = rscores[0]\n R = hr[1]\n highestScore = hr[0]\n numRes = len(rscores)\n\n rts = \"\"\n if R.isProt : rts = protein3to1[R.type]\n elif R.isNA : rts = nucleic3to1[R.type]\n\n print \"%s\\t%s\\t%d\\t%f\\t%f\\t%d\\t.%s\\t%f\" % (rtype, rts, numRes, avgScore, sdev, R.id.position, R.id.chainId, highestScore)\n fp.write ( \"%s\\t%s\\t%d\\t%f\\t%f\\t%d\\t.%s\\t%f\\n\" % (rtype, rts, numRes, avgScore, sdev, R.id.position, R.id.chainId, highestScore) )\n\n fp.close()\n\n\n\ndef QStatsRNA ( mol, dmap, chainId ) :\n\n SetBBAts ( mol )\n\n ress = []\n for r in mol.residues :\n if r.id.chainId == chainId and r.isNA :\n ress.append ( r )\n\n if len(ress) == 0 :\n print \"Qstats RNA - no RNA residues found in chain %s\" % chainId\n return\n\n print \"\"\n print \"RNA stats for chain %s\" % chainId\n print \"\"\n\n\n sByType = {}\n rByType = {}\n\n def addType (tp, r, score) :\n if not tp in sByType :\n rByType[tp] = []\n sByType[tp] = []\n rByType[tp].append ( [score, r] )\n sByType[tp].append ( [score] )\n\n\n scAts = []\n bbAts = []\n allAts = []\n\n for r in ress :\n if r.isNA :\n\n avg = numpy.average ( [at.Q for at in r.scAtoms] )\n #addType ( nucleic3to1[r.type] + \"_SC\", r, avg )\n addType ( r.type + \"_SC\", r, avg )\n\n avg = numpy.average ( [at.Q for at in r.bbAtoms] )\n #addType ( nucleic3to1[r.type] + \"_BB\", r, avg )\n addType ( r.type + \"_BB\", r, avg )\n\n scAts.extend ( r.scAtoms )\n bbAts.extend ( r.bbAtoms )\n allAts.extend ( [at for at in r.atoms if at.element.name != \"H\"] )\n\n\n avgQ = numpy.average ( [at.Q for at in allAts] )\n avgQbb = numpy.average ( [at.Q for at in bbAts] )\n avgQsc = numpy.average ( [at.Q for at in scAts] )\n\n sQ = numpy.std ( [at.Q for at in allAts] )\n sQbb = numpy.std ( [at.Q for at in bbAts] )\n sQsc = numpy.std ( [at.Q for at in scAts] )\n\n avgs = []\n for rtype, ra in sByType.iteritems () :\n avgs.append ( [numpy.average (ra), rtype, numpy.std (ra)] )\n\n\n from chimera.resCode import protein3to1\n from chimera.resCode import nucleic3to1\n\n # sort by avg score\n #avgs.sort ( reverse=True, key=lambda x: x[0] )\n\n # sort by residue type\n avgs.sort ( reverse=False, key=lambda x: x[1] )\n\n\n mapName = os.path.splitext(dmap.name)[0]\n molName = os.path.splitext(mol.name)[0]\n mdir, mpfile = os.path.split(dmap.data.path)\n foname = mdir + \"/\" + mapName + \"__\" + molName + \"_rscores.txt\"\n\n\n print \" - scores to: \" + foname\n fp = open (foname,\"w\")\n\n print \"\"\n print \"Map\\tModel\\tQ_All\\tQ_Backbone\\tQ_SideChain\\tStdQ_All\\tStdQ_Backbone\\tStdQ_SideChain\"\n print \"%s\\t%s\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\" % (mapName, molName, avgQ, avgQbb, avgQsc, sQ, sQbb, sQsc)\n print \"\"\n\n fp.write ( \"\\n\" )\n fp.write ( \"Map\\tModel\\tQ_All\\tQ_Backbone\\tQ_SideChain\\tStdQ_All\\tStdQ_Backbone\\tStdQ_SideChain\\n\" )\n fp.write ( \"%s\\t%s\\t%f\\t%f\\t%f\\t%f\\t%f\\t%f\" % (mapName, molName, avgQ, avgQbb, avgQsc, sQ, sQbb, sQsc) )\n\n fp.write ( \"\\n\\n\" )\n fp.write ( \"Type\\Mol\\t#\\tAvg.Q.\\tSDev\\tPos\\tChain\\tMaxQ\\n\" )\n print \"RType\\tResidue\\t#\\tAvg.Q.\\tSDev\\tPos\\tChain\\tMaxQ\"\n print \"\"\n\n for avgScore, rtype, sdev in avgs :\n\n rscores = rByType[rtype]\n if len(rscores) > 0 :\n rscores.sort ( reverse=True, key=lambda x: x[0] )\n hr = rscores[0]\n R = hr[1]\n highestScore = hr[0]\n numRes = len(rscores)\n\n rts = \"\"\n if R.isProt : rts = 
protein3to1[R.type]\n elif R.isNA : rts = nucleic3to1[R.type]\n\n print \"%s\\t%s\\t%d\\t%f\\t%f\\t%d\\t.%s\\t%f\" % (rtype, rts, numRes, avgScore, sdev, R.id.position, R.id.chainId, highestScore)\n fp.write ( \"%s\\t%s\\t%d\\t%f\\t%f\\t%d\\t.%s\\t%f\\n\" % (rtype, rts, numRes, avgScore, sdev, R.id.position, R.id.chainId, highestScore) )\n\n fp.close()\n\n\n# expected Q-scores given the resolution of a map\n# - sigma=0.6, for resolutions 1.5 and lower\n# - sigma=0.4, for resolution higher than 1.5\n\ndef eQ_protein (RES, sigma=0.6) :\n if abs(sigma-0.6) < 1e-5 :\n return -0.1775 * RES + 1.1192, \"-0.1775 * RES + 1.1192\"\n elif abs(sigma-0.4) < 1e-5 :\n return -0.1866 * RES + 1.1242, \"-0.1866 * RES + 1.1242\"\n else :\n return None, \"no eqn for protein with sigma=%.2f\" % sigma\n\ndef eQ_nucleic (RES, sigma=0.6) :\n if abs(sigma-0.6) < 1e-5 :\n return -0.1377 * RES + 0.9973, \"-0.1377 * RES + 0.9973\"\n elif abs(sigma-0.4) < 1e-5 :\n return -0.1465 * RES + 0.9436, \"-0.1465 * RES + 0.9436\"\n else :\n return None, \"no eqn for nucleic with sigma=%.2f\" % sigma\n\ndef eQ_ion (RES, sigma=0.6) :\n if abs(sigma-0.6) < 1e-5 :\n return -0.1103 * RES + 1.0795, \"-0.1103 * RES + 1.0795\"\n elif abs(sigma-0.4) < 1e-5 :\n return -0.1103 * RES + 1.0795, \"-0.1103 * RES + 1.0795\"\n else :\n return None, \"no eqn for ion with sigma=%.2f\" % sigma\n\ndef eQ_water ( RES, sigma=0.6) :\n if abs(sigma-0.6) < 1e-5 :\n return -0.0895 * RES + 1.0001, \"-0.0895 * RES + 1.0001\"\n elif abs(sigma-0.4) < 1e-5 :\n return -0.0895 * RES + 1.0001, \"-0.0895 * RES + 1.0001\"\n else :\n return None, \"no eqn for water with sigma=%.2f\" % sigma\n\n\n\ndef SaveQStats ( mol, chainId, dmap, sigma, RES=3.0 ) :\n\n if chainId == None :\n chainId = \"All\"\n\n cres = {}\n for r in mol.residues :\n if r.id.chainId == chainId or chainId == \"All\" :\n if r.id.chainId in cres :\n cres[r.id.chainId].append ( [r.id.position, r] )\n else :\n cres[r.id.chainId] = [ [r.id.position, r] ]\n\n molPath = os.path.splitext(mol.openedAs[0])[0]\n mapName = os.path.splitext(dmap.name)[0]\n nname = molPath + \"__Q__\" + mapName + \"_\" + chainId + \".txt\"\n #nname = molPath + \"__Q__\" + mapName + \"_\" + cid + \".txt\"\n\n print \"\"\n print \"Saving per-chain & per-residue Q-scores:\"\n print \" -> res=\", RES\n print \" -> file:\", nname\n print \" -> chain:\", chainId\n\n fp = open (nname, \"w\")\n\n fp.write ( \"\\n\" )\n fp.write ( \"Map: %s\\n\" % dmap.name )\n fp.write ( \"Resolution entered (RES): %g\\n\" % RES )\n fp.write ( \"Model: %s\\n\" % mol.name )\n fp.write ( \"Sigma: %g\\n\" % sigma )\n fp.write ( \"\\n\" )\n\n avgQrna, eq_nucleic = eQ_nucleic(RES, sigma)\n avgQprot, eq_protein = eQ_protein(RES, sigma)\n avgQIon, eq_ion = eQ_ion(RES, sigma)\n avgQWater, eq_water = eQ_water(RES, sigma)\n\n fp.write ( \"Protein: expectedQ = %s\\n\" % eq_protein )\n fp.write ( \"Nucleic: expectedQ = %s\\n\" % eq_nucleic )\n fp.write ( \"Ion: expectedQ = %s\\n\" % eq_ion )\n fp.write ( \"Water: expectedQ = %s\\n\" % eq_water )\n fp.write ( \"\\n\" )\n\n fp.write ( \"Chain\\tType\\t# residues\\tAvg. 
Q\\tExpectedQ@%.2f\\tEst.Res.\\n\" % RES )\n\n chains = cres.keys()\n chains.sort()\n\n for cid in chains :\n ress = cres[cid]\n\n type_ats = {}\n type_ress = {}\n resAtoms = []\n for ri, r in ress :\n tp = \"\"\n if r.isProt : tp = \"Protein\"\n elif r.isNA : tp = \"Nucleic\"\n elif r.type.upper() in chargedIons : tp = \"Ion\"\n elif r.type.upper() == \"HOH\" : tp = \"Water\"\n else : tp = r.type\n\n if tp in type_ats : type_ats[tp].extend (r.atoms)\n else : type_ats[tp] = r.atoms[:]\n\n if tp in type_ress : type_ress[tp].append ( r )\n else : type_ress[tp] = [r]\n\n for rtype, atoms in type_ats.iteritems() :\n\n qs = [at.Q for at in atoms if (at.element.name != \"H\" and hasattr(at,'Q'))]\n if len(qs) == 0 :\n continue\n avgQ = numpy.average ( qs )\n numR = len ( type_ress[rtype] )\n\n formula, estRes = None, None\n if \"Protein\" in rtype :\n formula = \"=\" + eQ_protein(RES,sigma)[1].replace (\"RES\",'%.2f') % RES\n estRes = (avgQ - 1.1192) / -0.1775\n elif \"Nucleic\" in rtype :\n formula =\"=\" + eQ_nucleic(RES,sigma)[1].replace (\"RES\",'%.2f') % RES\n estRes = (avgQ - 0.9973) / -0.1377\n elif \"Ion\" in rtype :\n formula = \"=\" + eQ_ion(RES,sigma)[1].replace (\"RES\",'%.2f') % RES\n estRes = (avgQ - 1.0795) / -0.1103\n elif \"Water\" in rtype :\n formula =\"=\" + eQ_water(RES,sigma)[1].replace (\"RES\",'%.2f') % RES\n estRes = (avgQ - 1.0001) / -0.0895\n else :\n formula = \"?\"\n estRes = 0.0\n\n fp.write ( \"%s\\t%s\\t%d\\t%.2f\\t%s\\t%.2f\\n\" % (cid, rtype, numR, avgQ, formula, estRes) )\n\n #print \" - cid: %s - %s - %.2f\" % (cid, ctypes, cQ)\n fp.write ( \"\\n\" )\n\n for cid in cres.keys () :\n rs = cres[cid]\n rs.sort()\n r = rs[0][1]\n\n if r.isProt :\n fp.write ( \"Protein - Chain %s\\t\\t\\t\\t\\t\\t\\t\\tAverage over 3 residues\\t\\t\\t\\t\\tAverage over 5 residues\\t\\t\\t\\t\\tAverage over 7 residues\\t\\t\\t\\t\\tAverage over 11 residues\\n\\n\" % cid )\n fp.write ( \"Chain\\tRes\\tRes #\\tQ_backBone\\tQ_sideChain\\tQ_residue\\tExpectedQ@%.2f\\t\\t\" % RES )\n fp.write ( \"Q_backBone\\tQ_sideChain\\tQ_residue\\tExpectedQ@%.2f\\t\\t\" % RES )\n fp.write ( \"Q_backBone\\tQ_sideChain\\tQ_residue\\tExpectedQ@%.2f\\t\\t\" % RES )\n fp.write ( \"Q_backBone\\tQ_sideChain\\tQ_residue\\tExpectedQ@%.2f\\t\\t\" % RES )\n fp.write ( \"Q_backBone\\tQ_sideChain\\tQ_residue\\tExpectedQ@%.2f\\t\\n\" % RES )\n elif r.isNA :\n fp.write ( \"Nucleic Acid - Chain %s\\t\\t\\t\\t\\t\\t\\t\\t\\tAverage over 3 nucleotides\\t\\t\\t\\t\\t\\tAverage over 5 nucleotides\\t\\t\\t\\t\\t\\tAverage over 7 nucleotides\\t\\t\\t\\t\\t\\tAverage over 11 nucleotides\\n\\n\" % cid )\n fp.write ( \"Chain\\tRes\\tRes #\\tQ_backBone\\tQ_sugar\\tQ_base\\tQ_nucleotide\\tExpectedQ@%.2f\\t\\t\" % RES )\n fp.write ( \"Q_backBone\\tQ_sugar\\tQ_base\\tQ_nucleotide\\tExpectedQ@%.2f\\t\\t\" % RES )\n fp.write ( \"Q_backBone\\tQ_sugar\\tQ_base\\tQ_nucleotide\\tExpectedQ@%.2f\\t\\t\" % RES )\n fp.write ( \"Q_backBone\\tQ_sugar\\tQ_base\\tQ_nucleotide\\tExpectedQ@%.2f\\t\\t\" % RES )\n fp.write ( \"Q_backBone\\tQ_sugar\\tQ_base\\tQ_nucleotide\\tExpectedQ@%.2f\\t\\n\" % RES )\n else :\n fp.write ( \"Molecule - Chain %s\\n\\n\" % cid )\n fp.write ( \"Chain\\tMolecule\\tMol #\\t\\t\\tQ_molecule\\tExpectedQ@%.2f\\n\" % RES )\n\n\n ress = []\n Qs, AV, CC = [], [], []\n for ri, r in rs :\n\n #if not r.isProt and not r.isNA :\n # print \" - cid: %s - r %d - not prot or RNA\" % (cid, r.id.position)\n # continue\n\n ress.append (r)\n\n qs = [at.Q for at in r.atoms if (at.element.name != \"H\" and hasattr(at,'Q'))]\n if len(qs) 
== 0 :\n continue\n\n r.Q = numpy.average ( qs )\n\n r.qBB, r.qSC, r.qSugar = 0, 0, 0\n if len(r.bbAtoms) > 0 :\n r.qBB = numpy.average ( [at.Q for at in r.bbAtoms if at.element.name != \"H\"] )\n if len(r.scAtoms) > 0 :\n r.qSC = numpy.average ( [at.Q for at in r.scAtoms if at.element.name != \"H\"] )\n if len(r.sugarAtoms) > 0 :\n r.qSugar = numpy.average ( [at.Q for at in r.sugarAtoms if at.element.name != \"H\"] )\n Qs.append ( [r.qBB, r.qSC, r.Q, r.qSugar] )\n\n if 0 :\n ad = avgdAts ( r.atoms, dmap )\n aSC, aBB = 0, 0\n if len(r.scAtoms) > 0 :\n aSC = avgdAts ( r.scAtoms, dmap )\n if len(r.bbAtoms) > 0 :\n aBB = avgdAts ( r.bbAtoms, dmap )\n AV.append ( [ad, aBB, aSC] )\n\n if 0 :\n cc, ccm = ccAts ( r.atoms, dmap, RES )\n ccSC, ccmSC = ccAts ( r.scAtoms, dmap, RES )\n ccBB, ccmBB = ccAts ( r.bbAtoms, dmap, RES )\n CC.append ( [cc, ccBB, ccSC] )\n #CC.append ( [ccm, ccmBB, ccmSugar, ccmBase] )\n\n\n # averages items in a list over N items before and after\n def N ( A, i, ind, N ) :\n #for i, a in enumerate ( A ) :\n sum, n = 0, 0\n for j in range ( i-N, i+N+1 ) :\n if j >= 0 and j < len(A) :\n sum += A[j][ind]\n n += 1.0\n return sum/n\n\n\n last_i = None\n for i, r in enumerate ( ress ) :\n\n if not hasattr ( r, 'Q' ) or not hasattr (r, 'qBB') :\n continue\n\n # fills in missing residues in proteins and rna\n if (r.isNA or r.isProt) and last_i != None :\n ii = last_i+1\n while ii < r.id.position :\n # fill gaps\n if r.isNA :\n fp.write ( \"%s\\t%s\\t%d\\t\" % (r.id.chainId, \"\", ii ) )\n fp.write ( \"\\t\\t\\t\\t%f\\t\\t\" % (avgQrna ) )\n fp.write ( \"\\t\\t\\t\\t%f\\t\\t\" % (avgQrna) )\n fp.write ( \"\\t\\t\\t\\t%f\\t\\t\" % (avgQrna) )\n fp.write ( \"\\t\\t\\t\\t%f\\t\\t\" % (avgQrna) )\n fp.write ( \"\\t\\t\\t\\t%f\\n\" % (avgQrna) )\n else :\n avgQ = avgQrna if r.isNA else avgQprot\n fp.write ( \"%s\\t%s\\t%d\\t\\t\\t\\t%f\\t\\t\" % (r.id.chainId, \"\", ii, avgQprot ) )\n fp.write ( \"\\t\\t\\t%f\\t\\t\" % (avgQprot) )\n fp.write ( \"\\t\\t\\t%f\\t\\t\" % (avgQprot) )\n fp.write ( \"\\t\\t\\t%f\\t\\t\" % (avgQprot) )\n fp.write ( \"\\t\\t\\t%f\\n\" % (avgQprot) )\n ii += 1\n\n if r.isNA :\n fp.write ( \"%s\\t%s\\t%d\\t\" % (r.id.chainId, r.type, r.id.position) )\n fp.write ( \"%f\\t%f\\t%f\\t%f\\t%f\\t\\t\" % (r.qBB, r.qSugar, r.qSC, r.Q, avgQrna ) )\n fp.write ( \"%f\\t%f\\t%f\\t%f\\t%f\\t\\t\" % (N(Qs,i,0,1), N(Qs,i,3,1), N(Qs,i,1,1), N(Qs,i,2,1), avgQrna ) )\n fp.write ( \"%f\\t%f\\t%f\\t%f\\t%f\\t\\t\" % (N(Qs,i,0,2), N(Qs,i,3,2), N(Qs,i,1,2), N(Qs,i,2,2), avgQrna ) )\n fp.write ( \"%f\\t%f\\t%f\\t%f\\t%f\\t\\t\" % (N(Qs,i,0,3), N(Qs,i,3,3), N(Qs,i,1,3), N(Qs,i,2,3), avgQrna ) )\n fp.write ( \"%f\\t%f\\t%f\\t%f\\t%f\\n\" % (N(Qs,i,0,5), N(Qs,i,3,5), N(Qs,i,1,5), N(Qs,i,2,5), avgQrna ) )\n elif r.isProt :\n if len(r.scAtoms) > 0 :\n fp.write ( \"%s\\t%s\\t%d\\t%f\\t%f\\t%f\\t%f\\t\\t\" % (r.id.chainId, r.type, r.id.position, r.qBB, r.qSC, r.Q, avgQprot ) )\n fp.write ( \"%f\\t%f\\t%f\\t%f\\t\\t\" % (N(Qs,i,0,1), N(Qs,i,1,1), N(Qs,i,2,1), avgQprot ) )\n fp.write ( \"%f\\t%f\\t%f\\t%f\\t\\t\" % (N(Qs,i,0,2), N(Qs,i,1,2), N(Qs,i,2,2), avgQprot ) )\n fp.write ( \"%f\\t%f\\t%f\\t%f\\t\\t\" % (N(Qs,i,0,3), N(Qs,i,1,3), N(Qs,i,2,3), avgQprot ) )\n fp.write ( \"%f\\t%f\\t%f\\t%f\\n\" % (N(Qs,i,0,5), N(Qs,i,1,5), N(Qs,i,2,5), avgQprot ) )\n else :\n fp.write ( \"%s\\t%s\\t%d\\t%f\\t\\t%f\\t%f\\t\\t\" % (r.id.chainId, r.type, r.id.position, r.qBB, r.Q, avgQprot ) )\n fp.write ( \"%f\\t\\t%f\\t%f\\t\\t\" % (N(Qs,i,0,1), N(Qs,i,2,1), avgQprot ) )\n fp.write ( \"%f\\t\\t%f\\t%f\\t\\t\" 
% (N(Qs,i,0,2), N(Qs,i,2,2), avgQprot ) )\n fp.write ( \"%f\\t\\t%f\\t%f\\t\\t\" % (N(Qs,i,0,3), N(Qs,i,2,3), avgQprot ) )\n fp.write ( \"%f\\t\\t%f\\t%f\\n\" % (N(Qs,i,0,5), N(Qs,i,2,5), avgQprot ) )\n elif r.type.upper() in chargedIons :\n fp.write ( \"%s\\t%s\\t%d\\t\\t\\t%f\\t%f\\n\" % (r.id.chainId, r.type, r.id.position, r.Q, avgQIon ) )\n elif r.type.upper() == \"HOH\" :\n fp.write ( \"%s\\t%s\\t%d\\t\\t\\t%f\\t%f\\n\" % (r.id.chainId, r.type, r.id.position, r.Q, avgQWater ) )\n else :\n fp.write ( \"%s\\t%s\\t%d\\t\\t\\t%f\\t?\\n\" % (r.id.chainId, r.type, r.id.position, r.Q ) )\n\n last_i = r.id.position\n\n fp.write ( \"\\n\\n\" )\n\n fp.close()\n print \"\"\n\n\n\n\n\n\ndef CalcRadZ ( mol, cid, dmap, allAtTree, useOld=False, log=False ) :\n\n\n print \"Rad-Z Scores\"\n print \" - map: %s\" % dmap.name\n print \" - mol: %s, chain: %s\" % (mol.name, cid if cid != None else \"_all_\")\n\n\n ress = []\n for r in mol.residues :\n if cid == None or r.id.chainId == cid :\n if not useOld :\n ress.append ( r )\n elif not hasattr (r, 'scS' ) :\n ress.append ( r )\n\n print \" - residues to do: %d\" % len(ress)\n\n\n for ri, r in enumerate ( ress ) :\n\n r.scZ = RadZ ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2 )\n r.bbZ = RadZ ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2 )\n\n if log and ri % 10 == 0 :\n status ( \"Calculating - res %d/%d\" % (ri, len(ress)) )\n print \".\",\n\n\n scoresBB, scoresSC = [], []\n for r in mol.residues :\n if cid == None or r.id.chainId == cid :\n if r.bbZ != None :\n scoresBB.append ( r.bbZ )\n if r.scZ != None :\n scoresSC.append ( r.scZ )\n\n print \" - avg radz - side chain %.1f, backbone %.1f\" % (numpy.average(scoresSC), numpy.average(scoresBB) )\n\n return numpy.average(scoresBB), numpy.average(scoresSC)\n\n\n\n\ndef qwork (num, ress, dmap, allAtTree, log):\n\n print 'qwork %d - %d res, %d - %d' % (num, len(ress), ress[0].id.position, ress[-1].id.position)\n\n for ri, r in enumerate ( ress ) :\n r.scZ = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )\n r.bbZ = RadAts ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )\n\n if num == 0 and log :\n status ( \"Calculating Q scores - %d/%d\" % (ri, len(ress)) )\n print \".\",\n\n\n\ndef CalcSigma ( mol, cid, dmap, allAtTree, useOld=False, log=False ) :\n\n\n print \"Sigma Scores\"\n print \" - map: %s\" % dmap.name\n print \" - mol: %s, chain: %s\" % (mol.name, cid if cid != None else \"_all_\")\n\n ress = []\n for r in mol.residues :\n if cid == None or r.id.chainId == cid :\n if not useOld :\n ress.append ( r )\n elif not hasattr (r, 'scS' ) :\n ress.append ( r )\n\n print \" - residues to do: %d\" % len(ress)\n\n\n\n if 0 :\n\n import multiprocessing, threading\n N = 4 # multiprocessing.cpu_count()\n print \" - cores: %d\" % N\n dn = len(ress) / N\n\n threads = []\n for i in range(N):\n l = i * dn\n h = (i+1)*dn if i != N-1 else len(ress)\n #print \"t %d, %d-%d\" % (i, l, h)\n\n #t = threading.Thread(target=qwork, args=(i,ress[l:h], dmap, allAtTree))\n #threads.append(t)\n #t.start()\n\n #t = threading.Thread(name='d%d'%i, target=qwork, args=(i,ress[l:h], dmap, allAtTree, log))\n #t.setDaemon(True)\n #t.start()\n #threads.append(t)\n\n #print __name__\n if 1 or __name__ == '__main__':\n p = ctx.Process(target=qwork, args=(i,ress[l:h], dmap, allAtTree, log))\n p.start()\n threads.append(p)\n\n for i, t in enumerate(threads) :\n print \"j %d\" % (i)\n t.join()\n\n else 
:\n\n for ri, r in enumerate ( ress ) :\n\n r.bbZ = RadAts ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )\n r.scZ = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=2, dRAD=0.2 )\n\n if log and ri % 10 == 0 :\n status ( \"Calculating - res %d/%d\" % (ri, len(ress)) )\n print \".\",\n\n\n\n scoresBB, scoresSC = [], []\n\n ress = []\n for r in mol.residues :\n if cid == None or r.id.chainId == cid :\n ress.append ( r )\n if r.bbZ != None : scoresBB.append ( r.bbZ )\n if r.scZ != None : scoresSC.append ( r.scZ )\n\n #sc = [x for x in scores if x is not None]\n #scSC = [1.0/x for x in scoresSC if x is not None]\n #scBB = [1.0/x for x in scoresBB if x is not None]\n\n #print \" - %d res, SC min %.2f max %.2f, avg %.2f\" % (len(ress), min(scSC), max(scSC), numpy.average(scSC))\n print \" - avg sigma - side chain %.1f, backbone %.1f\" % (numpy.average(scoresSC), numpy.average(scoresBB) )\n\n\n if 0 :\n\n sByType = {}\n rByType = {}\n for r in ress :\n if r.scZ != None :\n if not r.type in sByType :\n rByType[r.type] = []\n sByType[r.type] = []\n rByType[r.type].append ( [r.scZ, r] )\n sByType[r.type].append ( [r.scZ] )\n\n avgs = []\n for rtype, ra in sByType.iteritems () :\n avgs.append ( [numpy.average (ra), rtype] )\n\n from chimera.resCode import protein3to1\n from chimera.resCode import nucleic3to1\n avgs.sort ( reverse=True, key=lambda x: x[0] )\n\n\n mapName = os.path.splitext(dmap.name)[0]\n molName = os.path.splitext(mol.name)[0]\n mdir, mpfile = os.path.split(dmap.data.path)\n foname = mdir + \"/\" + mapName + \"__\" + molName + \".txt\"\n\n\n print \" - scores to: \" + foname\n fp = open (foname,\"w\")\n\n for avgScore, rtype in avgs :\n\n rscores = rByType[rtype]\n rscores.sort ( reverse=False, key=lambda x: x[0] )\n hr = rscores[0]\n R = hr[1]\n highestScore = hr[0]\n numRes = len(rscores)\n\n rts = \"\"\n if R.isProt : rts = protein3to1[rtype]\n else : rts = nucleic3to1[rtype]\n\n print \"%s\\t%s\\t%d\\t%f\\t%d\\t.%s\\t%f\" % (rtype, rts, numRes, avgScore, R.id.position, R.id.chainId, highestScore)\n fp.write ( \"%s\\t%s\\t%d\\t%f\\t%d\\t.%s\\t%f\\n\" % (rtype, rts, numRes, avgScore, R.id.position, R.id.chainId, highestScore) )\n\n fp.close()\n\n\n return numpy.average(scoresBB), numpy.average(scoresSC)\n\n\ndef CalcResQ (r, dmap=None, sigma=0.6, allAtTree=None, numPts=8, toRAD=2.0, dRAD=0.1, minD=0.0, maxD=1.0, useOld=False ) :\n\n scQ, bbQ, Q, numSC, numBB = 0.0, 0.0, 0.0, 0.0, 0.0\n for at in r.atoms :\n if at.element.name == \"H\" :\n continue\n if not hasattr ( at, 'isBB' ) :\n SetBBAts ( at.molecule )\n if hasattr (at, 'Q') :\n Q += at.Q\n if r.isProt or r.isNA :\n if at.isBB :\n bbQ += at.Q\n numBB += 1.0\n else :\n scQ += at.Q\n numSC += 1.0\n\n if r.isProt or r.isNA :\n if numSC > 0 :\n r.scQ = scQ / numSC\n else :\n r.scQ = None\n if numBB > 0 :\n r.bbQ = bbQ / numBB\n else :\n r.bbQ = None\n\n r.Q = Q / float ( len(r.atoms) )\n\n\n\ndef CalcQ_ ( mol, cid, dmap, sigma=0.5, allAtTree=None, useOld=False, log=False ) :\n\n print \"Q Scores - in parallel\"\n print \" - map: %s\" % dmap.name\n print \" - mol: %s, chain: %s\" % (mol.name, cid if cid != None else \"_all_\")\n\n ress = []\n for r in mol.residues :\n if cid == None or r.id.chainId == cid :\n ress.append ( r )\n\n print \" - residues to do: %d\" % len(ress)\n\n\n import multiprocessing\n threads = multiprocessing.cpu_count() / 2\n print 'calc q using %d threads' % threads\n\n # Avoid periodic Python context switching.\n import sys\n 
original_check_interval = sys.getcheckinterval()\n sys.setcheckinterval(1000000000)\n\n # Define thread class for fitting.\n from threading import Thread\n class Q_Thread(Thread):\n def __init__(self, ress, ti):\n Thread.__init__(self)\n self.ress = ress\n self.ti = ti\n def run(self):\n print \"run - %d - %d\" % (self.ti, len(ress))\n for ri, r in enumerate ( self.ress ) :\n #CalcResQ (r, dmap, sigma, allAtTree=allAtTree, numPts=2, toRAD=2.0, dRAD=0.2 )\n #print \"%d-%d/%d\" % (ti,ri/len(self.ress)),\n for at in r.atoms :\n if at.element.name != \"H\" :\n qs = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.5 )\n\n\n # Starts threads with each calculating an equal number of fits.\n n = len(ress)\n g = [ress[(n*c)/threads:(n*(c+1))/threads] for c in range(threads)]\n threads = []\n for mi, ml in enumerate(g) :\n #print \"%d - %d, %d-%d\" % (mi,len(ml),ml[0].id.position,ml[-1].id.position)\n t = Q_Thread(ml,mi)\n threads.append(t)\n\n for t in threads:\n t.start()\n print \"\"\n\n # Wait for all threads to finish\n for t in threads:\n t.join()\n\n # Restore periodic context switching.\n sys.setcheckinterval(original_check_interval)\n\n # Collect fit results from all threads.\n #for t in threads:\n # print \"\",\n\n\n\n\ndef CalcQ ( mol, cid, dmap, sigma, useOld=False, log=False ) :\n\n print \"\"\n print \"Q Scores\"\n print \" - map: %s\" % dmap.name\n print \" - mol: %s, chain: %s\" % (mol.name, cid if cid != None else \"_all_\")\n print \" - sigma: %.2f\" % sigma\n\n minD, maxD = MinMaxD ( dmap )\n print \" - mind: %.3f, maxd: %.3f\" % (minD, maxD)\n\n ats = [at for at in mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - atoms tree: %d/%d ats\" % ( len(ats), len(mol.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n #allAtTree = None\n\n atoms = []\n\n import time\n start = time.time()\n\n #ress = []\n for r in mol.residues :\n if cid == None or cid == \"All\" or r.id.chainId == cid :\n for at in r.atoms :\n if not at.element.name == \"H\" :\n atoms.append ( at )\n\n print \" - atoms to do: %d\" % len(atoms)\n\n #for ai, at in enumerate ( atoms[0:2] ) :\n # qs = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Calculating Q-scores', modal = True)\n\n SetBBAts ( mol )\n\n modi = 100\n if len(atoms) > 100000 :\n modi = 1000\n\n try :\n\n for ai, at in enumerate ( atoms ) :\n\n at.Q = Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )\n at.bfactor = at.Q\n\n end = time.time()\n totSec = end - start\n\n leftTime = \"\"\n leftSec = 0.0\n iPerSec = float(ai) / totSec\n if iPerSec > 0 :\n leftSec = float ( len(atoms) - ai ) / iPerSec\n leftHour = numpy.floor ( leftSec / 60.0 / 60.0 )\n leftSec = leftSec - leftHour * 60.0 * 60.0\n leftMin = numpy.floor ( leftSec / 60.0 )\n leftSec = leftSec - leftMin * 60.0\n leftTime = \"%.0f:%.0f:%.0f\" % (leftHour, leftMin, leftSec)\n\n\n if ai+1 == 100 :\n if log :\n print \" - atom %d/%d - eta: %s\" % (ai+1, len(atoms), leftTime)\n\n elif (ai+1) % modi == 0 :\n if log :\n print \" - atom %d/%d - eta: %s\" % (ai+1, len(atoms), leftTime)\n\n task.updateStatus( \" - Q scores - atom %d/%d - eta: %s\" % (ai+1, len(atoms), leftTime) )\n\n except :\n print \" - something went wrong...\"\n return None\n\n finally :\n 
task.finished()\n\n\n end = time.time()\n print \"\"\n print \" - done, time: %f\" % ( end-start )\n totSec = end - start\n totMin = numpy.floor ( totSec / 60.0 )\n totSec = totSec - totMin * 60.0\n print \" - done, time: %.0f min, %.1f sec\" % ( totMin, totSec )\n\n\n SaveQFile ( mol, cid, dmap, sigma )\n Qavg = QStats1 ( mol, cid )\n\n return Qavg\n\n\n\n\ndef SaveQFile ( mol, cid, dmap, sigma ) :\n\n if not hasattr ( mol, 'openedAs' ) :\n print \"\"\n print \" >>> Could not save file with Q-scores - molecule was not opened from file?\"\n print \"\"\n return\n\n molPath, molExt = os.path.splitext(mol.openedAs[0])\n mapName = os.path.splitext(dmap.name)[0]\n\n if hasattr ( mol, 'cif' ) and molExt == '.cif' :\n import mmcif\n reload ( mmcif )\n fout = molPath + \"__Q__\" + mapName + \".cif\"\n mmcif.WriteMol ( mol, fout )\n\n else :\n nname_ = molPath + \"__Q__\" + mapName + \"_.pdb\"\n try :\n chimera.PDBio().writePDBfile ( [mol], nname_ )\n except :\n print \" - could not save Q-scores file\"\n return\n\n nname = molPath + \"__Q__\" + mapName + \".pdb\"\n\n fpo = open ( nname, \"w\" )\n fpi = open ( nname_ )\n\n ver = \"\"\n try :\n from Segger.mapq import mapqVersion\n ver = mapqVersion\n print \" ----1- version: %s\" % mapqVersion\n except :\n pass\n try :\n from mapq.mapq import mapqVersion\n ver = mapqVersion\n print \" ----2- version: %s\" % mapqVersion\n except :\n pass\n try :\n from mapq import mapqVersion\n ver = mapqVersion\n print \" ----3- version: %s\" % mapqVersion\n except :\n pass\n\n fpo.write ( \"REMARK 0 \\n\" )\n fpo.write ( \"REMARK 0 Q-scores calculated with MapQ\\n\" )\n fpo.write ( \"REMARK 0 - sigma %.1f\\n\" % sigma )\n fpo.write ( \"REMARK 0 - more info: github.com/gregdp/mapq\\n\" )\n fpo.write ( \"REMARK 0 - Q-scores for each atom are stored in B-factor column\\n\" )\n fpo.write ( \"REMARK 0 - Model: %s\\n\" % mol.name )\n if cid == None :\n fpo.write ( \"REMARK 0 - for all atoms\\n\" )\n else :\n fpo.write ( \"REMARK 0 - for atoms in chain: %s\\n\" % cid )\n fpo.write ( \"REMARK 0 - (other atoms have original B-factor values)\\n\" )\n fpo.write ( \"REMARK 0 - Map: %s\\n\" % dmap.name )\n fpo.write ( \"REMARK 0 \\n\" )\n for l in fpi :\n fpo.write (l)\n fpo.close()\n fpi.close()\n\n print \" - saved %s with Q-scores\" % nname\n\n from os import remove\n try :\n remove(nname_)\n except :\n print \" - could not remove %s\" % nname_\n\n\n\ndef QsFromPdbFile ( mol, qfpath ) :\n\n rids = {}\n for r in mol.residues :\n rids[\"%d.%s\" % (r.id.position,r.id.chainId)] = r\n\n # http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM\n try :\n fin = open ( qfpath, \"r\" )\n except :\n #print \" - file not found\"\n return False\n\n print \" - Qs from file: %s\" % qfpath\n\n for line in fin :\n if line[0:4] == \"ATOM\" or line[0:6] == \"HETATM\" :\n aname, aloc, cid, resi, occ, bfac = line[12:16].strip(), line[16:17].strip(), line[21], int(line[22:26]), float ( line[54:60] ), float ( line[60:66] )\n #if occ < 1.0 :\n rid = \"%s.%s\" % (resi,cid)\n if rid in rids :\n r = rids[rid]\n\n if aname in r.atomsMap :\n ats = r.atomsMap[aname]\n found = False\n for at in ats :\n if at.altLoc == aloc :\n at.Q = bfac\n at.bfactor = bfac\n #at.bfactor = 100.0 * (1.0 - at.Q)\n #dval = self.cur_dmap.interpolated_values ( [ at.coord() ], self.cur_mol.openState.xform ).astype(numpy.float64, copy=False)[0]\n found = True\n if not found :\n #print \" -xx- %s.%s - atom %s - loc %s\" % (resi, cid, aname, aloc)\n continue\n else :\n #print \" -xx- %s.%s - atom %s\" % 
(resi,cid, aname)\n continue\n\n\n fin.close ()\n\n return True\n\n\n\ndef QsFromCifFile ( mol, qfpath ) :\n\n print \" - Qs from file: %s\" % qfpath\n from mmcif import ReadMol\n qmol = ReadMol ( qfpath, log=False )\n\n rids = {}\n for r in qmol.residues :\n rids[\"%d.%s\" % (r.id.position,r.id.chainId)] = r\n\n numNotFound, numQ, numNoQ = 0, 0, 0\n for at in mol.atoms :\n rid = \"%d.%s\" % (at.residue.id.position,at.residue.id.chainId)\n if rid in rids :\n qres = rids[rid]\n if at.name in qres.atomsMap :\n found = False\n for qat in qres.atomsMap[at.name] :\n #print \"[%s] [%s]\" % (at.altLoc, qat.altLoc)\n if at.altLoc == qat.altLoc :\n found = True\n #print qat.Q\n if hasattr ( qat, 'Q' ) :\n at.Q = qat.Q\n numQ += 1\n else :\n numNoQ += 1\n if not found :\n #print \" -xx- %s.%s - atom %s - loc %s\" % (resi, cid, aname, aloc)\n continue\n else :\n #print \" -xx- %s.%s - atom %s\" % (resi,cid, aname)\n numNotFound += 1\n\n\n if numNotFound != 0 :\n print \" - %d/%d atoms not found in q-score file\" % (numNotFound, len(mol.atoms))\n\n print \" - got Q-scores for %d/%d atoms - %d no Q\" % (numQ, len(mol.atoms), numNoQ)\n\n return True\n\n\n\ndef QScoreFileName ( mol, dmap ) :\n\n molPath = os.path.splitext(mol.openedAs[0])[0]\n mapName = os.path.splitext(dmap.name)[0]\n qfpath = molPath + \"__Q__\" + mapName + \".pdb\"\n\n return qfpath\n\n\n\n\n\n\ndef AddSpherePts ( pts, clr, rad, mname = \"RAD points\" ) :\n\n from chimera import elements, Coord, Atom, MolResId\n\n ptsMol = GetMod ( mname )\n\n res = None\n if ptsMol == None:\n from chimera import Molecule, openModels\n ptsMol = Molecule()\n ptsMol.name = mname\n ptsMol.isRealMolecule = False\n openModels.add ( [ptsMol], noprefs = True )\n res = ptsMol.newResidue('marker', chimera.MolResId('1', 1) )\n else :\n res = ptsMol.residues[0]\n\n for pt in pts :\n a = ptsMol.newAtom('', elements.H)\n res.addAtom(a)\n\n a.setCoord ( chimera.Point(*pt) ) # ( chimera.Point(*xyz) )\n a.radius = rad\n a.drawMode = Atom.Sphere\n a.color = chimera.MaterialColor ( *clr )\n a.surfaceCategory = 'markers'\n\n return ptsMol\n\n\n\ndef SpherePts ( ctr, rad, N ) :\n\n thetas, phis = [], []\n from math import acos, sin, cos, sqrt, pi\n for k in range ( 1, N+1 ) :\n h = -1.0 + ( 2.0*float(k-1)/float(N-1) )\n phis.append ( acos(h) )\n thetas.append ( 0 if k == 1 or k == N else\n (thetas[k-2] + 3.6/sqrt(N*(1.0-h**2.0))) % (2*pi) )\n\n pts = [None] * N\n for i, theta, phi in zip ( range(N), thetas, phis ):\n v = chimera.Vector (sin(phi)*cos(theta), sin(phi)*sin(theta), cos(phi))\n #if numpy.abs ( v.length - 1.0 ) > 1e-3 :\n # print \"x\"\n pt = ctr + v * rad\n pts[i] = pt\n\n return pts\n\n\n\nimport threading\n\n\n\ndef Calc_ ( label=\"\", res=0.0 ) :\n\n print \"Calc Q scores:\", label\n\n from VolumeViewer import Volume\n vols = chimera.openModels.list(modelTypes = [Volume])\n if len(vols) == 0 :\n print \" - no volumes loaded\"\n return\n dmap = vols[0]\n print \" - dmap: %s\" % dmap.name\n print \" - res: %s\" % res\n\n #fp = open ( \"/Users/greg/_data/_mapsq/scores.txt\", \"a\" )\n #fp.write ( \"%s...\\n\" % dmap.name.split(\"_\")[0] )\n #fp.close ()\n\n from chimera import Molecule\n mols = chimera.openModels.list(modelTypes = [Molecule])\n if len(mols) == 0 :\n print \" - no molecules loaded\"\n return\n mol = mols[0]\n print \" - mol: %s\" % mol.name\n SetBBAts ( mol )\n\n ats = [at for at in mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( 
len(ats), len(mol.atoms) )\n #allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n allAtTree = None\n\n\n qs, dr, q, qcc, emr = 0,0,0,0,0\n #bbRadZ, scRadZ, scRotaZ = 0,0,0\n\n sigma = 0.4\n\n cid = None\n #cid = mol.residues[0].id.chainId\n\n qs = CalcQp ( mol, cid, dmap, sigma=sigma, allAtTree=allAtTree, useOld=False )\n\n print \"\"\n print \"Avg. Q scores:\"\n print \"\"\n tps = qs.keys()\n tps.sort()\n for tp in tps :\n print \" - %s : %.2f\" % (tp, qs[tp])\n print \"\"\n\n\n if 1 :\n at = 30\n fp = None\n if os.path.isdir(\"/Users/greg/Dropbox/_mapsq\") :\n fp = open ( \"/Users/greg/Dropbox/_mapsq/scores%d_Q_allc_%s_sig%.0f.txt\" % (at, label, sigma*100.0), \"a\" )\n elif os.path.isdir(\"/home/greg/Dropbox/_mapsq\") :\n fp = open ( \"/home/greg/Dropbox/_mapsq/scores%d_Q_allc_%s_sig%.0f.txt\" % (at, label, sigma*100.0), \"a\" )\n elif os.path.isdir(\"C:/Users/greg/Dropbox/_mapsq\") :\n fp = open ( \"C:/Users/greg/Dropbox/_mapsq/scores%d_Q_allc_%s_sig%.0f.txt\" % (at, label, sigma*100.0), \"a\" )\n else :\n fp = open ( \"scores%d_Q_allc_%s_sig%.0f.txt\" % (at, label, sigma*100.0), \"a\" )\n\n fp.write ( \"%s\\t%s\\t%s\" % (dmap.name, mol.name, res) )\n\n for tp in tps :\n fp.write ( \"\\t%s\\t%.2f\" % (tp, qs[tp]) )\n\n fp.write ( \"\\n\" )\n\n #nProt = len ( [at for at in mol.atoms if at.residue.isProt == True] )\n #nNA = len ( [at for at in mol.atoms if at.residue.isNA == True] )\n #fp.write ( \"%s\\t%s\\t%s\\t%d\\t%d\\n\" % (dmap.name, mol.name, res, nProt, nNA) )\n\n fp.close ()\n\n\n\n\ndef emringer ( dmap, mol ) :\n\n print \"----- %s ____________ EMRINGER ____________ %s -----\" % (dmap.name, mol.name)\n\n cdir = os.getcwd()\n print \" - now in: \", cdir\n\n #print \" - splitting \" + mol.openedAs[0]\n mpath, mname = os.path.split ( mol.openedAs[0] )\n dpath, dname = os.path.split ( dmap.data.path )\n\n bs = os.path.splitext ( mol.openedAs[0] )[0]\n\n\n print \" - copying mol file... 
removes symmetry/connect stuff\"\n fin = open ( mol.openedAs[0], \"r\" )\n fout = open ( bs + \"_.pdb\", \"w\" )\n for line in fin :\n if \"ATOM\" in line or \"HETATM\" in line :\n fout.write ( line )\n fin.close ()\n fout.close ()\n\n\n phPath = \"/Users/greg/_mol/phenix-1.14-3260/build/bin/\"\n #phPath = \"/Users/greg/_mol/phenix-1.15rc3-3435/build/bin/\"\n\n args = [phPath+'phenix.emringer', dmap.data.path, bs+\"_.pdb\" ]\n print \"running: \",\n for arg in args : print arg,\n print \"\"\n\n outf = mpath + '/' + '_out.txt'\n errf = mpath + '/' + '_err.txt'\n fout = open ( outf, \"w\" )\n ferr = open ( errf, \"w\" )\n import subprocess\n p = subprocess.Popen(args, stdout=fout, stderr=ferr, cwd=mpath)\n p.wait()\n fout.close()\n ferr.close()\n\n print \" - getting score from \" + outf\n score = -100\n fin = open ( outf )\n for l in fin :\n if \"EMRinger Score:\" in l :\n s = l [ len(\"EMRinger Score:\")+1 : ]\n print \"Score: \", s\n score = float( s )\n print \" - found score: %.3f\" % score\n\n print \" - removing \", bs + \"_.pdb\"\n import shutil\n try :\n os.remove ( bs + \"_.pdb\" )\n os.remove ( bs + \"__emringer.pkl\" )\n os.remove ( bs + \"__emringer.csv\" )\n shutil.rmtree ( bs + \"__emringer_plots\" )\n print \" - done\"\n except :\n print \" -- did not find\"\n\n return score\n\n\ndef refine ( dmap, mol, res ) :\n\n print \"----- %s ____________ REFINE ____________ %s -----\" % (dmap.name, mol.name)\n\n cdir = os.getcwd()\n print \" - now in: \", cdir\n\n #print \" - splitting \" + mol.openedAs[0]\n mpath, mname = os.path.split ( mol.openedAs[0] )\n dpath, dname = os.path.split ( dmap.data.path )\n\n bs = os.path.splitext ( mol.openedAs[0] )[0]\n\n\n print \" - copying mol file... removes symmetry/connect stuff\"\n fin = open ( mol.openedAs[0], \"r\" )\n fout = open ( bs + \"_.pdb\", \"w\" )\n for line in fin :\n if \"ATOM\" in line or \"HETATM\" in line :\n fout.write ( line )\n fin.close ()\n fout.close ()\n\n\n phPath = \"/Users/greg/_mol/phenix-1.14-3260/build/bin/\"\n phPath = \"/Users/greg/_mol/phenix-1.15rc3-3435/build/bin/\"\n\n args = [phPath+'phenix.real_space_refine', dmap.data.path, bs+\"_.pdb\", \"resolution=%.1f\"%res ]\n print \"running: \",\n for arg in args : print arg,\n print \"\"\n\n outf = mpath + '/' + '_out.txt'\n errf = mpath + '/' + '_err.txt'\n fout = open ( outf, \"w\" )\n ferr = open ( errf, \"w\" )\n import subprocess\n p = subprocess.Popen(args, stdout=fout, stderr=ferr, cwd=mpath)\n p.wait()\n fout.close()\n ferr.close()\n\n print \" - getting score from \" + outf\n score = -100\n fin = open ( outf )\n for l in fin :\n if \"EMRinger Score:\" in l :\n s = l [ len(\"EMRinger Score:\")+1 : ]\n print \"Score: \", s\n score = float( s )\n print \" - found score: %.3f\" % score\n\n print \" - removing \", bs + \"_.pdb\"\n import shutil\n try :\n os.remove ( bs + \"_.pdb\" )\n os.remove ( bs + \"__emringer.pkl\" )\n os.remove ( bs + \"__emringer.csv\" )\n shutil.rmtree ( bs + \"__emringer_plots\" )\n print \" - done\"\n except :\n print \" -- did not find\"\n\n return score\n\n\ndef refdir ( rdir ) :\n\n print \"Refining in\", rdir\n\n\n\ndef CalcR_ ( label = \"\" ) :\n\n print \"Calc all scores -\", label\n\n from VolumeViewer import Volume\n dmap = chimera.openModels.list(modelTypes = [Volume])[0]\n print \" - dmap: %s\" % dmap.name\n\n #fp = open ( \"/Users/greg/_data/_mapsq/scores.txt\", \"a\" )\n #fp.write ( \"%s...\\n\" % dmap.name.split(\"_\")[0] )\n #fp.close ()\n\n from chimera import Molecule\n mol = chimera.openModels.list(modelTypes = 
[Molecule])[0]\n print \" - mol: %s\" % mol.name\n SetBBAts ( mol )\n\n\n mapName = os.path.splitext(dmap.name)[0]\n molName = os.path.splitext(mol.name)[0]\n ddir, dfile = os.path.split(dmap.data.path)\n\n molFile = mol.openedAs[0]\n mdir, mfile = os.path.split(molFile)\n\n print \"PhFmap -- \" + molFile\n\n RES = 3.0\n print \" -- res %.1f -- \" % RES\n\n outFile = molFile + \"_r%.0f\" % RES + \"_fmodel.ccp4\"\n\n if not os.path.isfile ( outFile ) :\n\n phPath = \"/usr/local/phenix-1.14-3260/build/bin/\"\n\n args = [phPath+'phenix.fmodel', \"high_resolution=%.1f\"%RES, \"scattering_table=electron\", \"generate_fake_p1_symmetry=True\", molFile ]\n print \"running: \",\n for arg in args : print arg,\n print \"\"\n\n fout = open ( mdir + '/' + '_0_fmodel.log', \"w\" )\n import subprocess\n p = subprocess.Popen(args, stdout=fout, cwd=mdir)\n p.wait()\n fout.close()\n\n print \"\"\n args = [phPath+'phenix.mtz2map', \"high_resolution=%.1f\"%RES, \"include_fmodel=true\", \"scattering_table=electron\", molFile, molFile + \".mtz\" ]\n print \"running: \",\n for arg in args : print arg,\n print \"\"\n\n fout = open ( mdir + '/' + '_1_mtz2map.log', \"w\" )\n p = subprocess.Popen(args, stdout=fout, cwd=mdir)\n p.wait()\n fout.close()\n\n print \" - renaming to:\", outFile\n os.rename( molFile + \"_fmodel.ccp4\", outFile )\n os.remove( molFile + \".mtz\" )\n\n\n print \" - loading map:\", outFile\n dm = chimera.openModels.open ( outFile )[0]\n\n\n\n molg = MyMolMapX ( mol, mol.atoms, RES, dmap.data.step[0], chimera.Xform.identity() )\n fpoints, fpoint_weights = fit_points_g ( molg, 0.1 )\n map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )\n\n mmolap, mmcorr1, mmcorr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n print \"Molmap - olap: %f, CC: %f, CCm: %f\" % (mmolap, mmcorr1, mmcorr2)\n\n fpoints, fpoint_weights = fit_points_g ( dm.data, 5.0 )\n map_values = dmap.interpolated_values ( fpoints, dm.openState.xform )\n olap, phcorr1, phcorr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n print \"Phenix - olap: %f, CC: %f, CCm: %f\" % (olap, phcorr1, phcorr2)\n\n #fpoints, fpoint_weights = fit_points_g ( dmap.data, -1e6 )\n #map_values = dm.interpolated_values ( fpoints, dmap.openState.xform )\n #olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n #print \"Phenix - olap: %f, CC: %f, CCm: %f\" % (olap, corr1, corr2)\n\n\n print \"%f\\t%f\\t%f\\t%f\" % (mmcorr1, mmcorr2, phcorr1, phcorr2)\n\n fp = open ( \"/Users/greg/Dropbox/_mapsq/scores3_R_%s.txt\" % label, \"a\" )\n fp.write ( \"%s\\t%f\\t%f\\t%f\\t%f\\n\" % (dmap.name.split(\"_\")[0], mmcorr1, mmcorr2, phcorr1, phcorr2) )\n fp.close ()\n\n\n#[ 0.04964269]\n#[ 0.08007674]\n#[ 0.08772154]\n#[ 0.06052513]\n#[ 0.05444193]\n#[ 0.05091212]\n#[ 0.04454869]\n#[ 0.03272544]\n#[ 0.036254]\n#[ 0.02918004]\n\n\ndef MaskMapResize ( atoms, bound, dmap, fout=None ) :\n\n\n import _multiscale\n import _contour\n import _volume\n from _contour import affine_transform_vertices as transform_vertices\n from VolumeData import grid_indices, zone_masked_grid_data, interpolate_volume_data\n\n points = _multiscale.get_atom_coordinates ( atoms, transformed = True )\n #print \" %d points\" % len(points)\n fpoints = points\n\n\n if 0 :\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, R )\n #mdata = VolumeData.Array_Grid_Data ( mdata.full_matrix(), dmap.data.origin, 
dmap.data.step, dmap.data.cell_angles, name = \"atom masked\" )\n\n mat = mdata.full_matrix()\n threshold = 1e-3\n\n points = _volume.high_indices(mat, threshold)\n fpoints = points.astype(numpy.single)\n fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]\n\n #print \" %d points\" % len(points)\n\n nz = numpy.nonzero( fpoint_weights )[0]\n #print \" %d pts nonzero\" % len(nz)\n if len(nz) > 0 and len(nz) < len (fpoint_weights) :\n fpoints = numpy.take( fpoints, nz, axis=0 )\n fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)\n\n else :\n _contour.affine_transform_vertices ( fpoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n #transform_vertices( fpoints, dmap.data.ijk_to_xyz_transform )\n transform_vertices( fpoints, dmap.data.xyz_to_ijk_transform )\n\n #print \" - %s mask %d atoms, %d nonzero points\" % ( dmap.name, len(atoms), len(nz) )\n\n #transform_vertices( fpoints, Matrix.xform_matrix( fmap.openState.xform ) )\n #transform_vertices( fpoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n #transform_vertices ( fpoints, dmap.data.xyz_to_ijk_transform )\n\n #bound = 10\n li,lj,lk = numpy.min ( fpoints, axis=0 ) - (bound, bound, bound)\n hi,hj,hk = numpy.max ( fpoints, axis=0 ) + (bound, bound, bound)\n\n n1 = hi - li + 1\n n2 = hj - lj + 1\n n3 = hk - lk + 1\n\n #print \" - bounds - %d %d %d --> %d %d %d --> %d %d %d\" % ( li, lj, lk, hi, hj, hk, n1,n2,n3 )\n\n #nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )\n #dmat = dmap.full_matrix()\n\n nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )\n\n nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )\n nn2 = int ( round (dmap.data.step[1] * float(n2) / nstep[1]) )\n nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )\n\n O = dmap.data.origin\n #print \" - %s origin:\" % dmap.name, O\n nO = ( O[0] + float(li) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + float(lk) * dmap.data.step[2] )\n\n #print \" - new map origin:\", nO\n\n ox = round ( nO[0]/dmap.data.step[0] ) * dmap.data.step[0]\n oy = round ( nO[1]/dmap.data.step[1] ) * dmap.data.step[1]\n oz = round ( nO[2]/dmap.data.step[2] ) * dmap.data.step[2]\n\n nO = ( ox, oy, oz )\n\n #print \" - new map origin:\", nO\n\n\n nmat = numpy.zeros ( (nn1,nn2,nn3), numpy.float32 )\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )\n\n npoints = grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices\n transform_vertices ( npoints, ndata.ijk_to_xyz_transform )\n\n # todo - don't interpolate\n\n dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )\n #dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )\n #nze = numpy.nonzero ( dvals )\n\n nmat = dvals.reshape( (nn3,nn2,nn1) )\n\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )\n\n if fout == None :\n try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n dmap_base = os.path.splitext(dmap.name)[0]\n dmap_path = os.path.splitext (dmap.data.path)[0]\n nv.name = dmap_base + \"_masked\"\n nv.openState.xform = dmap.openState.xform\n return nv\n\n else :\n\n from VolumeData import save_grid_data\n #d = self.grid_data()\n format = save_grid_data(ndata, fout, None, {}, False)\n #print \" - saved data\"\n\n\n\n\ndef SetBBAts ( mol ) :\n\n #if hasattr ( mol, \"bbats\" ) :\n # return\n #mol.bbats = True\n\n #print \" - setting bbAts in %s\" % mol.name\n for r in mol.residues :\n\n #r.isProt 
= \"C\" in r.atomsMap and \"CA\" in r.atomsMap and \"N\" in r.atomsMap\n #r.isProt = \"CA\" in r.atomsMap\n #r.isNA = \"O3'\" in r.atomsMap and \"O5'\" in r.atomsMap\n\n from chimera.resCode import nucleic3to1\n from chimera.resCode import protein3to1\n protein3to1['HSD'] = protein3to1['HIS']\n protein3to1['HSE'] = protein3to1['HIS']\n\n r.isProt = r.type in protein3to1\n r.isNA = r.type in nucleic3to1\n\n r.score1 = None\n r.score2 = None\n\n if r.isProt :\n r.rtype = \"prot\"\n elif r.isNA :\n r.rtype = \"na\"\n else :\n r.rtype = \"?\"\n\n\n if r.isNA :\n try :\n if nucleic3to1[r.type] == \"G\" :\n r.baseAt = r.atomsMap[\"N9\"][0]\n elif nucleic3to1[r.type] == \"C\" :\n r.baseAt = r.atomsMap[\"N1\"][0]\n elif nucleic3to1[r.type] == \"A\" :\n r.baseAt = r.atomsMap[\"N9\"][0]\n elif nucleic3to1[r.type] == \"U\" :\n r.baseAt = r.atomsMap[\"N1\"][0]\n except :\n #print \" - baseAt not found - \"\n pass\n\n\n r.bbAtoms = []\n r.scAtoms = []\n r.sugarAtoms = []\n\n if r.isProt :\n for a in r.atoms :\n if a.element.name == \"H\" :\n a.isBB, a.isSC = False, False\n continue\n n = a.name\n a.isBB = n==\"C\" or n==\"CA\" or n==\"O\" or n==\"N\" or n==\"OT1\" or n==\"OT2\"\n a.isSC = not a.isBB\n if a.isBB :\n r.bbAtoms.append ( a )\n else :\n r.scAtoms.append ( a )\n\n a.isSugar, a.isBase = False, False\n\n elif r.isNA :\n for a in r.atoms :\n if a.element.name == \"H\" :\n a.isBB, a.isSC = False, False\n continue\n\n n = a.name\n\n a.isBB = n==\"P\" or n==\"O1P\" or n==\"O2P\" or n==\"OP1\" or n==\"OP2\" or n==\"O5'\" or n==\"C5'\" or n==\"O3'\"\n a.isSugar = n==\"C1'\" or n==\"C2'\" or n==\"O4'\" or n==\"O2'\" or n==\"C3'\" or n==\"C4'\"\n #a.isBB = a.isBB or a.isSugar\n a.isBase = not a.isBB and not a.isSugar\n a.isSC = a.isBase\n\n #if nucleic3to1[r.type] == \"G\" : a.isBase = n==\"N9\" or n==\"C8\" or n==\"N7\" or n==\"C5\" or n==\"C4\" or n==\"C6\" or n==\"O6\" or n==\"N1\" or n==\"C2\" or n==\"N2\" or n==\"N3\"\n #elif nucleic3to1[r.type] == \"C\" : a.isBase = n==\"N1\" or n==\"C2\" or n==\"O2\" or n==\"N3\" or n==\"C4\" or n==\"N4\" or n==\"C5\" or n==\"C6\"\n #elif nucleic3to1[r.type] == \"A\" : a.isBase = n==\"N9\" or n==\"C8\" or n==\"N7\" or n==\"C5\" or n==\"C4\" or n==\"N3\" or n==\"C2\" or n==\"N1\" or n==\"C6\" or n==\"N6\"\n #elif nucleic3to1[r.type] == \"U\" : a.isBase = n==\"N1\" or n==\"C2\" or n==\"O2\" or n==\"N3\" or n==\"C4\" or n==\"O4\" or n==\"C5\" or n==\"C6\"\n #else : #print \" -x- NA res %d.%s is ?\" % (r.id.position, r.type) break\n\n if a.isBB :\n r.bbAtoms.append ( a )\n elif a.isSugar :\n r.sugarAtoms.append ( a )\n else :\n r.scAtoms.append ( a )\n\n else :\n for a in r.atoms :\n a.isBB, a.isSC, a.isSugar, a.isBase = False, False, False, False\n\n\n\n\ndef fit_points_g (fdata, threshold = 1e-5) :\n\n mat = fdata.full_matrix()\n\n import _volume\n points = _volume.high_indices(mat, threshold)\n fpoints = points.astype(numpy.single)\n fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]\n\n nz = numpy.nonzero( fpoint_weights )[0]\n if len(nz) < len (fpoint_weights) :\n fpoints = numpy.take( fpoints, nz, axis=0 )\n fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)\n\n from _contour import affine_transform_vertices\n affine_transform_vertices ( fpoints, fdata.ijk_to_xyz_transform )\n\n if 0 : print \"FitPoints from %s with threshold %.4f, %d nonzero\" % (\n fmap.name, threshold, len(nz) )\n\n return fpoints, fpoint_weights\n\n\ndef MyMolMapX2 ( atoms, resolution, step=1.0, xf=None ) :\n\n from math import sqrt, pi\n\n pad = 3*resolution\n cutoff_range = 
5 # in standard deviations\n sigma_factor = 1/(pi*sqrt(2)) # standard deviation / resolution\n\n from _multiscale import get_atom_coordinates\n xyz = get_atom_coordinates(atoms, transformed = False)\n\n\n # Transform coordinates to local coordinates of the molecule containing\n # the first atom. This handles multiple unaligned molecules.\n # Or if on_grid is specified transform to grid coordinates.\n #m0 = atoms[0].molecule\n\n #xf = m0.openState.xform\n if xf :\n import Matrix\n #Matrix.transform_points(xyz, M.xform_matrix(xf.inverse()))\n Matrix.transform_points ( xyz, Matrix.xform_matrix(xf) )\n\n anum = [a.element.number for a in atoms]\n\n grid = bounding_grid(xyz, step, pad, [])\n grid.name = \"\"\n\n sdev = resolution * sigma_factor\n add_gaussians(grid, xyz, anum, sdev, cutoff_range, [])\n\n #return grid, molecules\n return grid\n\n\n\n# -----------------------------------------------------------------------------\n#\ndef bounding_grid(xyz, step, pad, transforms):\n\n xyz_min, xyz_max = point_bounds(xyz, transforms)\n origin = [x-pad for x in xyz_min]\n from math import ceil\n shape = [int(ceil((xyz_max[a] - xyz_min[a] + 2*pad) / step)) for a in (2,1,0)]\n from numpy import zeros, float32\n matrix = zeros(shape, float32)\n from VolumeData import Array_Grid_Data\n grid = Array_Grid_Data(matrix, origin, (step,step,step))\n return grid\n\n\n# -----------------------------------------------------------------------------\n#\ndef add_gaussians(grid, xyz, weights, sdev, cutoff_range, transforms = []):\n\n from numpy import zeros, float32, empty\n sdevs = zeros((len(xyz),3), float32)\n for a in (0,1,2):\n sdevs[:,a] = sdev / grid.step[a]\n\n import Matrix as M\n if len(transforms) == 0:\n transforms = [M.identity_matrix()]\n from _gaussian import sum_of_gaussians\n ijk = empty(xyz.shape, float32)\n matrix = grid.matrix()\n for tf in transforms:\n ijk[:] = xyz\n M.transform_points(ijk, M.multiply_matrices(grid.xyz_to_ijk_transform, tf))\n sum_of_gaussians(ijk, weights, sdevs, cutoff_range, matrix)\n\n from math import pow, pi\n normalization = pow(2*pi,-1.5)*pow(sdev,-3)\n matrix *= normalization\n\n\n\n# -----------------------------------------------------------------------------\n#\ndef point_bounds(xyz, transforms = []):\n\n from _multiscale import bounding_box\n if transforms :\n from numpy import empty, float32\n xyz0 = empty((len(transforms),3), float32)\n xyz1 = empty((len(transforms),3), float32)\n txyz = empty(xyz.shape, float32)\n import Matrix as M\n for i, tf in enumerate(transforms) :\n txyz[:] = xyz\n M.transform_points(txyz, tf)\n xyz0[i,:], xyz1[i,:] = bounding_box(txyz)\n xyz_min, xyz_max = xyz0.min(axis = 0), xyz1.max(axis = 0)\n else:\n xyz_min, xyz_max = bounding_box(xyz)\n\n return xyz_min, xyz_max\n\n\n\n\ndef GetMod ( name ) :\n for m in chimera.openModels.list() :\n if m.name == name :\n return m\n return None\n", "id": "11390322", "language": "Python", "matching_score": 8.424535751342773, "max_stars_count": 6, "path": "Segger/qscores.py" }, { "content": "\n# Copyright (c) 2020 <NAME> - <EMAIL>\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright 
notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport chimera\nimport os\nimport os.path\nimport Tkinter\nimport tkFont\nfrom CGLtk import Hybrid\nimport VolumeData\nimport _multiscale\nimport MultiScale.surface\nimport _surface\nimport numpy\nimport _contour\nimport Matrix\nimport Surface\nimport VolumeViewer\nimport FitMap\nfrom sys import stderr\nfrom time import clock\nimport _contour\nimport chimera.match\nimport time\n\n\nfrom axes import prAxes\nimport _multiscale\nfrom CGLutil.AdaptiveTree import AdaptiveTree\nimport random\nfrom VolumePath import Marker_Set, Marker, Link\nfrom _contour import affine_transform_vertices as transform_vertices\nfrom Matrix import xform_matrix, multiply_matrices, chimera_xform, identity_matrix, invert_matrix, shift_and_angle\nimport struct\n\n\nfrom Rotamers import getRotamers\nfrom chimera.resCode import protein1to3\n\n\ntry :\n from segment_dialog import current_segmentation, segmentation_map\n import molbuild\n reload (molbuild)\nexcept :\n pass\n\n\n#gSigma = 0.6\nmapqVersion = \"1.8.2\"\nshowDevTools = True\n\n\ntry :\n import molref\n reload (molref)\nexcept :\n pass\n\n\nimport qscores\nreload (qscores)\n\nimport mmcif\nreload (mmcif)\n\n\nOML = chimera.openModels.list\n\nisModelZ = False\n\ndlgName = \"mapqdlg\"\ndlgTitle = \"MapQ (v\"+mapqVersion+\")\"\ndlgHelp = 'https://github.com/gregdp/mapq'\n\nif isModelZ :\n dlgName = \"modelzdlg\"\n dlgTitle = \"ModelZ (v1.2)\"\n dlgHelp = 'https://github.com/gregdp/modelz'\n\n\nchargedIons = { \"MG\":2, \"NA\":1, \"CL\":-1, \"CA\":2, \"ZN\":2, \"MN\":2, \"FE\":3, \"CO\":2, \"NI\":2 }\n\natomColors = {'C' : chimera.MaterialColor (0.565,0.565,0.565),\n 'Cbb' : chimera.MaterialColor (0.2,0.6,0.2),\n 'S' : chimera.MaterialColor (1.000,1.000,0.188),\n 'O' : chimera.MaterialColor (1.000,0.051,0.051),\n 'N' : chimera.MaterialColor (0.188,0.314,0.973),\n 'P' : chimera.MaterialColor (1.0, 0.502, 0.0),\n 'H' : chimera.MaterialColor (0.9,.9,.9),\n ' ' : chimera.MaterialColor (0.2,1,.2),\n \"MG\" : chimera.MaterialColor (.4,.4,.6),\n \"NA\" : chimera.MaterialColor (.7,.4,.9),\n \"CL\" : chimera.MaterialColor (0,1,0),\n \"CA\" : chimera.MaterialColor (.4,.4,.6),\n \"ZN\" : chimera.MaterialColor (.4,.4,.6),\n \"MN\" : chimera.MaterialColor (.4,.4,.6),\n \"FE\" : chimera.MaterialColor (.4,.4,.6),\n \"CO\" : chimera.MaterialColor (.4,.4,.6),\n \"NI\" : chimera.MaterialColor (.4,.4,.6)\n}\n\n\natomColors = {'C' : chimera.MaterialColor (0.565,0.565,0.565),\n 'Cbb' : chimera.MaterialColor (0.2,0.6,0.2),\n 'S' : chimera.MaterialColor (1.000,1.000,0.188),\n 'O' : chimera.MaterialColor (1.000,0.051,0.051),\n 'N' : chimera.MaterialColor (0.188,0.314,0.973),\n 'P' : chimera.MaterialColor (1.0, 0.502, 0.0),\n 'H' : chimera.MaterialColor (0.9,.9,.9),\n ' ' : chimera.MaterialColor (0.2,1,.2),\n \"MG\" : chimera.MaterialColor (0,1,0),\n \"NA\" : chimera.MaterialColor (.6,.3,.6),\n \"CL\" : chimera.MaterialColor (.2,.6,.2),\n \"CA\" : chimera.MaterialColor (.4,.4,.6),\n \"ZN\" : 
chimera.MaterialColor (.2,.8,.2),\n \"MN\" : chimera.MaterialColor (.4,.4,.6),\n \"FE\" : chimera.MaterialColor (.4,.4,.6),\n \"CO\" : chimera.MaterialColor (.4,.4,.6),\n \"NI\" : chimera.MaterialColor (.4,.4,.6)\n}\n\n\n\n\ndef umsg ( txt ) :\n print txt\n status ( txt )\n\ndef status ( txt ) :\n txt = txt.rstrip('\\n')\n msg.configure(text = txt)\n msg.update_idletasks()\n\n\nclass MapQ_Dialog ( chimera.baseDialog.ModelessDialog ) :\n\n name = dlgName\n if showDevTools :\n buttons = ( \"Options\", \"Select\", \"Log\", \"Close\" )\n else :\n buttons = ( \"Options\", \"Log\", \"Close\" )\n title = dlgTitle\n help = dlgHelp\n\n\n def fillInUI(self, parent):\n\n self.group_mouse_mode = None\n\n tw = parent.winfo_toplevel()\n self.toplevel_widget = tw\n tw.withdraw()\n\n self.parent = parent\n\n parent.columnconfigure(0, weight = 1)\n\n row = 0\n\n menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)\n tw.config(menu = menubar)\n\n f = Tkinter.Frame(parent)\n f.grid(column=0, row=row, sticky='ew')\n\n #l = Tkinter.Label(f, text=' ')\n #l.grid(column=0, row=row, sticky='w')\n\n\n if 0 :\n menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)\n tw.config(menu = menubar)\n\n file_menu_entries = (\n ('Open Model...', self.LoadModel),\n ('Save Model...', self.SaveModel)\n )\n fmenu = Hybrid.cascade_menu(menubar, 'File', file_menu_entries)\n\n from chimera.tkgui import aquaMenuBar\n aquaMenuBar(menubar, parent, row = 0, columnspan=3)\n\n\n\n # ---------------------------------------------------------------------------------\n\n self.InitVars()\n\n\n if 1 :\n #row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='nsew', pady=0, padx=0)\n\n Tkinter.Grid.columnconfigure(f, 0, weight=1)\n Tkinter.Grid.columnconfigure(ff, 0, weight=1)\n\n Tkinter.Grid.rowconfigure(f, row, weight=1)\n Tkinter.Grid.rowconfigure(ff, 0, weight=1)\n\n\n self.Canvas = Tkinter.Canvas(ff, height=80)\n self.Canvas.grid(column=0, row=0, sticky='nsew')\n\n self.modX = 10; self.modY = 10; self.modH = 30\n self.seqX = 10; self.seqY = 45; self.seqH = 30\n\n self.Canvas.bind(\"<ButtonPress-1>\", lambda event : self.B1_Down ( event ) )\n self.Canvas.bind(\"<Control-ButtonPress-1>\", lambda event : self.B1_Down_Ctrl ( event ) )\n self.Canvas.bind(\"<Shift-ButtonPress-1>\", lambda event : self.B1_Down_Shift ( event ) )\n self.Canvas.bind(\"<Option-ButtonPress-1>\", lambda event : self.B1_Down_Alt ( event ) )\n self.Canvas.bind(\"<Alt-ButtonPress-1>\", lambda event : self.B1_Down_Alt ( event ) )\n self.Canvas.bind(\"<ButtonPress-2>\", lambda event : self.B2_Down (event) )\n self.Canvas.bind(\"<ButtonPress-3>\", lambda event : self.B3_Down (event) )\n self.Canvas.bind(\"<ButtonRelease-1>\", lambda event : self.B1_Up ( event ) )\n self.Canvas.bind(\"<Control-ButtonRelease-1>\", lambda event : self.B1_Up_Ctrl ( event ) )\n self.Canvas.bind(\"<Shift-ButtonRelease-1>\", lambda event : self.B1_Up_Shift ( event ) )\n self.Canvas.bind(\"<Alt-ButtonRelease-1>\", lambda event : self.B1_Up_Alt ( event ) )\n self.Canvas.bind(\"<Option-ButtonRelease-1>\", lambda event : self.B1_Up_Alt ( event ) )\n\n self.Canvas.bind(\"<ButtonRelease-2>\", lambda event : self.B2_Up (event) )\n self.Canvas.bind(\"<Option-ButtonRelease-2>\", lambda event : self.B2_Up_Alt (event) )\n self.Canvas.bind(\"<Alt-ButtonRelease-2>\", lambda event : self.B2_Up_Alt (event) )\n self.Canvas.bind(\"<Control-ButtonRelease-2>\", lambda event : self.B2_Up_Ctrl (event) )\n self.Canvas.bind(\"<Command-ButtonRelease-2>\", lambda event : 
self.B2_Up_Comm (event) )\n self.Canvas.bind(\"<Shift-ButtonRelease-2>\", lambda event : self.B2_Up_Shift (event) )\n\n self.Canvas.bind(\"<ButtonRelease-3>\", lambda event : self.B2_Up (event) )\n self.Canvas.bind(\"<Option-ButtonRelease-3>\", lambda event : self.B2_Up_Alt (event) )\n self.Canvas.bind(\"<Alt-ButtonRelease-3>\", lambda event : self.B2_Up_Alt (event) )\n self.Canvas.bind(\"<Control-ButtonRelease-3>\", lambda event : self.B2_Up_Ctrl (event) )\n self.Canvas.bind(\"<Command-ButtonRelease-3>\", lambda event : self.B2_Up_Comm (event) )\n self.Canvas.bind(\"<Shift-ButtonRelease-3>\", lambda event : self.B2_Up_Shift (event) )\n\n self.Canvas.bind(\"<B1-Motion>\", lambda event : self.B1_Drag ( event ) )\n self.Canvas.bind(\"<B2-Motion>\", lambda event : self.B2_Drag ( event ) )\n self.Canvas.bind(\"<B3-Motion>\", lambda event : self.B3_Drag ( event ) )\n self.Canvas.bind(\"<Motion>\", lambda event : self.Mouse_Move ( event ) )\n self.Canvas.bind(\"<Configure>\", lambda event : self.Canvas_Config (event) )\n self.Canvas.bind(\"<Leave>\", lambda event : self.Canvas_Leave (event) )\n self.Canvas.bind(\"<MouseWheel>\", lambda event : self.Canvas_Wheel (event) )\n\n\n row += 1\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w', pady=0, padx=2)\n\n if 1 :\n ff = Tkinter.Frame(f)\n ff.grid(column=0, row=row, sticky='w', pady=1, padx=0)\n\n l = Tkinter.Label(ff, text=' Map:', anchor=Tkinter.W)\n l.grid(column=0, row=0, sticky='w')\n\n self.dmap = Tkinter.StringVar(parent)\n self.dmapMB = Tkinter.Menubutton ( ff, textvariable=self.dmap, relief=Tkinter.RAISED, width=17 )\n self.dmapMB.grid (column=1, row=0, sticky='we', padx=1)\n self.dmapMB.menu = Tkinter.Menu ( self.dmapMB, tearoff=0, postcommand=self.MapMenu )\n self.dmapMB[\"menu\"] = self.dmapMB.menu\n\n self.cur_dmap = None\n self.SetVisMap ()\n if self.cur_dmap == None :\n self.dmap.set ( \"Open a map ...\" )\n\n\n l = Tkinter.Label(ff, text='Model:', anchor=Tkinter.W)\n l.grid(column=2, row=0, sticky='w')\n\n self.struc = Tkinter.StringVar(parent)\n self.strucMB = Tkinter.Menubutton ( ff, textvariable=self.struc, relief=Tkinter.RAISED, width=17 )\n self.strucMB.grid (column=3, row=0, sticky='we', padx=1)\n self.strucMB.menu = Tkinter.Menu ( self.strucMB, tearoff=0, postcommand=self.StrucMenu )\n self.strucMB[\"menu\"] = self.strucMB.menu\n\n self.cur_mol = None\n self.cur_chains = []\n self.SetVisMol ()\n if self.cur_mol == None :\n self.struc.set ( \"Open a model ...\" )\n\n l = Tkinter.Label(ff, text=\" Chain:\" )\n l.grid(column=4, row=0, sticky='w')\n\n self.chain = Tkinter.StringVar(parent)\n self.chainMB = Tkinter.Menubutton ( ff, textvariable=self.chain, relief=Tkinter.RAISED, width=5 )\n self.chainMB.grid (column=5, row=0, sticky='we', padx=1)\n self.chainMB.menu = Tkinter.Menu ( self.chainMB, tearoff=0, postcommand=self.ChainMenu )\n self.chainMB[\"menu\"] = self.chainMB.menu\n\n b = Tkinter.Button(ff, text=\"...\", command=self.LoadModel)\n b.grid (column=6, row=0, sticky='w', padx=1)\n\n #b = Tkinter.Button(ff, text=\"S\", command=self.SaveModel)\n #b.grid (column=7, row=0, sticky='w', padx=1)\n\n\n l = Tkinter.Label(ff, text=\" Show:\" )\n l.grid(column=12, row=0, sticky='w')\n\n\n b = Tkinter.Button(ff, text=\"Chain\", command=self.AllChain)\n b.grid (column=13, row=0, sticky='w', padx=1)\n\n b = Tkinter.Button(ff, text=\"All\", command=self.AllChains)\n b.grid (column=14, row=0, sticky='w', padx=1)\n\n b = Tkinter.Button(ff, text=\"Sel.\", command=self.ShowOnlySel)\n b.grid (column=15, row=0, sticky='w', 
padx=1)\n\n b = Tkinter.Button(ff, text=\"At.\", command=self.SetSelAtoms)\n b.grid (column=16, row=0, sticky='w', padx=1)\n\n b = Tkinter.Button(ff, text=\"Rib.\", command=self.SetSelRibbon)\n b.grid (column=17, row=0, sticky='w', padx=1)\n\n b = Tkinter.Button(ff, text=\"SCs\", command=self.ShowSCs)\n b.grid (column=18, row=0, sticky='w', padx=1)\n\n b = Tkinter.Button(ff, text=\"~SCs\", command=self.HideSCs)\n b.grid (column=19, row=0, sticky='w', padx=1)\n\n b = Tkinter.Button(ff, text=\"W\", command=self.Wire)\n b.grid (column=20, row=0, sticky='w', padx=1)\n\n b = Tkinter.Button(ff, text=\" \", command=self.HideSel)\n b.grid (column=21, row=0, sticky='w', padx=0)\n\n\n\n if 1 :\n\n l = Tkinter.Label(ff, text=' Go:', fg=\"#777\")\n l.grid(column=35, row=0, sticky='e')\n\n b = Tkinter.Button(ff, text=\"<\", command=self.ZoomBegin)\n b.grid (column=38, row=0, sticky='w', padx=0)\n\n b = Tkinter.Button(ff, text=\">\", command=self.ZoomEnd)\n b.grid (column=39, row=0, sticky='w', padx=0)\n\n\n\n\n # ----------- select panel ----------------------------------\n\n if 0 :\n\n row += 1\n op = Hybrid.Popup_Panel(f)\n ff = op.frame\n ff.grid(row = row, column = 0, sticky = 'news')\n ff.grid_remove()\n #ff.columnconfigure(0, weight=1)\n self.selPanel = op.panel_shown_variable\n\n #ff = Tkinter.Frame(f)\n #ff.grid(column=0, row=row, sticky='w', pady=0, padx=2)\n\n l = Tkinter.Label(ff, text=' Sel:', font = 'TkCaptionFont')\n l.grid(column=1, row=0, sticky='w', pady=1)\n\n\n if 0 :\n #b = Tkinter.Button(ff, text=\"Asp\", command=self.asp )\n #b.grid (column=1, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(ff, text=\"Extr\", command=self.Extract )\n b.grid (column=2, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(ff, text=\"Al 1\", command=self.AlignRes1 )\n b.grid (column=3, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(ff, text=\"Al 2\", command=self.AlignRes2 )\n b.grid (column=4, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(ff, text=\"Avg\", command=self.Avg )\n b.grid (column=5, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(ff, text=\"~Extr\", command=self.CloseExtracted )\n b.grid (column=6, row=0, sticky='w', padx=2)\n\n\n #b = Tkinter.Button(ff, text=\"Sbb\", command=self.BB_Sigma )\n #b.grid (column=8, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(ff, text=\"Z\", command=self.ZScoreSel )\n #b.grid (column=9, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(ff, text=\"Zr\", command=self.RotaZ1 )\n #b.grid (column=10, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(ff, text=\"R1\", command=self.R1 )\n #b.grid (column=11, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(ff, text=\"ExA\", command=self.ExCustA )\n #b.grid (column=12, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(ff, text=\"ExB\", command=self.ExCustB )\n #b.grid (column=13, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(ff, text=\"ExC\", command=self.ExCustC )\n #b.grid (column=14, row=0, sticky='w', padx=2)\n\n\n b = Tkinter.Button(ff, text=\"S-sel\", command=self.S_sel )\n b.grid (column=20, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(ff, text=\"Q-sel\", command=self.Q_sel )\n b.grid (column=21, row=0, sticky='w', padx=2)\n\n if 0 :\n b = Tkinter.Button(ff, text=\"Q-show\", command=self.Q_show )\n b.grid (column=22, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(ff, text=\"SA-Q\", command=self.SA_Q )\n b.grid (column=23, row=0, sticky='w', padx=2)\n\n\n #b = Tkinter.Button(ff, text=\"Ats\", command=self.ShowAts)\n #b.grid (column=25, row=0, sticky='w', padx=10)\n\n if 1 :\n b 
= Tkinter.Button(ff, text=\"Alts\", command=self.FindAlts)\n b.grid (column=28, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(ff, text=\"X-Alts\", command=self.DelAlts)\n b.grid (column=29, row=0, sticky='w', padx=2)\n\n if 0 :\n b = Tkinter.Button(ff, text=\"APro\", command=self.AProfs)\n b.grid (column=28, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(ff, text=\"Ligs\", command=self.Ligs)\n #b.grid (column=43, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(ff, text=\"Scale\", command=self.Scale)\n #b.grid (column=44, row=0, sticky='w', padx=2)\n\n\n\n b = Tkinter.Label(ff, text=\" Str:\")\n b.grid (column=30, row=0, sticky='w', padx=0, pady=1)\n\n self.selText = Tkinter.StringVar(f)\n self.selText.set ( \"\" )\n e = Tkinter.Entry(ff, width=20, textvariable=self.selText)\n e.grid(column=31, row=0, sticky='w', padx=2, pady=1)\n\n\n b = Tkinter.Button(ff, text=\"Sel\", command=self.SelText)\n b.grid (column=32, row=0, sticky='w', padx=2)\n\n\n\n b = Tkinter.Label(ff, text=\"Rad:\")\n b.grid (column=33, row=0, sticky='w', padx=0, pady=1)\n\n self.maskRad = Tkinter.StringVar(f)\n self.maskRad.set ( \"2.5\" )\n e = Tkinter.Entry(ff, width=3, textvariable=self.maskRad)\n e.grid(column=34, row=0, sticky='w', padx=2, pady=1)\n\n\n b = Tkinter.Button(ff, text=\"AddSel\", command=self.AdSel)\n b.grid (column=35, row=0, sticky='w', padx=2)\n\n\n b = Tkinter.Button(ff, text=\"Ds\", command=self.ShowDists)\n b.grid (column=41, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(ff, text=\"Inter\", command=self.Inter)\n b.grid (column=42, row=0, sticky='w', padx=2)\n\n\n b = Tkinter.Button(ff, text=\"Occ\", command=self.Occ)\n b.grid (column=43, row=0, sticky='w', padx=2)\n\n\n b = Tkinter.Button(ff, text=\"Rmsd\", command=self.RMSD)\n b.grid (column=44, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(ff, text=\"RibD\", command=self.RibD)\n b.grid (column=45, row=0, sticky='w', padx=2)\n\n self.selPanel.set(True)\n\n\n\n\n if 1 :\n row += 1\n op = Hybrid.Popup_Panel(f)\n ff = op.frame\n ff.grid(row = row, column = 0, sticky = 'news')\n ff.grid_remove()\n #ff.columnconfigure(0, weight=1)\n self.optionsPanel = op.panel_shown_variable\n self.optionsPanel.set ( showDevTools )\n\n b = Tkinter.Label(ff, text=\" Resolution:\")\n b.grid (column=0, row=0, sticky='w', padx=0, pady=1)\n\n self.mapRes = Tkinter.StringVar(f)\n self.mapRes.set ( \"3\" )\n e = Tkinter.Entry(ff, width=3, textvariable=self.mapRes)\n e.grid(column=1, row=0, sticky='w', padx=0, pady=1)\n\n b = Tkinter.Label(ff, text=\"A \")\n b.grid (column=2, row=0, sticky='w', padx=0, pady=1)\n\n b = Tkinter.Label(ff, text=\" Sigma:\")\n b.grid (column=3, row=0, sticky='w', padx=0, pady=1)\n\n self.sigma = Tkinter.StringVar(f)\n self.sigma.set ( \"0.6\" )\n e = Tkinter.Entry(ff, width=3, textvariable=self.sigma)\n e.grid(column=4, row=0, sticky='w', padx=0, pady=1)\n\n\n self.qmenu = Tkinter.StringVar(parent)\n self.qmenu.set ( \"Q-scores ...\" )\n self.qmenuMB = Tkinter.Menubutton ( ff, textvariable=self.qmenu, relief=Tkinter.RAISED, width=11 )\n self.qmenuMB.grid (column=5, row=0, sticky='we', padx=1)\n self.qmenuMB.menu = Tkinter.Menu ( self.qmenuMB, tearoff=0, postcommand=self.QMenu )\n self.qmenuMB[\"menu\"] = self.qmenuMB.menu\n\n self.cmenu = Tkinter.StringVar(parent)\n self.cmenu.set ( \"Show ...\" )\n self.cmenuMB = Tkinter.Menubutton ( ff, textvariable=self.cmenu, relief=Tkinter.RAISED, width=11 )\n self.cmenuMB.grid (column=6, row=0, sticky='we', padx=1)\n self.cmenuMB.menu = Tkinter.Menu ( self.cmenuMB, tearoff=0, 
postcommand=self.CMenu )\n self.cmenuMB[\"menu\"] = self.cmenuMB.menu\n\n #l = Tkinter.Label(ff, text=' | Seq: ', fg=\"#000\")\n #l.grid(column=21, row=0, sticky='ens')\n\n oft = Hybrid.Checkbutton(ff, 'Gaps', True)\n oft.button.grid(column = 22, row = 0, sticky = 'w')\n self.showGaps = oft.variable\n #self.showRibbon.set ( 1 )\n\n\n #l = Tkinter.Label(ff, text=' Select: ', fg=\"#000\", font = 'TkCaptionFont')\n #l = Tkinter.Label(ff, text=' | On Select: ', fg=\"#000\")\n #l.grid(column=35, row=0, sticky='ens')\n\n oft = Hybrid.Checkbutton(ff, 'Extract', False)\n oft.button.grid(column = 37, row = 0, sticky = 'w')\n self.selExtract = oft.variable\n self.selExtract.set ( 1 )\n\n oft = Hybrid.Checkbutton(ff, 'Mesh', False)\n oft.button.grid(column = 38, row = 0, sticky = 'w')\n self.showMesh = oft.variable\n #self.showRibbon.set ( 1 )\n\n self.showLigands = Tkinter.IntVar()\n self.showLigands.set(True)\n oft = Tkinter.Checkbutton( ff, text=\"Ligands\", variable=self.showLigands )\n oft.grid(column = 39, row = 0, sticky = 'w')\n\n #oft = Hybrid.Checkbutton(ff, 'Preserve', False, command=self.cb)\n #oft.button.grid(column = 39, row = 0, sticky = 'w')\n #self.preserveSel = oft.variable\n self.preserveSel = Tkinter.IntVar()\n oft = Tkinter.Checkbutton( ff, text=\"Keep\", variable=self.preserveSel, command=self.preserveSelCb)\n oft.grid(column = 40, row = 0, sticky = 'w')\n #self.showRibbon.set ( 1 )\n\n self.preserveVol = Tkinter.IntVar()\n oft = Tkinter.Checkbutton( ff, text=\"+Map\", variable=self.preserveVol, command=self.preserveVolCb)\n oft.grid(column = 41, row = 0, sticky = 'w')\n #self.showRibbon.set ( 1 )\n\n self.showH = Tkinter.IntVar()\n oft = Tkinter.Checkbutton( ff, text=\"H\", variable=self.showH)\n oft.grid(column = 42, row = 0, sticky = 'w')\n #self.showRibbon.set ( 1 )\n\n self.showW = Tkinter.IntVar()\n if 0 :\n oft = Tkinter.Checkbutton( ff, text=\"W\", variable=self.showW)\n oft.grid(column = 43, row = 0, sticky = 'w')\n\n\n b = Tkinter.Button(ff, text=\"<\", command=self.KeepBack)\n b.grid (column=45, row=0, sticky='w', padx=0)\n\n b = Tkinter.Button(ff, text=\"Redo\", command=self.SelReLoad)\n b.grid (column=46, row=0, sticky='w', padx=0)\n\n if 0 and showDevTools :\n\n b = Tkinter.Button(ff, text=\"L\", command=self.SelLoad)\n b.grid (column=47, row=0, sticky='w', padx=2)\n\n b = Tkinter.Button(ff, text=\"Near\", command=self.ShowNear)\n b.grid (column=47, row=0, sticky='w', padx=2)\n\n\n if 0 :\n b = Tkinter.Button(ff, text=\"Zone\", command=self.Zone)\n b.grid (column=48, row=0, sticky='w', padx=1, pady=1)\n\n self.zoneRad = Tkinter.StringVar(ff)\n self.zoneRad.set ( \"2\" )\n e = Tkinter.Entry(ff, width=2, textvariable=self.zoneRad)\n e.grid(column=49, row=0, sticky='w', padx=1, pady=1)\n\n\n\n #l = Tkinter.Label(ff, text=' Zoom:', fg=\"#777\")\n l = Tkinter.Label(ff, text=' Zoom:' )\n l.grid(column=60, row=0, sticky='e')\n\n b = Tkinter.Button(ff, text=\"-\", command=self.ZoomMinus)\n b.grid (column=61, row=0, sticky='w', padx=0)\n\n b = Tkinter.Button(ff, text=\"+\", command=self.ZoomPlus)\n b.grid (column=62, row=0, sticky='w', padx=0)\n\n\n #b = Tkinter.Button(ff, text=\"D\", command=self.Domains)\n #b.grid (column=18, row=0, sticky='w', padx=2)\n\n #b = Tkinter.Button(ff, text=\"S\", command=self.SS)\n #b.grid (column=19, row=0, sticky='w', padx=2)\n\n\n\n dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=row,column=0,columnspan=7, padx=1, sticky='we')\n row += 1\n\n\n global msg\n msg = 
Tkinter.Label(parent, width = 60, anchor = 'w', justify = 'left', fg=\"red\", pady=1, padx=10)\n msg.grid(column=0, row=row, sticky='ew')\n self.msg = msg\n\n self.showingAtoms = False\n\n if len ( self.cur_chains ) > 0 :\n self.chain.set ( self.cur_chains[0] )\n #self.ShowCh ( self.cur_chains[0] )\n self.GetSeq ()\n else :\n self.chain.set ( '-' )\n\n #umsg ( 'Select one or more segmented regions then press \"Place Points\" to start' )\n\n callbacks = (self.mouse_down_cb, self.mouse_drag_cb, self.mouse_up_cb)\n #callbacks = (self.mouse_down_cb)\n from chimera import mousemodes\n mousemodes.addFunction('mark mapq', callbacks, self.mouse_mode_icon())\n\n if 0 :\n # bind, unbind in case it was left bound before...\n from chimera import mousemodes\n print \" - unbinding mouse...\"\n button, modifiers = ('3', ['Ctrl'])\n def_mode = mousemodes.getDefault(button, modifiers)\n mousemodes.setButtonFunction(button, modifiers, def_mode)\n self.bound_button = None\n\n\n chimera.openModels.addRemoveHandler(self.ModelClosed, None)\n\n\n def ModelClosed(self, trigger, n, mlist):\n\n # Clear menus that are showing closed models.\n if self.cur_dmap in mlist:\n self.cur_dmap = None\n self.dmap.set ( \"Select a map ...\" )\n\n if self.cur_mol in mlist:\n self.struc.set ( \"Select a model ...\" )\n self.cur_mol = None\n self.cur_chains = []\n self.chain.set ( \"-\" )\n self.RemoveSeq ()\n #self.UpdateSeqFont ()\n #self.UpdateSeq ()\n\n\n def bind_placement_button_cb(self) :\n\n if self.use_mouse.get() :\n print \" - binding mouse...\"\n button, modifiers = ('3', ['Ctrl'])\n from chimera import mousemodes\n mousemodes.setButtonFunction(button, modifiers, 'mark mapq')\n self.bound_button = (button, modifiers)\n elif self.bound_button:\n print \" - unbinding mouse...\"\n button, modifiers = self.bound_button\n from chimera import mousemodes\n def_mode = mousemodes.getDefault(button, modifiers)\n mousemodes.setButtonFunction(button, modifiers, def_mode)\n self.bound_button = None\n\n\n def mouse_mode_icon(self) :\n\n import os.path\n icon_path = os.path.join(os.path.dirname(__file__), 'marker.gif')\n from PIL import Image\n image = Image.open(icon_path)\n from chimera import chimage\n from chimera import tkgui\n icon = chimage.get(image, tkgui.app)\n return icon\n\n def mouse_down_cb(self, viewer, event) :\n\n print \" mouse - \"\n\n #print event.x, event.y\n if 0 :\n print dir(event)\n print event.char\n print event.keycode\n print event.keysym\n print event.keysym_num\n print event.num\n print event.state\n\n hits = []\n import VolumePath.tracer as tracer\n\n if 1 :\n from VolumeViewer import volume_list\n hits.extend(tracer.volume_maxima(event.x, event.y, volume_list()))\n print \"vol\"\n\n if 0 :\n from VolumeViewer import volume_list\n hits.extend(VolumePath.tracer.volume_plane_intercepts(event.x, event.y, volume_list()))\n\n if 0 :\n from Surface import surface_models\n hits.extend(tracer.surface_intercepts(event.x, event.y, surface_models()))\n print \"surf\"\n\n for C, vol in hits :\n print \" --> \", vol.name, \" --> %.1f, %.1f, %.1f\" % (C[0], C[1], C[2])\n self.PlaceAt ( C, vol )\n\n\n\n\n\n #grabbed = (self.move_markers.get() and self.grab_marker(event.x, event.y))\n #if not grabbed:\n # self.add_marker_at_screen_xy(event.x, event.y)\n\n\n\n def mouse_drag_cb(self, viewer, event):\n shift_mask = 1\n shift = (event.state & shift_mask)\n capslock_mask = 2\n capslock = (event.state & capslock_mask)\n #self.move_or_resize_marker(event.x, event.y, shift, capslock):\n\n\n def mouse_up_cb(self, viewer, 
event):\n #self.ungrab_marker()\n #self.pause_marker_placement = False\n #print \"mouse up\"\n pass\n\n\n\n def Options ( self ) :\n self.optionsPanel.set (not self.optionsPanel.get())\n\n\n def Select ( self ) :\n self.selPanel.set (not self.selPanel.get())\n\n\n def Log ( self ) :\n import Idle\n Idle.start_shell()\n\n\n def InitVars ( self ) :\n\n self.mag = 13\n self.seqt = []\n self.boldSeqT = None\n self.drag = ''\n\n #self.sheetBaseClr = numpy.array ( [50.0,205.0,50.0] )\n #self.sheetClr = numpy.array ( [204.0,255.0,204.0] )\n self.sheetBaseClr = numpy.array ( [55.0,55.0,150.0] )\n self.sheetClr = numpy.array ( [150.0,150.0,250.0] )\n self.sheetClrD = self.sheetClr - self.sheetBaseClr\n\n self.helixBaseClr = numpy.array ( [150.0,50.0,50.0] )\n self.helixClr = numpy.array ( [255.0,150.0,150.0] )\n self.helixClrD = self.helixClr - self.helixBaseClr\n\n c = self.helixBaseClr; self.helix1 = \"#\" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')\n c = self.helixClr; self.helix2 = \"#\" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')\n\n self.switch = \"#522\"\n\n c = self.sheetBaseClr; self.strand1 = \"#77F\"\n c = self.sheetClr; self.strand2 = \"#77F\"\n\n c = self.sheetBaseClr; self.sheet1 = \"#\" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')\n c = self.sheetClr; self.sheet2 = \"#\" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')\n\n self.loop1 = \"#999\"\n\n self.selColor = \"#7e7\"\n\n\n self.font = tkFont.Font(family='Courier', size=(self.mag), weight='normal')\n #self.boldFont = tkFont.Font(family='Courier', size=(self.mag+4), weight='bold')\n self.tw = self.font.measure ( \"a\" )\n\n self.seq = \"\"\n\n #self.OrderMods ()\n\n\n def SetVisMap ( self ) :\n dmap = None\n mlist = OML(modelTypes = [VolumeViewer.volume.Volume])\n for m in mlist :\n if m.display and not \"sel_masked\" in m.name :\n dmap = m\n break\n\n if dmap == None :\n if len(mlist) > 0 :\n dmap = mlist[0]\n\n if dmap != None :\n self.dmap.set ( \"[%d] %s\" % (dmap.id,dmap.name) )\n self.cur_dmap = dmap\n\n\n def QMenu ( self ) :\n self.qmenuMB.menu.delete ( 0, 'end' ) # Clear menu\n options = []\n options.append ( \"Calculate for selected atoms\" )\n options.append ( \"Calculate for all atoms in selected chain (single process)\" )\n options.append ( \"Calculate for all atoms in selected chain (2 processes)\")\n #options.append ( \"Calculate for all atoms in selected chain (3 processes)\")\n options.append ( \"Calculate for all atoms in selected chain (4 processes)\")\n #options.append ( \"Calculate for all atoms in selected chain (5 processes)\")\n #options.append ( \"Calculate for all atoms in selected chain (6 processes)\")\n #options.append ( \"Calculate for all atoms in selected chain (7 processes)\")\n options.append ( \"Calculate for all atoms in selected chain (8 processes)\")\n options.append ( \"Calculate for all atoms in selected chain (auto # processses)\" )\n options.append ( \"Load (and calculate stats for selected chain)\" )\n for op in options :\n self.qmenuMB.menu.add_radiobutton ( label=op, variable=self.qmenu,\n command=lambda o=op: self.QMenuSelected(o) )\n\n def QMenuSelected ( self, op ) :\n\n self.qmenu.set ( \"Q-scores ...\" )\n print op\n if \"single\" in op :\n self.CalcAllQ()\n elif \"multiple\" in op :\n self.CalcAllQp()\n elif \"2\" in op :\n self.CalcAllQp(2)\n elif \"4\" in op :\n self.CalcAllQp(4)\n elif \"8\" in op :\n self.CalcAllQp(8)\n elif \"auto\" in op :\n self.CalcAllQp()\n elif \"Load\" in op :\n self.GetQsFromFile ()\n elif \"selected\" in op :\n self.CalcSelQ 
()\n\n\n\n def CMenu ( self ) :\n self.cmenuMB.menu.delete ( 0, 'end' ) # Clear menu\n options = []\n options.append ( \"Residue Q-scores\" )\n options.append ( \"Backbone Q-scores\" )\n options.append ( \"Sidechain Q-scores\" )\n options.append ( \"Atom Q-scores\" )\n options.append ( \"Random color all chains\" )\n for op in options :\n self.cmenuMB.menu.add_radiobutton ( label=op, variable=self.cmenu,\n command=lambda o=op: self.CMenuSelected(o) )\n\n def CMenuSelected ( self, op ) :\n\n self.cmenu.set ( \"Show ...\" )\n print op\n if \"Residue\" in op :\n self.DoColorRes()\n elif \"Backbone\" in op :\n self.DoColorBB()\n elif \"Sidechain\" in op :\n self.DoColorSC()\n elif \"Atom\" in op :\n self.DoColorAtoms ()\n elif \"Random\" in op :\n self.DoColorRandom ()\n\n\n\n def MapMenu ( self ) :\n #print \"Map menu...\"\n self.dmapMB.menu.delete ( 0, 'end' ) # Clear menu\n #self.cur_dmap = None\n #self.dmap.set(\"\")\n mlist = OML(modelTypes = [VolumeViewer.volume.Volume])\n if len(mlist) == 0 :\n self.LoadModel()\n return\n for m in mlist :\n self.dmapMB.menu.add_radiobutton ( label=\"[%d] %s\"%(m.id,m.name), variable=self.dmap,\n command=lambda m=m: self.MapSelected(m) )\n\n\n def MapSelected ( self, dmap ) :\n\n self.cur_dmap = dmap\n print \"Selected \" + dmap.name\n self.dmap.set( \"[%d] %s\" % (dmap.id, dmap.name) )\n\n #self.GetSeq ()\n #self.ZoomBegin ()\n\n\n def GetChains ( self, mol ) :\n ct = {}\n for r in mol.residues:\n ct[r.id.chainId] = 1\n clist = ct.keys()\n clist.sort()\n return clist\n\n\n def SetVisMol ( self ) :\n mol = None\n mlist = OML(modelTypes = [chimera.Molecule])\n for m in mlist :\n if m.display :\n mol = m\n break\n\n if mol == None :\n if len(mlist) > 0 :\n mol = mlist[0]\n\n if mol != None :\n self.struc.set ( \"[%d] %s\" % (mol.id, mol.name) )\n self.cur_mol = mol\n self.cur_chains = self.GetChains ( mol )\n SetBBAts ( mol )\n\n\n def StrucSelected ( self, mol ) :\n\n self.cur_mol = mol\n print \"Selected \", mol.name, \" - \", mol.id\n if mol :\n\n #mlist = OML(modelTypes = [chimera.Molecule])\n #for m in mlist :\n # m.display = False\n\n mol.display = True\n\n self.cur_chains = self.GetChains ( mol )\n\n if len(self.cur_chains) == 0 :\n self.chain.set ( \"-\" )\n elif self.chain.get() in self.cur_chains :\n print \" - ch \" + self.chain.get() + \" already sel\"\n #self.ShowCh ( self.chain.get() )\n else :\n self.chain.set ( self.cur_chains[0] )\n #self.ShowCh ( self.chain.get() )\n\n\n SetBBAts ( mol )\n self.parent.after(100, self.DoSeq)\n\n\n def DoSeq ( self ) :\n #print \"after 100\"\n\n self.GetSeq ()\n #self.ZoomBegin ()\n\n if self.cur_mol != None :\n self.ShowQScores ()\n\n\n def ChainSelected ( self, ch ) :\n print \" - sel chain: \", ch, self.chain.get()\n #self.ShowCh ( ch )\n self.parent.after(100, self.DoSeq)\n\n\n\n def StrucMenu ( self ) :\n self.strucMB.menu.delete ( 0, 'end' ) # Clear menu\n mlist = OML(modelTypes = [chimera.Molecule])\n if len(mlist) == 0 :\n self.LoadModel()\n return\n for m in mlist :\n self.strucMB.menu.add_radiobutton ( label=\"[%d] %s\"%(m.id,m.name), variable=self.struc,\n command=lambda m=m: self.StrucSelected(m) )\n\n def ChainMenu ( self ) :\n self.chainMB.menu.delete ( 0, 'end' ) # Clear menu\n #print \" - chain menu\"\n #print self.cur_chains\n for ch in self.cur_chains :\n self.chainMB.menu.add_radiobutton ( label=ch, variable=self.chain, command=lambda ch=ch: self.ChainSelected(ch) )\n\n self.chainMB.menu.add_radiobutton ( label=\"All\", variable=self.chain, command=lambda ch=\"All\": 
self.ChainSelected(\"All\") )\n\n\n\n def loadFile ( self, path ) :\n\n print \"Loading - %s\" % path\n\n ext = os.path.splitext ( path )[1]\n mol = None\n\n if ext == \".cif\" :\n start = time.time()\n mol = mmcif.LoadMol2 ( path, log=False )\n print \"Loaded %s in %.1fs\" % (mol.name, time.time()-start)\n\n elif ext == \".mrc\" or ext == \".map\" or ext == \".ccp4\" :\n om = chimera.openModels.open ( path )[0]\n chimera.runCommand ( \"vol #%d style surface region all step 1\" % om.id )\n for sp in om.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 :\n sp.display = False\n else :\n #sp.displayStyle = sp.Mesh\n sp.color = sp.color = (.7, .7, .7, .7)\n\n self.MapSelected (om)\n\n elif ext == \".pdb\" or ext == \".ent\" :\n mol = chimera.openModels.open ( path )[0]\n\n if mol :\n mmcif.ColorMol ( mol )\n\n #def nucleicOff():\n from NucleicAcids.cmd import sidechain\n sidechain(\"atoms\", sel=\"#%d\" % mol.id)\n from chimera.resCode import nucleic3to1\n for r in mol.residues :\n if r.type in nucleic3to1 :\n r.fillDisplay = False\n\n self.struc.set ( \"[%d] %s\" % (mol.id, mol.name) )\n self.cur_mol = mol\n self.cur_chains = self.GetChains ( mol )\n if len ( self.cur_chains ) > 0 :\n self.chain.set ( self.cur_chains[0] )\n self.GetSeq ()\n self.ShowQScores ()\n\n\n\n def load ( self, okay, dialog ):\n if okay:\n paths = dialog.getPaths ( )\n print \"%d files\" % len(paths)\n\n # load maps first...\n for path in paths :\n ext = os.path.splitext ( path )[1]\n if ext == \".mrc\" or ext == \".map\" or ext == \".ccp4\" :\n self.loadFile ( path )\n\n # then models...\n for path in paths :\n ext = os.path.splitext ( path )[1]\n if ext == \".pdb\" or ext == \".cif\" or ext == \".ent\" :\n self.loadFile ( path )\n\n def LoadModel ( self ) :\n init = None\n mol = None\n for m in chimera.openModels.list() :\n if type(m) == chimera.Molecule and m.display == True and hasattr ( m, 'openedAs' ) :\n init = os.path.split ( m.openedAs[0] ) [0]\n break\n if type(m) == VolumeViewer.volume.Volume :\n init = os.path.split( m.data.path ) [0]\n\n if init == None :\n init = \"/Users/greg/Box Sync/_data\"\n\n print \"init: %s\" % init\n\n if 1 :\n from OpenSave import OpenModeless\n OpenModeless ( title = 'Open Model',\n #filters = [('TXT', '*.txt', '.txt')],\n filters = [],\n initialfile = init, command = self.load )\n\n else :\n fpath = \"/Users/greg/Box Sync/_data/problems/emd_30342/7cec.cif\"\n\n\n\n\n\n def SaveModel ( self ) :\n print \"save\"\n\n mol = None\n for m in chimera.openModels.list() :\n if type(m) == chimera.Molecule :\n mol = m\n\n fpath = \"/Users/greg/Box Sync/_data/problems/emd_30342/7cec_Q.cif\"\n print \"Writing %s -> %s\" % (mol.name, fpath)\n\n mmcif.WriteMol ( mol, fpath )\n\n\n\n def DoColorBB ( self ) :\n\n self.UpdateModColor ( \"bb\" )\n # self.RandColorChains()\n\n def DoColorSC ( self ) :\n\n self.UpdateModColor ( \"sc\" )\n # self.RandColorChains()\n\n\n def DoColorRes ( self ) :\n\n self.UpdateModColor ( \"res\" )\n # self.RandColorChains()\n\n\n def DoColorAtoms ( self ) :\n\n self.UpdateModColor ( \"ats\" )\n # self.RandColorChains()\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(self.cur_mol.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 2.0)\n\n doRess = self.GetCurRess()\n for r in doRess :\n for at in r.atoms :\n at.label = \"\"\n\n\n if 1 :\n for at in chimera.selection.currentAtoms () 
:\n if at.display == True :\n if 1 and hasattr (at, 'Q1') and hasattr (at, 'Q2') :\n at.label = \"(%.2f)\" % ( (at.Q1+at.Q2)/2.0 )\n elif hasattr (at, 'Q') :\n at.label = \"%.2f\" % at.Q\n\n at.labelColor = chimera.MaterialColor (0,0,0,1)\n\n else :\n doRess = chimera.selection.currentResidues()\n #if len(doRess) == 0 :\n # doRess = self.GetCurRess()\n\n if len(doRess) > 0 :\n for r in doRess :\n for at in r.atoms :\n if at.display == True :\n if 1 and hasattr (at, 'Q1') and hasattr (at, 'Q2') :\n at.label = \"(%.2f)\" % ( (at.Q1+at.Q2)/2.0 )\n elif hasattr (at, 'Q') :\n at.label = \"%.2f\" % at.Q\n\n at.labelColor = chimera.MaterialColor (0,0,0,1)\n #at.labelOffset = chimera.Vector(0,0,0)\n\n nats = self.AtsWithin ( r.atoms, 3.0, allAtTree )\n for at in nats :\n if at.display == True :\n if 1 and hasattr (at, 'Q1') and hasattr (at, 'Q2') :\n at.label = \"(%.2f)\" % ( (at.Q1+at.Q2)/2.0 )\n elif hasattr (at, 'Q') :\n at.label = \"%.2f\" % at.Q\n\n at.labelColor = chimera.MaterialColor (0,0,0,1)\n #at.labelOffset = chimera.Vector(0,0,0)\n\n\n\n # at.label, labelColor, labelCoord, labelOffset\n # at.label = \"HI\"\n # at.labelColor = chimera.MaterialColor (0,0,0,1)\n umsg ( \"Labeled atoms\" )\n\n\n\n def DoColorRandom ( self ) :\n\n self.RandColorChains ()\n\n\n\n def UpdateSurfColor ( self ) :\n\n print \" - surf of %s, by %s\" % ( self.cur_dmap.name, self.cur_mol.name )\n\n numAt = 0\n for r in self.cur_mol.residues :\n for at in r.atoms :\n if at.element.name == \"H\" :\n pass\n else :\n numAt += 1\n\n allAtPos = numpy.zeros ( (numAt, 3) )\n allAts = [None] * numAt\n\n numAt = 0\n for r in self.cur_mol.residues :\n for at in r.atoms :\n if at.element.name == \"H\" :\n pass\n else :\n allAtPos[numAt] = at.coord().data()\n allAts[numAt] = at\n at.allPtI = numAt\n numAt += 1\n\n\n print \" - tree with %d ats\" % numAt\n allAtTree = AdaptiveTree ( allAtPos.tolist(), allAts, 4.0)\n print \" - done\"\n\n\n\n\n\n def UpdateModColor ( self, colorMod ) :\n\n ress = []\n try :\n ress = self.seqRes\n except :\n pass\n\n if len ( ress ) == 0 :\n if self.cur_mol :\n ress = self.cur_mol.residues\n else :\n umsg ( \"No molecule/chain selected?\" )\n return\n\n if not hasattr (self, 'scores') :\n umsg ( \"No scores... press Q, Qp, or Qf button first\" )\n return\n\n foundScore = True\n for ri, r in enumerate ( ress ) :\n if r != None and not hasattr (r, 'Q') :\n foundScore = False\n\n if not foundScore :\n umsg ( \"No scores... 
press Calc or Load button first\" )\n return\n\n\n minScore, maxScore = 0,0\n if colorMod == \"sc\" :\n minScore, maxScore = self.minScore1, self.maxScore1\n else :\n minScore, maxScore = self.minScore2, self.maxScore2\n\n cH = numpy.array( [0.0,1.0,0.0] )\n cL = numpy.array( [1.0,0.0,0.0] )\n\n #cH = numpy.array( [50.0/255.0,250.0/255.0,50.0/255.0] )\n #cL = numpy.array( [250.0/255.0,50.0/255.0,50.0/255.0] )\n\n\n for ri, r in enumerate ( ress ) :\n sc = None\n if r == None :\n continue\n #sc = self.scores[ri] if colorSC else self.scores2[ri]\n if colorMod == \"sc\" :\n sc = r.scQ if hasattr (r, 'scQ') else 0\n elif colorMod == \"bb\" :\n sc = r.bbQ if hasattr (r, 'bbQ') else 0\n else :\n sc = r.Q if hasattr (r, 'Q') else 0\n\n if sc == None :\n r.ribbonColor = chimera.MaterialColor ( .7, .7, .7, 1.0 )\n for at in r.atoms :\n #at.color = r.ribbonColor\n try :\n at.color = atomColors[at.element.name.upper()]\n except :\n at.color = atomColors[' ']\n\n else :\n h = (sc - minScore) / (maxScore - minScore)\n if h > 1 : h = 1\n if h < 0 : h = 0\n c = h * cH + (1-h) * cL\n r.ribbonColor = chimera.MaterialColor ( c[0], c[1], c[2], 1.0 )\n for at in r.atoms :\n #at.color = r.ribbonColor\n try :\n at.color = atomColors[at.element.name.upper()]\n except :\n at.color = atomColors[' ']\n\n\n\n\n\n def RandColorChains ( self ) :\n\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n m = self.cur_mol\n\n from random import random as rand\n\n ct = {}\n for r in m.residues: ct[r.id.chainId] = 1\n clist = ct.keys()\n clist.sort()\n chains_clrs = {}\n cnames = \"\"\n\n for ci, cid in enumerate ( clist ) :\n clr = ( rand()*.8+.1, rand()*.8+.1, rand()*.8+.1 )\n chains_clrs[cid] = chimera.MaterialColor ( clr[0], clr[1], clr[2], 1.0 )\n cnames = cnames + cid\n\n print \"%s - color ribbon for %d chains -\" % ( m.name, len(cnames) ), cnames\n\n # color atoms\n for r in m.residues :\n clr = chains_clrs[r.id.chainId]\n r.ribbonColor = clr\n for at in r.atoms :\n at.color = clr\n\n\n def AllChain ( self ) :\n\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n chainId = self.chain.get()\n if len(chainId) == 0 :\n umsg (\"Select a chain first\")\n return\n\n umsg ( \"Showing mol %s chain %s\" % (self.cur_mol.name, chainId) )\n\n SetBBAts ( self.cur_mol )\n #ct = {}\n #for r in self.cur_mol.residues: ct[r.id.chainId] = 1\n #clist = ct.keys()\n #clist.sort()\n\n for r in self.cur_mol.residues :\n if r.id.chainId == chainId :\n if r.isProt or r.isNA :\n r.ribbonDisplay = True\n r.ribbonDrawMode = 2\n else :\n r.ribbonDisplay = False\n for at in r.atoms :\n at.drawMode = at.EndCap\n at.display = True\n else :\n r.ribbonDisplay = False\n for at in r.atoms :\n #at.drawMode = at.EndCap\n at.display = False\n\n\n def ShowOnlySel ( self ) :\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n m = self.cur_mol\n\n rsel = chimera.selection.currentResidues ()\n\n if len(rsel) == 0 :\n umsg (\"Show only selected residues - no residue found to be selected\")\n return\n\n risel = {}\n for r in rsel :\n risel[\"%d.%s\" % (r.id.position, r.id.chainId)] = 1\n\n for r in m.residues :\n rid = \"%d.%s\" % (r.id.position, r.id.chainId)\n if rid in risel :\n r.ribbonDisplay = not self.showingAtoms\n for at in r.atoms :\n if at.element.name == \"H\" :\n at.display = False\n else :\n at.display = True\n else :\n r.ribbonDisplay = False\n for at in r.atoms :\n at.display = False\n\n\n\n def FindAlts ( self ) :\n if self.cur_mol == None :\n umsg (\"Select a molecule 
first\")\n return\n\n m = self.cur_mol\n\n atMap = {}\n for r in m.residues :\n\n hasAlt = False\n for at in r.atoms :\n if len(at.altLoc) > 0 :\n hasAlt = True\n break\n\n if hasAlt :\n r.ribbonDisplay = True\n for at in r.atoms :\n if at.element.name == \"H\" :\n at.display = False\n else :\n at.display = True\n atMap[at] = 1\n at.drawMode = at.EndCap\n if at.element.name.upper() in atomColors :\n at.color = atomColors[at.element.name.upper()]\n else :\n at.color = atomColors[\" \"]\n else :\n r.ribbonDisplay = True\n for at in r.atoms :\n at.display = False\n\n for bond in m.bonds :\n #if bond.atoms[0] in atMap or bond.atoms[1] in atMap :\n bond.display = bond.Smart\n bond.drawMode = bond.Stick\n\n\n\n\n def DelAlts ( self ) :\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n m = self.cur_mol\n\n atMap = {}\n for r in m.residues :\n\n altScores = {}\n for at in r.atoms :\n if at.isSC :\n alt = \"_\" if at.altLoc == '' else at.altLoc\n if alt in altScores :\n altScores[alt].append ( at.Q )\n else :\n altScores[alt] = [at.Q]\n\n if len ( altScores.keys() ) > 1 :\n #print \" - res %s %d.%s\" % (r.type, r.id.position, r.id.chainId)\n keepAlt = ''\n maxScore = 0\n for alt, scores in altScores.iteritems() :\n avg = numpy.mean(scores)\n #print \" %s: %.2f - %d\" % (alt, avg, len(scores))\n if avg > maxScore :\n keepAlt = alt\n maxScore = avg\n print \" - %s %d.%s, keeping %s score %.2f\" % (r.type, r.id.position, r.id.chainId, keepAlt, maxScore)\n\n for at in r.atoms :\n if len(at.altLoc) > 0 :\n if at.altLoc == keepAlt :\n at.altLoc = ''\n else :\n m.deleteAtom ( at )\n\n\n\n\n def AllChains ( self ) :\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n m = self.cur_mol\n\n #ct = {}\n #for r in m.residues: ct[r.id.chainId] = 1\n #clist = ct.keys()\n #clist.sort()\n\n for r in m.residues :\n if (\"CA\" in r.atomsMap and \"N\" in r.atomsMap and \"C\" in r.atomsMap) or (\"O3'\" in r.atomsMap and \"O5'\" in r.atomsMap) :\n r.ribbonDisplay = True\n r.ribbonDrawMode = 2\n else :\n r.ribbonDisplay = False\n for at in r.atoms :\n #at.drawMode = at.Ball\n at.display = True\n\n\n def GetCurRess ( self ) :\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n chainId = self.chain.get()\n if len(chainId) == 0 :\n umsg (\"Select a chain first\")\n return []\n\n ress = []\n for r in self.cur_mol.residues :\n if r.id.chainId == chainId :\n ress.append ( r )\n\n return ress\n\n\n def SetSelRibbon ( self ) :\n\n selRess = chimera.selection.currentResidues()\n if len(selRess) > 0 :\n self.SetDrawMode ( chimera.selection.currentResidues(), showRibbon = True )\n else :\n self.SetDrawMode ( self.GetCurRess(), showRibbon = True )\n\n self.showingAtoms = False\n\n\n def SetSelAtoms ( self ) :\n\n selRess = chimera.selection.currentResidues()\n if len(selRess) > 0 :\n self.SetDrawMode ( chimera.selection.currentResidues(), showRibbon = False )\n print \" - on %d res\" % len(selRess)\n else :\n self.SetDrawMode ( self.GetCurRess(), showRibbon = False )\n\n self.showingAtoms = True\n\n\n\n\n def SetDrawMode ( self, ress, showRibbon = None ) :\n\n #if showRibbon == None :\n # showRibbon = segmod_dialog().showRibbon.get()\n\n #showRibbon = True\n\n #SetBBAts ( ress[0].molecule )\n\n for m in chimera.openModels.list() :\n if type(m) == chimera.Molecule :\n if not hasattr ( m, 'bbats' ) :\n SetBBAts(m)\n m.bbats = True\n\n\n #for at in ress[0].molecule.atoms :\n # at.drawMode = at.EndCap\n # at.display = False # not showRibbon\n\n #for res in 
ress[0].molecule.residues :\n # res.ribbonDisplay = res.ribbonDisplay\n\n showH = self.showH.get()\n\n atMap = {}\n #atI = 0\n #c1 = (1.0,0.0,0.0,1)\n #c1 = (1.0,0.0,0.0,1)\n for res in ress :\n for at in res.atoms :\n\n if not hasattr (res, 'isProt') :\n SetBBAts (res.molecule)\n\n if res.isProt or res.isNA :\n at.drawMode = at.EndCap\n\n if at.element.name == \"H\" :\n at.display = showH\n else :\n at.display = True # not showRibbon\n\n if at.element.name.upper() in atomColors :\n at.color = atomColors[at.element.name.upper()]\n else :\n at.color = atomColors[\" \"]\n atMap[at] = 1\n\n res.ribbonDisplay, res.ribbonDrawMode = showRibbon, res.Ribbon_Round\n\n\n #f = float(atI) / float(len(ress)-1)\n #res.ribbonColor = chimera.MaterialColor( f*0.8+0.2, 0.02, (1-f)*0.8+0.2, 1.0 );\n #atI+=1\n\n for bond in ress[0].molecule.bonds :\n if bond.atoms[0] in atMap or bond.atoms[1] in atMap :\n bond.display = bond.Smart\n bond.drawMode = bond.Stick\n\n\n def Wire ( self ) :\n\n showH = self.showH.get()\n\n selRess = chimera.selection.currentResidues()\n if len(selRess) > 0 :\n\n atMap = {}\n for res in selRess :\n for at in res.atoms :\n if res.isProt or res.isNA :\n at.drawMode = at.EndCap\n at.display = True # not showRibbon\n if showH == False and at.element.name == \"H\" :\n at.display = False\n if at.element.name.upper() in atomColors :\n at.color = atomColors[at.element.name.upper()]\n else :\n at.color = atomColors[\" \"]\n atMap[at] = 1\n\n res.ribbonDisplay, res.ribbonDrawMode = False, res.Ribbon_Round\n\n\n for bond in selRess[0].molecule.bonds :\n if bond.atoms[0] in atMap or bond.atoms[1] in atMap :\n bond.display = bond.Smart\n bond.drawMode = bond.Wire\n\n\n def ShowAts ( self ) :\n\n for mod in chimera.openModels.list() :\n if type(mod) == chimera.Molecule and mod.display == True :\n for res in mod.residues :\n #if res.id.position in rs and res.id.chainId == cid :\n if res.id.position in rs :\n for at in res.atoms :\n at.drawMode = at.EndCap\n at.display = True\n try :\n at.color = atomColors[at.element.name.upper()]\n except :\n at.color = atomColors[\" \"]\n\n\n\n\n def HideSCs ( self ) :\n\n for mol in chimera.selection.currentMolecules() :\n if 1 or not hasattr ( mol, 'bbats' ) :\n SetBBAts(mol)\n mol.bbats = True\n\n ress = chimera.selection.currentResidues()\n if len(ress) == 0 :\n ress = self.GetCurRess()\n\n for res in ress :\n #if res.id.position in rs and res.id.chainId == cid :\n for at in res.atoms :\n #at.drawMode = at.EndCap\n\n if not hasattr (at, 'isBB') :\n SetBBAts (at.molecule)\n\n if self.showingAtoms :\n at.display = at.isBB\n else :\n at.display = at.isBB and not at.isSugar\n #if at.residue.isNA : at.display = at.isBB\n\n #try :\n # at.color = atomColors[at.element.name.upper()]\n #except :\n # at.color = atomColors[\" \"]\n\n\n def ShowSCs ( self ) :\n\n for mol in chimera.selection.currentMolecules() :\n if 1 or not hasattr ( mol, 'bbats' ) :\n SetBBAts(mol)\n mol.bbats = True\n\n ress = chimera.selection.currentResidues()\n if len(ress) == 0 :\n ress = self.GetCurRess()\n\n showH = self.showH.get()\n\n for res in ress :\n for at in res.atoms :\n #at.drawMode = at.EndCap\n if at.element.name == \"H\" :\n at.display = showH\n else :\n at.display = True\n\n try :\n at.color = atomColors[at.element.name.upper()]\n except :\n at.color = atomColors[\" \"]\n\n\n def ShowNear ( self ) :\n\n for mol in chimera.selection.currentMolecules() :\n if 1 or not hasattr ( mol, 'bbats' ) :\n SetBBAts(mol)\n mol.bbats = True\n\n ress = chimera.selection.currentResidues()\n if 
len(ress) == 0 :\n ress = self.GetCurRess()\n\n print \"Near %d res:\" % len(ress)\n #for r in ress :\n # print \"%s.%d.%s - %d atoms\" % (r.type, r.id.position, r.id.chainId, len(r.atoms))\n\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(self.cur_mol.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 2.0)\n\n #chimera.selection.clearCurrent ()\n\n nearRes = {}\n for r in ress :\n nats = self.AtsWithin ( r.atoms, 4.0, allAtTree )\n for at in nats :\n nearRes[at.residue] = 1\n\n for r in nearRes.keys() :\n #print \" -- %s.%d.%s - %d atoms\" % (r.type, r.id.position, r.id.chainId, len(r.atoms))\n #chimera.selection.mergeCurrent ( chimera.selection.EXTEND, chimera.selection.OSLSelection (\"\") )\n if r in ress :\n continue\n chimera.selection.addCurrent ( r )\n for at in r.atoms :\n #at.drawMode = at.EndCap\n at.display = True\n if at.element.name.upper() in atomColors :\n at.color = atomColors[at.element.name.upper()]\n\n\n\n def Zone ( self ) :\n\n print \"Zone:\", self.zoneRad.get()\n\n try :\n rad = float ( self.zoneRad.get() )\n except :\n umsg ( \"Enter a number for zone radius\" )\n return\n\n atoms = chimera.selection.currentAtoms()\n if len(atoms) == 0 :\n umsg ( \"Nothing selected\" )\n return\n\n if self.cur_dmap == None :\n umsg ( \"Select a Map\" )\n return\n\n dmap = self.cur_dmap\n m = atoms[0].molecule\n\n from _multiscale import get_atom_coordinates\n points = get_atom_coordinates ( atoms, transformed = True )\n\n mods = {}\n for m in chimera.openModels.list() :\n mods[m.name] = m\n\n for i in range ( 10000 ) :\n nname = os.path.splitext(dmap.name)[0] + \"_Z%.0f_%d\" % (rad,i+1) + \".mrc\"\n if not nname in mods :\n break\n\n cmap = self.PtsToMap ( points, dmap, rad, nname, showMesh=False, alpha=0.2 )\n #self.PtsToMap ( points, dmap, R, dmap.name + label, False, alpha=0.2 if self.showMesh.get() else 0.4 )\n\n umsg ( \"Made zone map: \" + nname )\n dmap.display = False\n\n chimera.runCommand ( \"vol #%d style surface region all step 1\" % cmap.id )\n\n\n\n def Inter ( self ) :\n\n for mol in chimera.selection.currentMolecules() :\n if 1 or not hasattr ( mol, 'bbats' ) :\n SetBBAts(mol)\n mol.bbats = True\n\n print \"\"\n print \"Interactions for: %s, %d atoms\" % ( self.cur_mol, len(self.cur_mol.atoms) )\n\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(self.cur_mol.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 2.0)\n\n\n polar, hyd, wat, watm = {}, {}, {}, {}\n\n def setI_ (I, R1, R2) :\n if R1 in I :\n if R2 in I[R1] :\n I[R1][R2] += 1\n else :\n I[R1][R2] = 1\n else :\n I[R1] = {}\n I[R1][R2] = 1\n\n def setI (I, R1, R2) :\n setI_(I, R1, R2)\n setI_(I, R2, R1)\n\n\n def addI (at1, at2) :\n\n R1 = at1.residue.id.chainId\n R2 = at2.residue.id.chainId\n\n if (at1.element.name == \"O\" or at1.element.name == \"N\") and (at2.element.name == \"O\" or at2.element.name == \"N\") :\n setI ( polar, R1, R2 )\n else :\n setI ( hyd, R1, R2 )\n\n\n for at in self.cur_mol.atoms :\n\n nats = self.AtsWithin ( [at], 3.5, allAtTree )\n\n chains = {}\n if at.residue.type == \"HOH\" :\n for nat in nats :\n if nat.residue.type == \"HOH\" :\n continue\n chains[nat.residue.id.chainId] = nat\n\n if len(chains.keys()) == 2 :\n c1, c2 = chains.keys()\n a1, a2 = 
chains[c1], chains[c2]\n\n if (a1.coord() - a2.coord()).length > 3.5 :\n setI ( watm, chains.keys()[0], chains.keys()[1] )\n else :\n setI ( wat, chains.keys()[0], chains.keys()[1] )\n\n if len(chains.keys()) > 2 :\n print \"wat:\", chains.keys()\n\n else :\n for nat in nats :\n if nat.residue.type == \"HOH\" :\n continue\n if at.residue.id.chainId == nat.residue.id.chainId :\n continue\n addI ( at, nat )\n\n\n print \"Polar: \"\n print polar\n\n print \"Hydrophobic: \"\n print hyd\n\n print \"Water: \"\n print wat\n\n print \"Water Mediated: \"\n print watm\n\n\n\n\n\n\n\n def ShowDists ( self ) :\n\n m1, m2 = [m for m in chimera.openModels.list() if m.display==True and type(m) == chimera.Molecule]\n print \" - m1: %s\" % m1.name\n print \" - m2: %s\" % m2.name\n\n\n amap = {}\n for at in m2.atoms :\n atId = \"%d.%s.%s.%s\" % (at.residue.id.position,at.residue.id.chainId,at.name,at.altLoc)\n amap[atId] = at\n\n from chimera.resCode import protein3to1\n\n tt, tt2, nt = {}, {}, {}\n\n for at in m1.atoms :\n atId = \"%d.%s.%s.%s\" % (at.residue.id.position,at.residue.id.chainId,at.name,at.altLoc)\n if atId in amap :\n at2 = amap[atId]\n d = (at.coord()-at2.coord()).length\n else :\n print \" - not found:\", atId\n continue\n\n if at.display and not at.residue.type in protein3to1 :\n if not at.name in tt :\n tt[at.name] = d; tt2[at.name] = d*d; nt[at.name] = 1.0\n else :\n tt[at.name] += d; tt2[at.name] += d*d; nt[at.name] += 1.0\n\n if at.residue.type in protein3to1 :\n if at.isBB :\n if not \"BB\" in tt :\n tt[\"BB\"] = d; tt2[\"BB\"] = d*d; nt[\"BB\"] = 1.0\n else :\n tt[\"BB\"] += d; tt2[\"BB\"] += d*d; nt[\"BB\"] += 1.0\n else :\n if not \"SC\" in tt :\n tt[\"SC\"] = d; tt2[\"SC\"] = d*d; nt[\"SC\"] = 1.0\n else :\n tt[\"SC\"] += d; tt2[\"SC\"] += d*d; nt[\"SC\"] += 1.0\n\n\n for tp, D in tt.iteritems () :\n N, D2 = nt[tp], tt2[tp]\n rmsd = numpy.sqrt ( D2/N )\n avgd = D/N\n print \"%s - %.0f atoms, avgd %.5f, rmsd: %.5f\" % ( tp, N, avgd, rmsd )\n\n\n\n\n def ShowCh ( self, ch ) :\n\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n print \" - showing chain:\", ch\n\n SetBBAts ( self.cur_mol )\n\n m = self.cur_mol\n print \" - cur mol:\", m.name\n\n ct = {}\n for r in m.residues: ct[r.id.chainId] = 1\n clist = ct.keys()\n clist.sort()\n\n atsMap = {}\n for r in m.residues :\n show = True if r.id.chainId == ch else False\n\n if r.isProt or r.isNA :\n r.ribbonDisplay = show\n #r.ribbonDrawMode = 2\n for at in r.atoms :\n at.display = False\n else :\n r.ribbonDisplay = False\n for at in r.atoms :\n #at.drawMode = at.Ball\n at.display = show\n atsMap[at] = 1\n if show :\n at.drawMode = at.EndCap\n if at.element.name.upper() in atomColors :\n at.color = atomColors[at.element.name.upper()]\n else :\n at.color = atomColors[\" \"]\n for bond in m.bonds :\n #if bond.atoms[0] in atsMap or bond.atoms[1] in atsMap :\n bond.display = bond.Smart\n #else :\n # bond.display = bond.Never\n\n\n\n\n def GetMod ( self, name ) :\n for m in chimera.openModels.list() :\n if name != None and len(name) > 0 :\n if m.name == name :\n return m\n else :\n if m.display == True :\n return m\n return None\n\n\n\n def GetSeq ( self ) :\n\n if self.cur_mol == None :\n umsg ( \"No selected molecule\" )\n return\n\n if len ( self.chain.get() ) == 0 :\n umsg ( \"No selected chain\" )\n return\n\n self.RemoveSeq ()\n print \" - removed seq\"\n\n try :\n print self.cur_mol.name\n except :\n print \" - mol may have been closed\"\n return\n\n self.GetSeqFromStruc ( self.cur_mol, self.chain.get() 
)\n\n if len(self.seq) > 0 :\n\n print \"-- seq from open mol -- %d res\" % len(self.seq)\n #print self.seq\n\n self.seqt = []\n self.seqSheetR = [None] * len(self.seq)\n self.seqHelixR = [None] * len(self.seq)\n self.seqScoreR = [None] * len(self.seq)\n self.seqScoreR2 = [None] * len(self.seq)\n self.scores2 = [None] * len(self.seq)\n self.scores = [None] * len(self.seq)\n\n self.UpdateSeqFont ()\n self.UpdateSeq ()\n\n return True\n\n return False\n\n\n\n def RemoveSeq (self) :\n\n if self.seq == \"\" :\n return\n\n for si in range ( len(self.seq) ) :\n res = self.seq[si]\n pred = self.pred[si]\n conf = float ( self.conf[si] ) / 10.0\n\n if pred == 'E' :\n if self.seqSheetR[si] != None :\n self.Canvas.delete ( self.seqSheetR[si] )\n\n elif pred == 'H' :\n if self.seqHelixR[si] != None :\n self.Canvas.delete ( self.seqHelixR[si] )\n\n if self.seqScoreR[si] != None :\n self.Canvas.delete ( self.seqScoreR[si] )\n\n if self.seqScoreR2[si] != None :\n self.Canvas.delete ( self.seqScoreR2[si] )\n\n\n # box showing selected Residue\n if hasattr ( self, 'seqMouseR' ) :\n self.Canvas.delete ( self.seqMouseR )\n del self.seqMouseR\n\n if hasattr ( self, 'seqText' ) :\n self.Canvas.delete ( self.seqText )\n self.seqText = None\n del self.seqText\n\n self.seqSel = None\n self.seq = \"\"\n self.UpdateSeqSel ()\n\n\n\n def GetSeqFromStruc ( self, mol, chainId ) :\n\n print \"Getting seq from %s, %s\" % (mol.name, chainId)\n\n if self.showGaps.get() :\n print \" - showing gaps\"\n\n self.conf = \"\"\n self.pred = \"\"\n self.seq = \"\"\n self.seqRes = []\n self.seqRi = []\n\n if chainId == 'All' :\n return\n\n from chimera.resCode import protein3to1\n from chimera.resCode import nucleic3to1\n protein3to1['HSD'] = protein3to1['HIS']\n\n minri, maxri = None, None\n rids = {}\n for r in mol.residues :\n if r.id.chainId == chainId :\n if r.type in protein3to1 or r.type in nucleic3to1 :\n rids[r.id.position] = r\n if minri == None or r.id.position < minri : minri = r.id.position\n if maxri == None or r.id.position > maxri : maxri = r.id.position\n\n\n ris = rids.keys()\n ris.sort()\n\n if maxri == None :\n return\n\n for ri in range ( minri, maxri+1 ) :\n if ri in rids :\n r = rids[ri]\n if r.type in protein3to1 :\n self.seq = self.seq + protein3to1[r.type]\n self.conf = self.conf + \"9\"\n predi = \"C\"\n if r.isSheet : predi = \"E\"\n if r.isHelix : predi = \"H\"\n self.pred = self.pred + predi\n self.seqRes.append ( r )\n self.seqRi.append ( ri )\n elif r.type in nucleic3to1 :\n self.seq = self.seq + nucleic3to1[r.type]\n self.conf = self.conf + \"9\"\n self.predi = \"C\"\n self.pred = self.pred + self.predi\n self.seqRes.append ( r )\n self.seqRi.append ( ri )\n else :\n if self.showGaps.get() :\n self.seq = self.seq + \".\"\n self.conf = self.conf + \"9\"\n self.pred = self.pred + \"C\"\n self.seqRes.append ( None )\n self.seqRi.append ( ri )\n\n\n\n\n\n def SSE ( self ) :\n\n print \"sse\"\n #self.GetFromMol ( mod, chainId )\n\n\n def CurRes ( self ) :\n\n #self.GetFromMol ( mod, chainId )\n\n if self.cur_mol == None :\n umsg ( \"No selected molecule\" )\n return []\n\n if self.cur_dmap == None :\n umsg ( \"No selected map\" )\n return []\n\n if len ( self.chain.get() ) == 0 :\n umsg ( \"No selected chain\" )\n return []\n\n from chimera.resCode import protein3to1\n protein3to1['HSD'] = protein3to1['HIS']\n\n rids = {}\n for r in self.cur_mol.residues :\n if r.id.chainId == self.chain.get() :\n if r.type in protein3to1 :\n rids[r.id.position] = r\n\n print \" - %d residues\" % len(rids.values())\n 
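# Note (descriptive comment, added for clarity): 'rids' maps residue position -> residue object for the\n # currently selected chain, and only residue types found in protein3to1 (with HSD mapped to HIS above)\n # are collected, so CurRes() returns protein residues only -- nucleic-acid and ligand residues are excluded.\n 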
#return [ rids[6] ]\n return rids.values ()\n\n\n\n def CalcZScores ( self ) :\n\n ress = []\n try :\n ress = self.seqRes\n except :\n pass\n\n if len ( ress ) == 0 :\n umsg ( \"No molecule/chain selected?\" )\n return\n\n self.scores2 = [None] * len(self.seqRes)\n scoreI = 0\n\n if self.cur_dmap == None :\n status ( \"Select or open a map...\" )\n return\n\n if self.cur_mol == None :\n status ( \"Select or open a model...\" )\n return\n\n status ( \"Getting secondary structure elements...\" )\n\n resolution = 3.0 * self.cur_dmap.data.step[0]\n #resolution = 3.0\n umsg ( \"Calculating backbone Z-scores...\" )\n\n zscores2 = []\n\n if 0 : # old\n sses = SSEs ( self.seqRes )\n #print \" - \",len(sses),\"sse for \", len(ress), \"res\"\n\n atI = 1\n\n for el in sses :\n si, ei, ss, elRess = el\n\n if atI % 10 == 0 :\n status ( \"BB scores: %d/%d\" % (atI,len(sses) ) )\n atI += 1\n\n #if 1 or (startRes < 129 and endRes > 129) :\n startResI, endResI, sseType, ress = el\n #print \" : %d-%d, %s, %d res\" % (startResI, endResI, sseType, len(ress))\n\n zscore, ccs = zBB ( self.cur_mol, ress, resolution, self.cur_dmap )\n #print ss, si, \"-\", ei, zscore\n if zscore != None :\n zscores2.append ( zscore )\n\n for r in elRess :\n r.bbZ = zscore\n self.scores2[scoreI] = zscore\n scoreI += 1\n\n else :\n\n bbs = BBsegs ( self.seqRes )\n W = 5\n atRes = 0\n\n for bb in bbs :\n print \"%d res, %d-%d\" % (len(bb),bb[0].id.position,bb[-1].id.position)\n\n for ri, r in enumerate ( bb ) :\n firstRi = max ( 0, ri-(W-1)/2 )\n lastRi = min ( len(bb)-1, ri+(W-1)/2 )\n ress = bb[firstRi:lastRi+1]\n zscore, ccs = zBB ( self.cur_mol, ress, resolution, self.cur_dmap )\n\n #print \" %d : %d - %d, %.3f\" % (ri, firstRi, lastRi, zscore)\n if atRes % 50 == 0 :\n status ( \"Backbone - residue %d/%d\" % (atRes,len(self.seqRes) ) )\n #print \"%d/%d\" % (atRes,len(self.seqRes))\n print \".\"\n\n atRes += 1\n\n if zscore != None :\n zscores2.append ( zscore )\n\n r.bbZ = zscore\n r.CCS = ccs\n r.bbQ = zscore\n self.scores2[scoreI] = zscore\n scoreI += 1\n\n\n #print zscores2\n\n print \" - %d res, min %.2f max %.2f, avg %.2f\" % (len(ress), min(zscores2), max(zscores2), numpy.average(zscores2) )\n self.avgScore2 = numpy.average ( zscores2 )\n\n doRes = []\n\n doAllResInMol = False\n\n if doAllResInMol :\n for res in self.cur_mol.residues :\n if \"CA\" in res.atomsMap and \"N\" in res.atomsMap and \"C\" in res.atomsMap :\n doRes.append ( res )\n\n print \"++++ added all %d res from %s ++++\" % (len(doRes), self.cur_mol.name)\n\n else :\n for r in self.seqRes :\n try :\n blah\n ra = r.scZ\n except :\n doRes.append ( r )\n\n\n\n #doRes = self.seqRes\n #doRes = self.CurRes()\n print \" - need score for %d res\" % len(doRes)\n\n umsg ( \"Calculating Side Chains / Bases Z-scores...\" )\n\n sczScores = []\n if len(doRes) > 0 :\n sczScores = CalcRotaZ ( self.cur_dmap, self.cur_mol, doRes )\n #avgA, stdA = numpy.average ( A ), numpy.std ( A )\n #umsg ( \"Avg side chain Z-score: %.3f\" % ( avgA ) )\n\n if not doAllResInMol :\n doRes = self.seqRes\n\n self.scores = [None] * len(doRes)\n for ri, r in enumerate ( doRes ) :\n self.scores[ri] = r.scZ\n\n scores = [x for x in self.scores if x is not None]\n\n self.minScore = min ( scores )\n self.maxScore = max ( scores )\n self.avgScore = numpy.average ( scores )\n\n print \" - %d res, min %.2f max %.2f, avg %.2f\" % (len(doRes),self.minScore,self.maxScore, self.avgScore)\n\n self.minScore1, self.maxScore1 = 0,2\n self.minScore2, self.maxScore2 = 0,4\n\n bbRes = numpy.power ( numpy.e, 
(self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334\n scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261\u000b\n #scRes = (self.avgScore2 - 3.507) / -0.721\n #bbRes = (self.avgScore - 6.1234) / -0.9191\n\n umsg ( \"Average BB Z-score: %.2f (%.1fA), Average Side Chain Z-score: %.2f (%.1fA)\" % (self.avgScore2, bbRes, self.avgScore, scRes) )\n\n self.UpdateSeq ()\n\n\n\n sByType = {}\n rByType = {}\n for r in doRes :\n if r.scZ != None :\n if not r.type in sByType :\n rByType[r.type] = []\n sByType[r.type] = []\n rByType[r.type].append ( [r.scZ, r] )\n sByType[r.type].append ( [r.scZ] )\n\n avgs = []\n for rtype, ra in sByType.iteritems () :\n avgs.append ( [numpy.average (ra), rtype] )\n\n from chimera.resCode import protein3to1\n from chimera.resCode import nucleic3to1\n avgs.sort ( reverse=True, key=lambda x: x[0] )\n\n\n #mpath, mname = os.path.split ( dmap.data.path )\n dname, dext = os.path.splitext ( self.cur_dmap.data.path )\n #mfname = os.path.split ( self.cur_mol.openedAs[0] )[-1]\n #mname, mext = os.path.splitext ( mfname )\n\n avgm, numt = {}, {}\n for avgScore, rtype in avgs :\n\n rscores = rByType[rtype]\n rscores.sort ( reverse=True, key=lambda x: x[0] )\n hr = rscores[0]\n R = hr[1]\n highestScore = hr[0]\n numRes = len(rscores)\n\n if R.isProt :\n print \"%s\\t%s\\t%d\\t%f\\t%d\\t.%s\\t%f\" % (rtype, protein3to1[rtype], numRes, avgScore, R.id.position, R.id.chainId, highestScore)\n else :\n print \"%s\\t%s\\t%d\\t%f\\t%d\\t.%s\\t%f\" % (rtype, nucleic3to1[rtype], numRes, avgScore, R.id.position, R.id.chainId, highestScore)\n\n avgm[rtype] = avgScore\n numt[rtype] = numRes\n\n\n ofname = \"%s__%s__scz_rtype.txt\" % (dname, self.cur_mol.name)\n print \" -> \", ofname\n fp = open ( ofname, \"w\" )\n\n for rt in [\"PHE\", \"PRO\", \"ILE\", \"LEU\", \"VAL\"] : # , \"GLY\", , \"ALA\"\n fp.write ( \"%s\\t%d\\t%f\\n\" % (rt, numt[rt], avgm[rt]) )\n\n for rt in [\"MET\"] :\n fp.write ( \"%s\\t%d\\t%f\\n\" % (rt, numt[rt], avgm[rt]) )\n\n for rt in [\"HIS\", \"ARG\", \"LYS\", \"TRP\", \"CYS\"] : #\n try :\n fp.write ( \"%s\\t%d\\t%f\\n\" % (rt, numt[rt], avgm[rt]) )\n except :\n print \" - no %s\" % rt\n\n for rt in [\"GLN\", \"ASN\", \"THR\"] :\n fp.write ( \"%s\\t%d\\t%f\\n\" % (rt, numt[rt], avgm[rt]) )\n\n for rt in [\"TYR\", \"GLU\", \"ASP\", \"SER\"] :\n fp.write ( \"%s\\t%d\\t%f\\n\" % (rt, numt[rt], avgm[rt]) )\n\n fp.close()\n\n\n\n\n\n def CalcAllR (self) :\n\n ress = []\n try :\n ress = self.seqRes\n except :\n pass\n\n if len ( ress ) == 0 :\n umsg ( \"No molecule/chain selected?\" )\n return\n\n if self.cur_dmap == None :\n status ( \"Select or open a map...\" )\n return\n\n if self.cur_mol == None :\n status ( \"Select or open a model...\" )\n return\n\n cid = self.chain.get()\n\n CalcSCBBr ( self.cur_mol, cid, self.cur_dmap )\n\n\n self.scores, self.scores2 = [], []\n scBB, scSC = [], []\n\n for r in self.cur_mol.residues :\n if cid == None or r.id.chainId == cid :\n self.scores2.append ( r.SCBBr )\n self.scores.append ( r.SCBBr )\n r.scZ = r.SCBBr\n r.bbZ = r.SCBBr\n if r.SCBBr != None :\n scBB.append ( r.SCBBr )\n if r.SCBBr != None :\n scSC.append ( r.SCBBr )\n\n\n scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)\n bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)\n\n\n print \"Average R sc : %.2f - %.2f, avg %.2f\" % (scMin, scMax, scAvg)\n print \"Average R bb : %.2f - %.2f, avg %.2f\" % (bbMin, bbMax, bbAvg)\n\n\n\n self.minSCscore, self.maxSCscore = 0.0,1\n self.minBBscore, 
self.maxBBscore = 0.0,1\n\n self.UpdateSeq ()\n\n\n\n def CalcAllSigma (self) :\n\n ress = []\n try :\n ress = self.seqRes\n except :\n pass\n\n if len ( ress ) == 0 :\n umsg ( \"No molecule/chain selected?\" )\n return\n\n if self.cur_dmap == None :\n status ( \"Select or open a map...\" )\n return\n\n if self.cur_mol == None :\n status ( \"Select or open a model...\" )\n return\n\n\n cid = self.chain.get()\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(self.cur_mol.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n\n scBB, scSC = [], []\n\n for r in self.cur_mol.residues :\n if cid == None or r.id.chainId == cid :\n r.scores2 = r.bbZ\n r.scores1 = r.scZ\n if r.bbZ != None : scBB.append ( r.bbZ )\n if r.scZ != None : scSC.append ( r.scZ )\n\n\n #bbRes = numpy.power ( numpy.e, (self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334\n #scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261\u000b\n #scRes = (self.avgScore2 - 3.507) / -0.721\n #bbRes = (self.avgScore - 6.1234) / -0.9191\n\n scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)\n bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)\n\n\n print \"Average Sigma sc : %.2f - %.2f, avg %.2f | %.2f - %.2f, avg %.2f\" % (scMin, scMax, scAvg, 1.0/scMin, 1.0/scMax, 1.0/scAvg)\n print \"Average Sigma bb : %.2f - %.2f, avg %.2f | %.2f - %.2f, avg %.2f\" % (bbMin, bbMax, bbAvg, 1.0/bbMin, 1.0/bbMax, 1.0/bbAvg)\n\n\n self.minScore1, self.maxScore1 = 0.0,0.5\n self.minScore2, self.maxScore2 = 0.0,0.2\n\n self.UpdateSeq ()\n\n\n\n\n def CalcAllQ (self) :\n\n ress = []\n try :\n ress = self.seqRes\n except :\n pass\n\n if len ( ress ) == 0 :\n umsg ( \"No molecule/chain selected?\" )\n #return\n\n if self.cur_dmap == None :\n status ( \"Select or open a map...\" )\n return\n\n if self.cur_mol == None :\n status ( \"Select or open a model...\" )\n return\n\n cid = self.chain.get()\n\n umsg ( \"Calculating Q-scores - see bottom of main window for status or to cancel...\" )\n\n sigma = float(self.sigma.get())\n\n Qavg = qscores.CalcQ (self.cur_mol, self.chain.get(), self.cur_dmap, sigma, log=True )\n qscores.SaveQStats ( self.cur_mol, self.chain.get(), self.cur_dmap, sigma, float(self.mapRes.get()) )\n self.ShowQScores ()\n\n #umsg ( \"Average Q-score for %s: %.2f\" % (self.cur_mol.name, Qavg) )\n umsg ( \"Done Q-scores for %s\" % (self.cur_mol.name) )\n\n\n\n def CalcAllQp (self, numProc=None) :\n\n if self.cur_dmap == None :\n status ( \"Select or open a map...\" )\n return\n\n if self.cur_mol == None :\n status ( \"Select or open a model...\" )\n return\n\n cid = self.chain.get()\n\n if cid == \"All\" :\n cid = None\n\n #ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n #points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n #print \" - search tree: %d/%d ats\" % ( len(ats), len(self.cur_mol.atoms) )\n #allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n if 0 :\n for r in self.cur_mol.residues :\n if hasattr ( r, 'Q' ) : del r.Q\n if hasattr ( r, 'scQ' ) : del r.scQ\n if hasattr ( r, 'bbQ' ) : del r.bbQ\n\n if len(self.cur_dmap.data.path) == 0 :\n umsg ( \"No file for map - %s - must be saved first...\" % (self.cur_dmap.name) )\n return\n\n if not os.path.isfile ( self.cur_dmap.data.path ) :\n umsg ( \"Map file not found - %s - must be saved first...\" % 
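            # NOTE: CalcQp runs the Q-score calculation in separate worker processes,
            # which presumably re-read the map and model from disk rather than sharing
            # the in-memory Chimera objects; hence both the map (checked here) and the
            # model (checked just below) must exist as saved files before starting.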
(self.cur_dmap.data.path) )\n return\n\n if not hasattr (self.cur_mol, 'openedAs') :\n umsg ( \"No file for model %s - must be saved first...\" % (self.cur_mol.name) )\n return\n\n if not os.path.isfile ( self.cur_mol.openedAs[0] ) :\n umsg ( \"Model file not found - %s - must be saved first...\" % (self.cur_mol.openedAs[0]) )\n return\n\n sigma = float(self.sigma.get())\n\n qscores.CalcQp (self.cur_mol, cid, self.cur_dmap, sigma, numProc=numProc )\n qscores.SaveQStats ( self.cur_mol, self.chain.get(), self.cur_dmap, sigma, float(self.mapRes.get()) )\n\n self.ShowQScores ()\n\n\n\n\n def ShowQScores (self) :\n\n cid = self.chain.get()\n scBB, scSC = [], []\n for r in self.cur_mol.residues :\n #if cid == None or cid == \"All\" or r.id.chainId == cid :\n if r.id.chainId == cid :\n qscores.CalcResQ ( r )\n if r.isProt or r.isNA :\n r.score1 = r.scQ\n r.score2 = r.bbQ\n if r.scQ != None : scSC.append ( r.scQ )\n if r.bbQ != None : scBB.append ( r.bbQ )\n else :\n r.score1 = r.Q\n r.score2 = r.Q\n\n\n #bbRes = numpy.power ( numpy.e, (self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334\n #scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261\u000b\n #scRes = (self.avgScore2 - 3.507) / -0.721\n #bbRes = (self.avgScore - 6.1234) / -0.9191\n\n\n #try :\n if len(scSC) > 0 and len(scBB) > 0 :\n scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)\n bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)\n print \"Average Q sc : %.2f - %.2f, avg %.2f\" % (scMin, scMax, scAvg )\n print \"Average Q bb : %.2f - %.2f, avg %.2f\" % (bbMin, bbMax, bbAvg )\n self.GetMaxScores()\n\n #except :\n # pass\n\n self.UpdateSeq ()\n\n\n\n def QuickQ (self) :\n\n if self.cur_dmap == None :\n status ( \"Select or open a map...\" )\n return\n\n if self.cur_mol == None :\n status ( \"Select or open a model...\" )\n return\n\n cid = self.chain.get()\n\n if cid == \"All\" :\n cid = None\n\n #ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n #points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n #print \" - search tree: %d/%d ats\" % ( len(ats), len(self.cur_mol.atoms) )\n #allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n if 0 :\n for r in self.cur_mol.residues :\n if hasattr ( r, 'Q' ) : del r.Q\n if hasattr ( r, 'scQ' ) : del r.scQ\n if hasattr ( r, 'bbQ' ) : del r.bbQ\n\n\n sigma = float(self.sigma.get())\n CalcQp (self.cur_mol, cid, self.cur_dmap, sigma)\n\n\n\n scBB, scSC = [], []\n\n for r in self.cur_mol.residues :\n if cid == None or r.id.chainId == cid :\n if r.isProt or r.isNA :\n r.score1 = r.scQ\n r.score2 = r.bbQ\n if r.bbQ != None : scBB.append ( r.bbQ )\n if r.scQ != None : scSC.append ( r.scQ )\n else :\n r.score1 = r.Q\n r.score2 = r.Q\n\n scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)\n bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)\n\n\n print \" - Average Q sc : %.2f - %.2f, avg %.2f\" % (scMin, scMax, scAvg )\n print \" - Average Q bb : %.2f - %.2f, avg %.2f\" % (bbMin, bbMax, bbAvg )\n\n\n self.minScore1, self.maxScore1 = 0.0,1\n self.minScore2, self.maxScore2 = 0.0,1\n\n\n sigma = float(self.sigma.get())\n self.UpdateSeq ()\n qscores.SaveQStats ( self.cur_mol, self.chain.get(), self.cur_dmap, sigma, float(self.mapRes.get()) )\n\n\n\n\n\n def GetQsFromFile (self) :\n\n if self.cur_dmap == None :\n status ( \"Select or open a map...\" )\n return\n\n if self.cur_mol == None :\n status ( \"Select or open a model...\" )\n return\n\n chainId = 
self.chain.get()\n umsg ( \"Loading Q-scores for chain %s...\" % chainId )\n\n molPath, molExt = os.path.splitext(self.cur_mol.openedAs[0])\n\n if molExt == \".pdb\" or molExt == \".ent\" :\n mapName = os.path.splitext(self.cur_dmap.name)[0]\n nname = molPath + \"__Q__\" + mapName + \".pdb\"\n if not os.path.isfile ( nname ) :\n print nname\n umsg ( \"Q scores not found for this map and file\" )\n return\n qscores.QsFromPdbFile ( self.cur_mol, nname )\n else :\n mapName = os.path.splitext(self.cur_dmap.name)[0]\n nname = molPath + \"__Q__\" + mapName + \".cif\"\n if not os.path.isfile ( nname ) :\n print nname\n umsg ( \"Q scores not found for this map and file\" )\n return\n qscores.QsFromCifFile ( self.cur_mol, nname )\n\n if 0 :\n umsg ( \"Saving files with Q-score B-factor\" )\n self.SaveQsBfs ( self.cur_mol, 50.0 )\n self.SaveQsBfs ( self.cur_mol, 100.0 )\n self.SaveQsBfs ( self.cur_mol, 150.0 )\n self.SaveQsBfs ( self.cur_mol, 200.0 )\n self.SaveQsBfs ( self.cur_mol, 300.0 )\n\n umsg ( \"Saving stats files for chain %s...\" % chainId )\n sigma = float(self.sigma.get())\n qscores.SaveQStats ( self.cur_mol, chainId, self.cur_dmap, sigma, float(self.mapRes.get()) )\n\n #qscores.QStatsProt ( self.cur_mol, self.cur_dmap, chainId )\n qscores.QStatsRNA ( self.cur_mol, self.cur_dmap, chainId )\n #qscores.QStats1 (self.cur_mol, chainId)\n\n umsg ( \"Showing Q-scores for chain %s\" % chainId )\n self.ShowQScores ()\n\n\n def SaveQsBfs ( self, mol, f ) :\n\n for at in mol.atoms :\n if not at.element.name == \"H\" :\n at.bfactor = f * (1.0 - at.Q)\n\n bondedAts = {}\n for b in mol.bonds :\n bondedAts[b.atoms[0]] = b.atoms[1]\n bondedAts[b.atoms[1]] = b.atoms[0]\n\n for at in mol.atoms :\n if at.element.name == \"H\" :\n bat = bondedAts[at]\n at.bfactor = bat.bfactor\n\n molPath = os.path.splitext(mol.openedAs[0])[0]\n\n nname = molPath + \"__Bf%.0f__.pdb\" % f\n print \" - saving %s\" % nname\n chimera.PDBio().writePDBfile ( [mol], nname )\n\n\n\n def SA_Q (self) :\n\n ress = []\n try :\n ress = self.seqRes\n except :\n pass\n\n if len ( ress ) == 0 :\n umsg ( \"No molecule/chain selected?\" )\n return\n\n if self.cur_dmap == None :\n status ( \"Select or open a map...\" )\n return\n\n if self.cur_mol == None :\n status ( \"Select or open a model...\" )\n return\n\n\n chainId = self.chain.get()\n\n\n\n #ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n #points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n #print \" - search tree: %d/%d ats\" % ( len(ats), len(self.cur_mol.atoms) )\n #allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n umsg ( \"Solvent Accessibility vs. Q... 
making surface for %d atoms...\" % len(self.cur_mol.atoms) )\n print \".\",\n\n\n # https://en.m.wikipedia.org/wiki/Van_der_Waals_radius\n vdwRadii = { 'H' : 1.2, # (1.09)[1]\n 'C' : 1.7,\n 'N' : 1.55,\n 'O' : 1.52,\n 'F' : 1.47,\n 'P' : 1.8,\n 'S' : 1.8 }\n\n #vdwRadii = { 'H' : 1.5, 'C' : 1.5, 'N' : 1.5, 'O' : 1.5, 'F' : 1.5, 'P' : 1.5, 'S' : 1.5 }\n\n\n\n if GetMod ( \"Surface Pts\" ) : chimera.openModels.close ( [GetMod ( \"Surface Pts\" )] )\n if GetMod ( \"SA pts\" ) : chimera.openModels.close ( [GetMod ( \"SA pts\" )] )\n if GetMod ( \"SA- pts\" ) : chimera.openModels.close ( [GetMod ( \"SA pts\" )] )\n if GetMod ( \"ASP pts\" ) : chimera.openModels.close ( [GetMod ( \"SA pts\" )] )\n\n\n surfPts = []\n for at in self.cur_mol.atoms :\n VWR = vdwRadii[at.element.name] if at.element.name in vdwRadii else 1.55\n apts = SpherePts ( at.coord(), VWR, 100 )\n #apts.extend ( SpherePts ( at.coord(), VWR/2.0, 50 ) )\n apts.append ( at.coord().data() )\n\n surfPts.extend ( apts )\n\n #AddSpherePts ( apts, atomColors2[at.element.name], 0.1, \"Surface Pts\" )\n #AddSpherePts ( apts, (.7,.7,.7,1), 0.1, \"Surface Pts\" )\n\n\n umsg ( \"Solvent Accessibility vs. Q... making tree for %d atoms, %d points...\" % (len(self.cur_mol.atoms), len(surfPts) ) )\n print \".\",\n\n surfPtsTree = AdaptiveTree ( surfPts, surfPts, 2.0 )\n\n\n #AddSpherePts ( surfPts, atomColors2[at.element.name], 0.1, \"Surface Pts\" )\n\n\n\n molPath = os.path.splitext(self.cur_mol.openedAs[0])[0]\n mapName = os.path.splitext(self.cur_dmap.name)[0]\n\n nname = molPath + \"__SA-Q__\" + mapName + \".txt\"\n fp = open ( nname, \"w\" )\n\n umsg ( \"Solvent Accessibility vs. Q ... saving to file %s\" % nname )\n print \".\",\n\n doRess = []\n for r in self.cur_mol.residues :\n if r.id.chainId == chainId :\n doRess.append ( r )\n\n print \" - calc for %d res...\" % len (doRess)\n waterRad = 1.4\n waterRad2 = 1.4*1.4\n\n rt_sa = {}\n\n for ri, r in enumerate ( doRess ) :\n\n if 0 or r.type == \"ASP\" :\n\n showPts = r.type == \"ASP\"\n\n if 1 or not hasattr ( r, 'SAArea' ) :\n\n numPtsOnSAS, tryPts = 0.0, 300.0\n for at in r.scAtoms :\n VWR = vdwRadii[at.element.name] if at.element.name in vdwRadii else 1.55\n outPts = SpherePts ( at.coord(), VWR + waterRad, int(tryPts) )\n #AddSpherePts ( outPts, (.9,.9,.2,1.0), 0.1, \"ASP pts\" )\n for pt in outPts :\n vPt = [pt[0], pt[1], pt[2]]; apt = numpy.array ( vPt )\n opointsNear = surfPtsTree.searchTree ( vPt, waterRad )\n onSurf = True\n for npt in opointsNear :\n v = apt - npt; r2 = numpy.sum ( v * v )\n if r2 < waterRad2 :\n onSurf = False; break\n if onSurf :\n numPtsOnSAS += 1.0\n if showPts :\n v = chimera.Point(pt[0], pt[1], pt[2]) - at.coord(); v.normalize()\n pt = at.coord() + v * vdwRadii[at.element.name]\n AddSpherePts ( [pt.data()], (.9,.2,.9,1.0), 0.1, \"SA pts\" )\n #AddSpherePts ( [vPt], (.2,.9,.9,0.8), 0.11, \"SA- pts\" )\n\n r.SAArea = 4 * numpy.pi * numpy.power ( vdwRadii[at.element.name], 2.0 ) * numPtsOnSAS / tryPts\n\n if hasattr (r, 'scQ') and r.scQ != None :\n fp.write ( \"%s\\t%d\\t%f\\t%f\\n\" % (r.type, r.id.position, r.scQ, r.SAArea) )\n elif hasattr (r, 'Q') and r.Q != None :\n fp.write ( \"%s\\t%d\\t%f\\t%f\\n\" % (r.type, r.id.position, r.Q, r.SAArea) )\n\n if r.type in rt_sa :\n rt_sa[r.type] += r.SAArea\n else :\n rt_sa[r.type] = r.SAArea\n if ri % 10 == 0 :\n umsg ( \"SA - res %d/%d\" % (ri, len(doRess)) )\n\n fp.close()\n umsg ( \"Solvent Accessibility vs. Q ... saved to file %s ... 
done\" % nname )\n\n #nname = molPath + \"__SAa-Q__\" + mapName + \".txt\"\n #fp = open ( nname, \"w\" )\n\n print \"SA area by rtype:\"\n totalSA = 0.0\n for rt, saa in rt_sa.iteritems () :\n print \"%s\\t%f\" % (rt, saa)\n totalSA += saa\n\n print \" - total SA:%f\" % totalSA\n\n print \"SA area / total area by rtype:\"\n for rt, saa in rt_sa.iteritems () :\n print \"%s\\t%f\" % (rt, saa/totalSA)\n\n\n\n\n\n def CalcAllRadZ (self) :\n\n ress = []\n try :\n ress = self.seqRes\n except :\n pass\n\n if len ( ress ) == 0 :\n umsg ( \"No molecule/chain selected?\" )\n return\n\n if self.cur_dmap == None :\n status ( \"Select or open a map...\" )\n return\n\n if self.cur_mol == None :\n status ( \"Select or open a model...\" )\n return\n\n cid = self.chain.get()\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(self.cur_mol.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n #allAtTree = None\n\n\n CalcRadZ ( self.cur_mol, cid, self.cur_dmap, allAtTree, useOld=False, log=True )\n\n self.scores, self.scores2 = [], []\n scBB, scSC = [], []\n\n for r in self.cur_mol.residues :\n if cid == None or r.id.chainId == cid :\n self.scores2.append ( r.bbZ )\n self.scores.append ( r.scZ )\n if r.bbZ != None :\n scBB.append ( r.bbZ )\n if r.scZ != None :\n scSC.append ( r.scZ )\n\n\n #bbRes = numpy.power ( numpy.e, (self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334\n #scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261\u000b\n #scRes = (self.avgScore2 - 3.507) / -0.721\n #bbRes = (self.avgScore - 6.1234) / -0.9191\n\n scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)\n bbMin, bbMax, bbAvg = min(scBB), max(scBB), numpy.average(scBB)\n\n print \"Average RadZ sc : %.2f - %.2f, avg %.2f\" % (scMin, scMax, scAvg)\n print \"Average RadZ bb : %.2f - %.2f, avg %.2f\" % (bbMin, bbMax, bbAvg)\n\n umsg ( \"Average Side Chain: %.2f, Backbone: %.2f\" % (scAvg, bbAvg) )\n\n\n self.minSCscore, self.maxSCscore = 0.0,4\n self.minBBscore, self.maxBBscore = 0.0,4\n\n self.UpdateSeq ()\n\n\n\n\n def CalcAllRotaZ (self) :\n\n ress = []\n try :\n ress = self.seqRes\n except :\n pass\n\n if len ( ress ) == 0 :\n umsg ( \"No molecule/chain selected?\" )\n return\n\n if self.cur_dmap == None :\n status ( \"Select or open a map...\" )\n return\n\n if self.cur_mol == None :\n status ( \"Select or open a model...\" )\n return\n\n cid = self.chain.get()\n\n self.scores, self.scores2 = [], []\n scBB, scSC = [], []\n\n print \"...\"\n\n #CalcRotaZ ( self.cur_dmap, self.cur_mol, self.cur_mol.residues )\n\n\n for r in self.cur_mol.residues :\n\n\n\n for at in r.atoms :\n if not hasattr ( at, 'isBB' ) :\n print \" - noBB - atom %s, res %d.%s, chain %s\" % (at.name, at.residue.id.position, at.residue.type, at.residue.id.chainId)\n\n if cid == None or r.id.chainId == cid :\n self.scores2.append ( r.bbZ )\n self.scores.append ( r.scZ )\n if r.bbS != None :\n scBB.append ( r.bbZ )\n if r.scS != None :\n scSC.append ( r.scZ )\n\n\n #bbRes = numpy.power ( numpy.e, (self.avgScore2 - 8.0334) / -4.128 ) # y = -4.128ln(x) + 8.0334\n #scRes = numpy.power ( numpy.e, (self.avgScore - 4.8261) / -3.097 ) # y = -3.097ln(x) + 4.8261\u000b\n #scRes = (self.avgScore2 - 3.507) / -0.721\n #bbRes = (self.avgScore - 6.1234) / -0.9191\n\n scMin, scMax, scAvg = min(scSC), max(scSC), numpy.average(scSC)\n bbMin, bbMax, bbAvg = 
min(scBB), max(scBB), numpy.average(scBB)\n\n\n umsg ( \"Average Sigma sc : %.2f - %.2f, avg %.2f | %.2f - %.2f, avg %.2f\" % (scMin, scMax, scAvg, 1.0/scMin, 1.0/scMax, 1.0/scAvg) )\n umsg ( \"Average Sigma bb : %.2f - %.2f, avg %.2f | %.2f - %.2f, avg %.2f\" % (bbMin, bbMax, bbAvg, 1.0/bbMin, 1.0/bbMax, 1.0/bbAvg) )\n\n\n self.minSCscore, self.maxSCscore = 0.0,2.0\n self.minBBscore, self.maxBBscore = 0.0,2.0\n\n self.UpdateSeq ()\n\n\n\n\n def RtypeOut ( self, avgScore, rtype, rByType, fout ) :\n pass\n\n\n\n\n def UpdateSeqFont ( self ) :\n # http://stackoverflow.com/questions/4296249/how-do-i-convert-a-hex-triplet-to-an-rgb-tuple-and-back\n\n if not hasattr ( self, 'seq' ) :\n print \" - update seq font - no seq\"\n return\n\n #print \"seq len %d, text w %d\" % ( len(self.seq), self.tw )\n\n # boxes for BBs\n x_at = self.seqX\n y_at = self.seqY + self.seqH/2\n\n y0 = self.seqY+5\n y1 = self.seqY+self.seqH-5\n\n for si in range ( len(self.seq) ) :\n res = self.seq[si]\n pred = self.pred[si]\n conf = float ( self.conf[si] ) / 10.0\n\n if pred == 'E' :\n x0 = self.seqX + si * self.tw\n x1 = x0 + self.tw\n #self.Canvas.coords ( self.seqMouseR, x0, y0, x1, y1 )\n #self.Canvas.itemconfigure ( self.seqMouseR, state=Tkinter.NORMAL )\n\n if self.seqSheetR[si] == None :\n c = self.sheetBaseClr + self.sheetClrD * conf\n clr = \"#\" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')\n self.seqSheetR[si] = self.Canvas.create_rectangle(x0, y0, x1, y1, outline=clr, fill=clr)\n else :\n self.Canvas.coords ( self.seqSheetR[si], x0, y0, x1, y1 )\n\n elif pred == 'H' :\n x0 = self.seqX + si * self.tw\n x1 = x0 + self.tw\n\n if self.seqHelixR[si] == None :\n c = self.helixBaseClr + self.helixClrD * conf\n clr = \"#\" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')\n self.seqHelixR[si] = self.Canvas.create_rectangle(x0, y0, x1, y1, outline=clr, fill=clr)\n else :\n self.Canvas.coords ( self.seqHelixR[si], x0, y0, x1, y1 )\n\n\n\n # box showing selected Residue\n if hasattr ( self, 'seqMouseR' ) :\n self.Canvas.coords ( self.seqMouseR, 0, 0, 0, 0 )\n else :\n self.seqMouseR = self.Canvas.create_rectangle(0, 0, 0, 0, outline=\"#aab\", fill=\"#bbc\", state=Tkinter.HIDDEN)\n\n\n\n x_at = self.seqX\n y_at = self.seqY + self.seqH/2\n\n if hasattr ( self, 'seqText' ) and self.seqText != None :\n #self.Canvas.coords ( self.seqText, x_at, y_at )\n #self.Canvas.itemconfigure ( self.seqText, font=self.font )\n #print \" - has seq?\"\n self.Canvas.delete ( self.seqText )\n #self.seqText = None\n #del self.seqText\n\n self.seqText = self.Canvas.create_text( x_at, y_at, text=self.seq, font=self.font, anchor='w')\n print \" - created seq text - font\"\n\n\n #self.UpdateSeqSel ()\n\n\n def GetMaxScores ( self ) :\n\n try :\n RES = float(self.mapRes.get())\n except :\n umsg ( \"Please enter a numeric value for Resolution in Options\" )\n self.minScore1, self.maxScore1 = 0.0,1.0\n self.minScore2, self.maxScore2 = 0.0,1.0\n\n avgQrna = -0.1574 * RES + 1.0673 # rna\n avgQprot = -0.1794 * RES + 1.1244 # protein\n avgQIon = -0.1103 * RES + 1.0795 # ion\n avgQWater = -0.0895 * RES + 1.0001 # water\n\n print \" - res %.2f - exp Q-score: %.2f\" % (RES, avgQprot)\n\n self.minScore1, self.maxScore1 = 0.0,avgQprot\n self.minScore2, self.maxScore2 = 0.0,avgQprot\n\n\n\n def UpdateSeq ( self ) :\n\n if not hasattr ( self, 'seq' ) :\n print \" - update seq - no seq\"\n return\n\n if not hasattr ( self, 'maxScore1' ) :\n self.GetMaxScores ()\n\n x_at = self.seqX\n y_at = self.seqY + self.seqH/2\n\n if hasattr ( self, 'seqText' ) and 
self.seqText != None :\n self.Canvas.coords ( self.seqText, x_at, y_at )\n #else :\n # self.seqText = self.Canvas.create_text( x_at, y_at, text=self.seq, font=self.font, anchor='w')\n # print \" - created seq text\"\n\n if 1 :\n y0 = self.seqY+5\n y1 = self.seqY+self.seqH-5\n\n cH = numpy.array( [50,250,50] )\n cL = numpy.array( [250,50,50] )\n\n for si in range ( len(self.seq) ) :\n #if i >= len ( self.seqt ) :\n # t = self.Canvas.create_text( x_at, y_at, text=self.seq[i], font=self.font)\n # self.seqt.append ( t )\n #else :\n # t = self.seqt [ i ]\n # self.Canvas.coords ( t, x_at, y_at )\n # x_at += self.tw\n\n res = self.seqRes[si]\n\n pred = self.pred[si]\n if pred == 'E' :\n if self.seqSheetR[si] != None :\n x0 = self.seqX + si * self.tw\n x1 = x0 + self.tw\n self.Canvas.coords ( self.seqSheetR[si], x0, y0, x1, y1 )\n\n elif pred == 'H' :\n if self.seqHelixR[si] != None :\n x0 = self.seqX + si * self.tw\n x1 = x0 + self.tw\n self.Canvas.coords ( self.seqHelixR[si], x0, y0, x1, y1 )\n\n if res == None :\n continue\n\n\n if not hasattr(res, 'score1') or res.score1 == None :\n if self.seqScoreR[si] != None :\n self.Canvas.delete ( self.seqScoreR[si] )\n self.seqScoreR[si] = None\n else :\n xx0 = self.seqX + si * self.tw + 2\n xx1 = xx0 + self.tw - 2\n h = (res.score1 - self.minScore1) / (self.maxScore1 - self.minScore1)\n if h > 1 : h = 1\n if h < 0 : h = 0\n Y, H = self.modY, (self.modH/2 - 2)\n yy0, yy1 = numpy.ceil(Y+H - H*h), numpy.floor(Y+H)\n #c = self.helixBaseClr + self.helixClrD * conf\n c = h * cH + (1-h) * cL\n clr = \"#\" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')\n if self.seqScoreR[si] != None :\n self.Canvas.coords ( self.seqScoreR[si], xx0, yy0, xx1, yy1 )\n self.Canvas.itemconfigure ( self.seqScoreR[si], outline=clr, fill=clr )\n else :\n self.seqScoreR[si] = self.Canvas.create_rectangle(xx0, yy0, xx1, yy1, outline=clr, fill=clr)\n\n if not hasattr(res, 'score2') or res.score2 == None :\n if self.seqScoreR2[si] != None :\n self.Canvas.delete ( self.seqScoreR2[si] )\n self.seqScoreR2[si] = None\n else :\n xx0 = self.seqX + si * self.tw + 2\n xx1 = xx0 + self.tw - 2\n h = (res.score2 - self.minScore2) / (self.maxScore2 - self.minScore2)\n if h > 1 : h = 1\n if h < 0 : h = 0\n Y, H = self.modY, self.modH/2\n #yy0, yy1 = Y+H, Y+H+H*h #upside down chart\n yy0, yy1 = numpy.ceil(Y+H+H-H*h), numpy.floor(Y+H+H)\n #c = self.helixBaseClr + self.helixClrD * conf\n c = h * cH + (1-h) * cL\n clr = \"#\" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')\n if self.seqScoreR2[si] != None :\n self.Canvas.coords ( self.seqScoreR2[si], xx0, yy0, xx1, yy1 )\n self.Canvas.itemconfigure ( self.seqScoreR2[si], outline=clr, fill=clr )\n else :\n self.seqScoreR2[si] = self.Canvas.create_rectangle(xx0, yy0, xx1, yy1, outline=clr, fill=clr)\n\n\n self.UpdateSeqSel ()\n\n\n\n\n def SeqRec ( self, sel ) :\n y0 = self.seqY+5\n y1 = self.seqY+self.seqH-5\n\n x0 = self.seqX + sel[0] * self.tw\n x1 = self.seqX + (sel[1]+1) * self.tw\n\n return x0, y0, x1, y1\n\n\n def UpdateSeqSel ( self ) :\n\n if not hasattr ( self, 'seqSel' ) :\n return\n\n if self.seqSel == None :\n if hasattr(self, 'seqSelRect') :\n self.Canvas.delete ( self.seqSelRect )\n self.seqSelRect = None\n return\n\n x0, y0, x1, y1 = self.SeqRec ( self.seqSel )\n\n if hasattr(self, 'seqSelRect') and self.seqSelRect != None :\n self.Canvas.coords ( self.seqSelRect, x0, y0, x1, y1 )\n else :\n #c = self.helixBaseClr + self.helixClrD * conf\n #clr = \"#\" + struct.pack('BBB',c[0],c[1],c[2]).encode('hex')\n self.seqSelRect = 
self.Canvas.create_rectangle(x0, y0, x1, y1, outline=self.selColor, width=3)\n\n\n\n\n\n def B1_Down (self, event):\n self.drag = ''\n\n #print \"b1 _\", event.x, event.y\n if self.isInSeq ( event.x, event.y ) :\n self.drag = 'seq'\n self.last_x = event.x\n self.last_y = event.y\n\n\n def B1_Down_Ctrl ( self, event ) :\n #print \"b1 _ <ctrl>\", event.x, event.y\n self.drag = ''\n\n if self.isInSeq ( event.x, event.y ) :\n self.drag = 'seqSel'\n\n if hasattr ( self, 'seqSel' ) and self.seqSel != None :\n self.prevSeqSel = self.seqSel\n else :\n self.prevSeqSel = None\n\n #print \"sel seq...\"\n seqI = ( event.x - self.seqX ) / self.tw\n\n resStartI = seqI\n try :\n resStartI = self.seqRes[seqI].id.position\n except :\n pass\n\n status ( \"Start sequence sel at %d\" % resStartI )\n\n self.seqSel = [seqI, seqI]\n self.UpdateSeqSel ()\n\n self.last_x = event.x\n self.last_y = event.y\n\n\n def B1_Down_Shift ( self, event ) :\n print \"B1 down - shift\"\n\n self.drag = ''\n\n if self.isInSeq ( event.x, event.y ) :\n if hasattr ( self, 'seqSel' ) and self.seqSel != None :\n seqI = ( event.x - self.seqX ) / self.tw\n if seqI >= self.seqSel[0] and seqI <= self.seqSel[1] :\n self.drag = \"con\"\n if not hasattr ( self, 'conLine' ) or self.conLine == None :\n self.conLine = self.Canvas.create_line( event.x, event.y, event.x, event.y, fill=\"red\", dash=(1, 1), width=2)\n status ( \"In selected sequence at %d\" % seqI )\n\n\n def B1_Down_Alt ( self, event ) :\n print \"B1 down - alt\"\n\n self.drag = ''\n\n if self.isInMod ( event.x, event.y ) :\n self.dragMod = self.SelectedMod ( event.x, event.y )\n if self.dragMod != None :\n if self.dragMod.type == \"Helix\" :\n self.drag = 'modRot'\n self.dragStartX = event.x\n\n\n\n def B1_Up_Ctrl ( self, event ) :\n #print \"b1 up - ctrl - \", event.x, event.y\n self.B1_Up ( event )\n\n\n def B1_Up_Shift ( self, event ) :\n #print \"b1 up - shift - \"\n self.B1_Up ( event )\n\n def B1_Up_Alt ( self, event ) :\n #print \"b1 up - alt - \"\n self.B1_Up ( event )\n\n\n def B1_Up (self, event) :\n #print \"b1 up - \", event.x, event.y\n\n if self.drag == 'seqSel' and hasattr ( self, 'seqSel' ) :\n status ( \"Selected: %d-%d\" % (self.seqSel[0], self.seqSel[1]) )\n\n if hasattr ( self, 'prevSeqSel' ) and self.prevSeqSel != None :\n if self.seqSel[0] == self.seqSel[1] :\n self.seqSel = None\n self.prevSeqSel = None\n self.UpdateSeqSel ()\n status ( \"Cleared sequence selection\" )\n chimera.selection.clearCurrent ()\n\n if self.seqSel != None :\n m, cid = self.cur_mol, self.chain.get()\n if m != None :\n\n #startI = self.seqRes [ max(self.seqSel[0],0) ].id.position\n #endI = self.seqRes [ min(self.seqSel[1],len(self.seqRes)-1) ].id.position\n\n startI = self.seqRi [ max(self.seqSel[0],0) ]\n endI = self.seqRi [ min(self.seqSel[1],len(self.seqRes)-1) ]\n\n selStr = \"#%d:%d-%d.%s\" % (m.id,startI,endI,cid)\n\n self.lastSelStr = selStr # \"%d-%d.%s\" % (startI,endI,cid)\n sel = chimera.selection.OSLSelection ( selStr )\n chimera.selection.setCurrent ( sel )\n\n if hasattr ( self, 'prevSel' ) and self.preserveSel.get () :\n for s in self.prevSel :\n print \" -s- adding to sel:\", s\n chimera.selection.mergeCurrent ( chimera.selection.EXTEND, chimera.selection.OSLSelection (s) )\n else :\n self.prevSel = []\n\n if self.preserveSel.get () :\n #self.prevSel.append ( \"%d-%d.%s\" % (startI,endI,cid) )\n self.prevSel.append ( selStr )\n print \" - added to selection list: \", selStr\n\n umsg ( \"Selected: \" + selStr )\n #chimera.selection.addCurrent ( sel )\n\n if 
self.selExtract.get () :\n self.ShowSel ()\n\n else :\n status ( \"no model visible\" )\n\n #else :\n # print \"cleared past sel\"\n # self.prevSel = []\n\n\n elif self.drag == 'modSel' :\n status ( 'Selected %d mods' % len(self.selMods) )\n\n elif self.drag == 'con' :\n selMod = None\n if hasattr ( self, 'selModPiece' ) and self.selModPiece != None :\n selMod = self.selModPiece\n self.selModPiece = None\n else :\n return\n\n if hasattr ( self, 'conLine' ) and self.conLine != None :\n self.Canvas.delete ( self.conLine )\n self.conLine = None\n\n status ( \"connected to %s\" % selMod.type )\n\n selMod.seq = self.seqSel\n selMod.numRes = (self.seqSel[1] - self.seqSel[0] + 1)\n selMod.MakeMod ()\n\n self.UpdateMod ()\n\n self.drag = ''\n #print \"mod: \", self.modX, \" seq:\", self.seqX\n\n\n def KeepBack ( self ) :\n\n if not hasattr ( self, 'prevSel' ) or self.prevSel == None :\n umsg ( \"Nothing selected previously... select something by Ctrl+Click+Drag on the sequence; this undoes the last selection, use Keep\" )\n return\n\n if hasattr ( self, 'prevSel' ) and len(self.prevSel) > 0 :\n self.prevSel.pop()\n\n chimera.selection.clearCurrent()\n\n for s in self.prevSel :\n print \" -s- adding to sel:\", s\n chimera.selection.mergeCurrent ( chimera.selection.EXTEND, chimera.selection.OSLSelection (s) )\n\n if self.selExtract.get () :\n self.ShowSel ()\n\n\n\n def SelReLoad ( self ) :\n\n if not hasattr ( self, 'prevSel' ) or self.prevSel == None :\n umsg ( \"Nothing selected previously... select something by Ctrl+Click+Drag on the sequence; this refreshes the selection\" )\n return\n\n for s in self.prevSel :\n print \" -s- adding to sel:\", s\n chimera.selection.mergeCurrent ( chimera.selection.EXTEND, chimera.selection.OSLSelection (s) )\n\n if self.selExtract.get () :\n self.ShowSel ()\n\n def SelLoad ( self ) :\n\n self.prevSel = []\n\n if 1 :\n self.prevSel.append ( \"#%d:735-735.A\" % self.cur_mol.id )\n self.prevSel.append ( \"#%d:796-796.A\" % self.cur_mol.id )\n self.prevSel.append ( \"#%d:799-799.A\" % self.cur_mol.id )\n self.prevSel.append ( \"#%d:137-137.L\" % self.cur_mol.id )\n self.prevSel.append ( \"#%d:108-108.C\" % self.cur_mol.id )\n self.prevSel.append ( \"#%d:789-789.A\" % self.cur_mol.id )\n\n elif 0 :\n self.prevSel.append ( \"#%d:41-41.A\" % self.cur_mol.id )\n self.prevSel.append ( \"#%d:7-7.A\" % self.cur_mol.id )\n #self.prevSel.append ( \"#%d:63-63.A\" % self.cur_mol.id )\n self.prevSel.append ( \"#%d:2-2.A\" % self.cur_mol.id )\n self.prevSel.append ( \"#%d:7-7.A\" % self.cur_mol.id )\n\n\n\n for s in self.prevSel :\n print \" -s- adding to sel:\", s\n chimera.selection.mergeCurrent ( chimera.selection.EXTEND, chimera.selection.OSLSelection (s) )\n\n if self.selExtract.get () :\n self.ShowSel ()\n\n\n\n\n\n def SelText ( self ) :\n\n self.prevSel = []\n\n print self.selText.get()\n\n ls = self.selText.get().split(\";\")\n\n for l in ls :\n #self.prevSel.append ( \"#%d:%s\" % (self.cur_mol.id,l) )\n self.prevSel.append ( \"%s\" % l )\n\n for s in self.prevSel :\n print \" -s- adding to sel:\", s\n chimera.selection.mergeCurrent ( chimera.selection.EXTEND, chimera.selection.OSLSelection (s) )\n\n if self.selExtract.get () :\n self.ShowSel ()\n\n\n fp = os.path.split ( self.cur_dmap.data.path )[0] + \"/_sel.txt\"\n found = False\n ls = []\n try :\n fo = open ( fp, \"r\" )\n for l in fo :\n if self.selText.get() in l :\n found = True\n fo.close()\n except :\n pass\n\n if found :\n print \" - found sel text\"\n else :\n fo = open ( fp, \"a\" )\n fo.write ( \"%s\\n\" % 
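            # NOTE: selection strings typed into the box are appended to a plain-text
            # _sel.txt file kept next to the map, one OSL spec per line, presumably so
            # they can be recalled later; this write only happens when the string was
            # not already found in that file.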
self.selText.get() )\n fo.close()\n\n\n\n\n\n def preserveSelCb (self) :\n print \"Preserve set to \", self.preserveSel.get()\n if self.preserveSel.get() :\n print \" - setting current selection to preserve...\"\n if hasattr ( self, 'lastSelStr' ) :\n self.prevSel = [ self.lastSelStr ]\n else :\n print \" - clearing current\"\n self.prevSel = []\n\n\n def preserveVolCb (self) :\n print \"Preserve vol set to \", self.preserveVol.get()\n\n\n #def keepExMapCb (self) :\n # print \"Kep ex map set to \", self.keepExMap.get()\n\n\n def ClearSel ( self ) :\n self.prevSel = []\n self.seqSel = None\n self.prevSeqSel = None\n self.UpdateSeqSel ()\n status ( \"Cleared sequence selection\" )\n chimera.selection.clearCurrent ()\n\n\n\n\n def ExCustA ( self ) :\n\n if self.cur_dmap == None :\n umsg (\"Select a map first\")\n return\n\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n #selStr = \"#%d:80-87.I,171-184.I,227-239.I,80-87.A,171-184.A,227-239.A,80-87.B,171-184.B,227-239.B,80-87.J,171-184.J,227-239.J,80-87.H,171-184.H,227-239.H\" % self.cur_mol.id\n selStr = \"#%d:80-87.I,171-184.I,227-239.I,80-87.A,171-184.A,227-239.A,80-87.J,171-184.J,227-239.J\" % self.cur_mol.id\n\n umsg ( \"Selected: \" + selStr )\n sel = chimera.selection.OSLSelection ( selStr )\n chimera.selection.setCurrent ( sel )\n self.ShowSel()\n\n\n def ExCustB ( self ) :\n\n if self.cur_dmap == None :\n umsg (\"Select a map first\")\n return\n\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n selStr = \"#%d:428-435.F,365-376.F,396-402.F,428-435.I,365-376.I,396-402.I\" % self.cur_mol.id\n #selStr = \"#%d:428-435.A,365-376.A,396-402.A,428-435.H,365-376.H,396-402.H\" % self.cur_mol.id\n\n\n umsg ( \"Selected: \" + selStr )\n sel = chimera.selection.OSLSelection ( selStr )\n chimera.selection.setCurrent ( sel )\n self.ShowSel()\n\n def ExCustC ( self ) :\n\n if self.cur_dmap == None :\n umsg (\"Select a map first\")\n return\n\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n #selStr = \"#%d:10:548-558.I,520-530.I,548-558.F,520-530.F\" % self.cur_mol.id\n selStr = \"#%d:428-435.F,365-376.F,396-402.F,428-435.I,365-376.I,396-402.I,548-558.I,520-530.I,548-558.F,520-530.F\" % self.cur_mol.id\n\n\n umsg ( \"Selected: \" + selStr )\n sel = chimera.selection.OSLSelection ( selStr )\n chimera.selection.setCurrent ( sel )\n self.ShowSel()\n\n\n def AdSel ( self ) :\n\n atoms = chimera.selection.currentAtoms ()\n\n R = float ( self.maskRad.get() )\n\n if len(atoms) > 0 :\n\n from _multiscale import get_atom_coordinates\n points = get_atom_coordinates ( atoms, transformed = True )\n COM, U, S, V = prAxes ( points )\n\n #atomRad = 2.0 # float ( self.maskWithSelDist.get() )\n print \" - %d selected atoms, mask at %.2f\" % ( len(atoms), R )\n dmap = self.cur_dmap\n\n label = \" %d_ats_%s.%d.%s mask\" % (len(atoms), atoms[0].name, atoms[0].residue.id.position, atoms[0].residue.id.chainId )\n\n if len ( atoms ) > 0 and dmap != None :\n #points = get_atom_coordinates ( atoms, transformed = False )\n self.PtsToMap ( points, dmap, R, dmap.name + label, False, alpha=0.2 if self.showMesh.get() else 0.4 )\n if self.showMesh.get () :\n self.PtsToMap ( points, dmap, R, dmap.name + label + \"_mesh\", True )\n\n\n def HideSel ( self ) :\n\n for r in chimera.selection.currentResidues() :\n r.ribbonDisplay = False\n for at in r.atoms :\n at.display = False\n\n\n def ShowSel ( self ) :\n\n #showRibbon = self.showRibbon.get()\n showRibbon = not self.showingAtoms # 
self.showRibbon.get()\n showLigands = self.showLigands.get()\n showSC = True # self.showAtoms.get()\n showH = self.showH.get()\n showW = self.showW.get()\n\n atoms = []\n scores = []\n selResM = {}\n\n if len ( chimera.selection.currentResidues () ) == 0 :\n umsg ( \"Nothing selected...\" )\n return\n\n for r in chimera.selection.currentResidues () :\n rid = \"%d.%s\" % (r.id.position, r.id.chainId)\n selResM [rid] = 1\n\n if self.cur_mol == None :\n return\n\n if 1 or not hasattr ( self.cur_mol, 'bbats' ) :\n SetBBAts(self.cur_mol)\n self.cur_mol.bbats = True\n\n\n atMap = {}\n for r in self.cur_mol.residues :\n rid = \"%d.%s\" % (r.id.position, r.id.chainId)\n if rid in selResM :\n\n if hasattr (r, 'scZ') and r.scZ != None :\n scores.append(r.scZ)\n\n r.ribbonDisplay = showRibbon\n\n for at in r.atoms :\n atMap[at] = 1\n if at.element.name == \"H\" :\n at.display = showH\n elif at.isSC :\n if showSC :\n at.display = True\n atoms.append ( at )\n else :\n at.display = False\n else :\n at.display = True\n atoms.append ( at )\n if at.element.name in atomColors :\n if at.element.name == \"H\" :\n continue\n if at.isBB :\n at.color = atomColors[at.element.name.upper()]\n #if at.element.name == \"C\" :\n # at.color = atomColors['Cbb']\n else :\n at.color = atomColors[at.element.name.upper()]\n\n else :\n r.ribbonDisplay = False\n for at in r.atoms :\n at.display = False\n\n\n atTree = None\n if showLigands :\n points = _multiscale.get_atom_coordinates ( atoms, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(atoms), len(r.molecule.atoms) )\n atTree = AdaptiveTree ( points.tolist(), atoms, 2.0)\n from chimera.resCode import protein3to1\n\n ligAts = []\n for r in self.cur_mol.residues :\n rid = \"%d.%s\" % (r.id.position, r.id.chainId)\n\n #if r.type == \"MG\" or r.type == \"HOH\" :\n if not r.isProt and not r.isNA :\n if len ( self.AtsWithin (r.atoms, 4.0, atTree) ) > 0 :\n for at in r.atoms :\n at.display = True\n atMap[at] = 1\n\n at.radius = 1.46\n at.drawMode = at.EndCap if at.element.name.lower() == \"o\" else at.Ball\n\n if at.element.name.upper() in atomColors :\n at.color = atomColors[at.element.name.upper()]\n atoms.append ( at )\n ligAts.append ( at )\n else :\n for at in r.atoms :\n at.display = False\n\n #chimera.selection.clearCurrent ()\n print \" - added %d ligand atoms to sel\" % len(ligAts)\n chimera.selection.addCurrent ( ligAts )\n\n\n for bond in self.cur_mol.bonds :\n a1, a2 = bond.atoms\n if a1 in atMap and a2 in atMap :\n if showW :\n bond.drawMode = bond.Wire\n else :\n bond.drawMode = bond.Stick\n #bond.display = bond.Smart\n\n\n if len(atoms) > 0 :\n\n from _multiscale import get_atom_coordinates\n points = get_atom_coordinates ( atoms, transformed = True )\n COM, U, S, V = prAxes ( points )\n\n moveCam = 1\n if moveCam :\n p0 = numpy.array ( chimera.viewer.camera.center )\n p1 = numpy.array ( [ COM[0], COM[1], COM[2] ] )\n for i in range (10) :\n f = float(i) / 9.0\n f1, f2 = 2.0*f*f*f-3.0*f*f+1.0, 3*f*f-2*f*f*f\n P = p0 * f1 + p1 * f2\n chimera.viewer.camera.center = (P[0],P[1],P[2])\n print \".\",\n print \"\"\n\n\n atomRad = 2.5 # float ( self.maskWithSelDist.get() )\n print \" - %d selected atoms, mask at %.2f\" % ( len(atoms), atomRad )\n dmap = self.cur_dmap\n\n mlist = OML(modelTypes = [VolumeViewer.volume.Volume])\n\n at = 1\n for m in mlist :\n if \"sel_masked\" in m.name :\n mname = m.name.split()[0]\n if not hasattr (self, 'cLevels') :\n self.cLevels = {}\n if not \"_mesh\" in m.name :\n self.cLevels[mname] = m.surface_levels[0]\n if not 
self.preserveVol.get() :\n chimera.openModels.close ( [m] )\n else :\n m.name = m.name.replace ( \"sel_masked\", \"prev_masked\" )\n\n if len ( atoms ) > 0 and dmap != None :\n #points = get_atom_coordinates ( atoms, transformed = False )\n self.PtsToMap ( points, dmap, atomRad, dmap.name + \" sel_masked\", False )\n if self.showMesh.get () :\n self.PtsToMap ( points, dmap, atomRad, dmap.name + \" sel_masked_mesh\", True )\n\n if len(scores) > 0 :\n umsg ( \"%d residue scores, avg score %.1f\" % ( len(scores), numpy.average(scores) ) )\n\n else :\n umsg ( \"no atoms selected, try reselecting the model and chain...\" )\n\n\n def AtsWithin (self, ats, R, atTree) :\n\n nearAts = []\n R2 = R * R\n for at in ats :\n pt = at.coord()\n vPt = numpy.array ( pt.data() )\n opointsNear = atTree.searchTree ( [pt[0], pt[1], pt[2]], R )\n if len(opointsNear) > 0 :\n for p in opointsNear :\n try :\n v = vPt - p.coord().data()\n except :\n continue\n sqSum = numpy.sum ( v * v )\n if sqSum < R2 :\n nearAts.append (p)\n\n return nearAts\n\n\n def AtsWithinPt (self, pt, R, atTree) :\n\n nearAts = []\n R2 = R * R\n\n vPt = numpy.array ( [pt[0], pt[1], pt[2]] )\n opointsNear = atTree.searchTree ( [pt[0], pt[1], pt[2]], R )\n if len(opointsNear) > 0 :\n for p in opointsNear :\n try :\n v = vPt - p.coord().data()\n except :\n continue\n sqSum = numpy.sum ( v * v )\n if sqSum < R2 :\n nearAts.append ( [numpy.sqrt(sqSum), p] )\n\n return nearAts\n\n\n\n\n def PtsToMap0 ( self, points, dmap, atomRad, nname, neg = 1.0 ) :\n import _contour\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, atomRad )\n\n gdata = VolumeData.Array_Grid_Data ( mdata.full_matrix()*neg, dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name = nname )\n nv = VolumeViewer.volume.volume_from_grid_data ( gdata )\n nv.name = nname\n dmap.display = False\n nv.region = ( nv.region[0], nv.region[1], [1,1,1] )\n nv.surface_levels[0] = dmap.surface_levels[0]\n ro = VolumeViewer.volume.Rendering_Options()\n #ro.smoothing_factor = .5\n #ro.smoothing_iterations = 20\n #ro.surface_smoothing = True\n nv.update_surface ( False, ro )\n for sp in nv.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 :\n sp.display = False\n else :\n sp.color = (0.7, 0.7, 0.7, 0.4)\n\n\n def PtsToMap ( self, points, dmap, atomRad, nname, showMesh = False, alpha=0.2 ) :\n\n #_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n #mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, atomRad )\n\n import _contour\n points1 = numpy.copy ( points )\n _contour.affine_transform_vertices ( points1, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n points0 = numpy.copy ( points1 )\n _contour.affine_transform_vertices ( points1, dmap.data.xyz_to_ijk_transform )\n\n bound = 5\n li,lj,lk = numpy.min ( points1, axis=0 ) - (bound, bound, bound)\n hi,hj,hk = numpy.max ( points1, axis=0 ) + (bound, bound, bound)\n\n n1 = hi - li + 1\n n2 = hj - lj + 1\n n3 = hk - lk + 1\n\n #print \" - bounds - %d %d %d --> %d %d %d --> %d %d %d\" % ( li,lj,lk, hi,hj,hk, n1,n2,n3 )\n\n #nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )\n #dmat = dmap.full_matrix()\n\n nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )\n #nstep = (fmap.data.step[0]/2.0, fmap.data.step[1]/2.0, fmap.data.step[2]/2.0 )\n\n nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )\n nn2 = int ( round 
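        # NOTE: nn1/nn2/nn3 rescale the cropped box dimensions by the ratio of the
        # original voxel size to the output step 'nstep'; with nstep set equal to
        # dmap.data.step above this is effectively a no-op (nn == n), but it leaves
        # room for resampling onto a finer grid (see the commented step/2.0 line).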
(dmap.data.step[1] * float(n2) / nstep[1]) )\n nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )\n\n O = dmap.data.origin\n #print \" - %s origin:\" % dmap.name, O\n nO = ( O[0] + float(li) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + float(lk) * dmap.data.step[2] )\n\n #print \" - new map origin:\", nO\n\n nmat = numpy.zeros ( (nn1,nn2,nn3), numpy.float32 )\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )\n\n #print \" - fmap grid dim: \", numpy.shape ( fmap.full_matrix() )\n #print \" - new map grid dim: \", numpy.shape ( nmat )\n\n npoints = VolumeData.grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices\n _contour.affine_transform_vertices ( npoints, ndata.ijk_to_xyz_transform )\n\n dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )\n #dvals = dmap.interpolated_values ( npoints, chimera.Xform.identity() )\n #dvals = dmap.interpolated_values ( npoints, dmap.openState.xform.inverse() )\n #dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )\n #nze = numpy.nonzero ( dvals )\n\n nmat = dvals.reshape( (nn3,nn2,nn1) )\n #f_mat = fmap.data.full_matrix()\n #f_mask = numpy.where ( f_mat > fmap.surface_levels[0], numpy.ones_like(f_mat), numpy.zeros_like(f_mat) )\n #df_mat = df_mat * f_mask\n\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )\n #try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n #except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n #nv.openState.xform = dmap.openState.xform\n\n mdata = VolumeData.zone_masked_grid_data ( ndata, points0, atomRad )\n gdata = VolumeData.Array_Grid_Data ( mdata.full_matrix(), nO, nstep, dmap.data.cell_angles, name = nname )\n nv = VolumeViewer.volume.volume_from_grid_data ( gdata )\n nv.openState.xform = dmap.openState.xform\n\n nv.name = nname\n dmap.display = False\n nv.region = ( nv.region[0], nv.region[1], [1,1,1] )\n\n if hasattr (self, 'cLevels') and dmap.name in self.cLevels :\n print \"%s -- %.2f\" % (dmap.name, self.cLevels[dmap.name])\n nv.surface_levels[0] = self.cLevels[dmap.name]\n else :\n nv.surface_levels[0] = dmap.surface_levels[0]\n\n M = dmap.data.full_matrix()\n sdev, avg, thr = numpy.std(M), numpy.average(M), nv.surface_levels[0]\n\n M = dmap.data.full_matrix()\n lsdev, lavg = numpy.std(nmat), numpy.average(nmat)\n\n #print \"Avg: %.3f, sdev: %.3f, thr: %.4f [%.4f sdev above mean]\" % (avg, sdev, thr, (thr-avg)/sdev)\n sigmaGlobal = (thr-avg)/sdev\n sigmaLocal = (thr-lavg)/lsdev\n umsg ( \"Contour level: %.4f, %.2f/%.2f sigma above average global/local\" % (thr, sigmaGlobal, sigmaLocal) )\n #print sigmaGlobal, sigmaLocal\n\n\n ro = VolumeViewer.volume.Rendering_Options()\n ro.smoothing_factor = .3\n ro.smoothing_iterations = 2\n ro.surface_smoothing = False\n ro.square_mesh = True\n ro.line_thickness = 2\n nv.update_surface ( False, ro )\n setro (ro)\n for sp in nv.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 :\n sp.display = False\n else :\n if showMesh :\n sp.color = (.5, .5, .5, 1.0)\n sp.displayStyle = sp.Mesh\n else :\n sp.color = (0.7, 0.7, 0.7, alpha)\n\n return nv\n\n\n def B1_Drag (self, event):\n #print \"b1m \", event.x, event.y\n\n if self.drag == 'seq' :\n d = event.x - self.last_x\n self.seqX += d\n #GetSegMod().seqX = self.seqX\n self.UpdateSeq ()\n elif self.drag == 'mod' :\n d = event.x - self.last_x\n self.modX += d\n #GetSegMod().modX = self.modX\n self.UpdateMod ()\n elif self.drag == 'seqSel' :\n if hasattr ( self, 
'seqSel' ):\n seqI = ( event.x - self.seqX ) / self.tw\n if seqI > self.seqSel[0] :\n self.seqSel[1] = seqI\n elif seqI < self.seqSel[1] :\n self.seqSel[0] = seqI\n\n resStartI = self.seqSel[0]+1\n resEndI = self.seqSel[1]+1\n try :\n resStartI = self.seqRes [ self.seqSel[0] ].id.position\n resEndI = self.seqRes [ self.seqSel[1] ].id.position\n except :\n pass\n status ( \"Sequence selected %d - %d\" % (resStartI,resEndI) )\n\n self.UpdateSeqSel ()\n elif self.drag == 'con' :\n x1, y1, x2, y2 = self.Canvas.coords ( self.conLine )\n self.Canvas.coords ( self.conLine, x1, y1, event.x, event.y )\n self.SelectedModClr ( event.x, event.y )\n elif self.drag == \"modRot\" :\n dx = event.x - self.dragStartX\n self.dragStartX = event.x\n self.dragMod.Rotate ( dx )\n\n\n\n self.last_x = event.x\n self.last_y = event.y\n\n\n def B2_Down (self, event):\n print \"b2 - down\"\n\n\n\n\n def B2_Up (self, event):\n print \"b2 - up\", event.x, event.y\n\n if hasattr ( self, 'selModPiece' ) and self.selModPiece != None :\n\n if self.selModPiece.type == \"Loop\" :\n self.selModPiece.MakeMod ()\n\n else :\n self.selModPiece.switch = not self.selModPiece.switch\n self.selModPiece.MakeMod ()\n self.UpdateMod ()\n\n\n def B2_Up_Ctrl (self, event):\n print \"b2 - up - control\", event.x, event.y\n if hasattr ( self, 'selModPiece' ) and self.selModPiece != None :\n if self.selModPiece.type == \"Loop\" :\n MakeLoopMod1 ( self.selModPiece )\n #MakeLoopMod ( self.selModPiece )\n\n\n\n def B2_Up_Alt (self, event):\n print \"b2 - up - alt\", event.x, event.y\n if hasattr ( self, 'selModPiece' ) and self.selModPiece != None :\n if self.selModPiece.type == \"Loop\" :\n LoopPathOpt ( self.selModPiece, self.refUseMap.get() )\n\n\n def B2_Up_Shift (self, event):\n print \"b2 - up - alt\", event.x, event.y\n if hasattr ( self, 'selModPiece' ) and self.selModPiece != None :\n if self.selModPiece.type == \"Loop\" :\n LoopPathOpt ( self.selModPiece, self.refUseMap.get() )\n\n\n\n def B2_Up_Comm (self, event):\n print \"b2 - up - command\", event.x, event.y\n\n\n\n\n def B2_Drag (self, event):\n #print \"b2m \", event.x, event.y\n pass\n\n\n\n def B3_Down (self, event):\n\n print \"b3 _\", event.x, event.y\n\n\n\n\n def B3_Up (self, event):\n print \"b3 ^\", event.x, event.y\n self.B2_Up ( event )\n\n\n def B3_Drag (self, event):\n #print \"b3m \", event.x, event.y\n pass\n\n\n def isInSeq ( self, x, y ) :\n if y >= self.seqY and y <= self.seqY + self.seqH :\n return True\n else :\n return False\n\n def isInMod ( self, x, y ) :\n if y >= self.modY and y <= self.modY + self.modH :\n return True\n else :\n return False\n\n\n def Mouse_Move (self, event):\n #print \"mod m \", event.x, event.y\n #self.Canvas.coords ( self.seqMouseLine, event.x,self.seqY,event.x,self.seqY+self.seqH )\n\n if self.isInSeq ( event.x, event.y ) and hasattr ( self, 'seq') and len(self.seq) > 0 :\n\n if hasattr ( self, 'seqRec' ) and hasattr ( self, 'tw' ) and hasattr ( self, 'seqMouseR' ) :\n self.Canvas.itemconfigure ( self.seqRec, state=Tkinter.NORMAL )\n\n si = ( event.x - self.seqX ) / self.tw\n if si < 0 :\n si = 0\n if si < len ( self.seq ) :\n res = self.seqRes [ si ]\n ri = self.seqRi [si]\n resEnd = self.seqRes [ len(self.seqRes) - 1 ]\n resStart = self.seqRes [ 0 ]\n\n if res != None :\n try :\n status ( \"Sequence: %s/%s %d/%d\" % ( self.seq[si], res.type, res.id.position, resEnd.id.position ) )\n except :\n return\n else :\n try :\n status ( \"Sequence: ?/? 
%d/%d\" % ( ri, resEnd.id.position ) )\n except :\n return\n\n y0 = self.seqY+5\n y1 = self.seqY+self.seqH-5\n if event.y >= y0 and event.y <= y1 and hasattr ( self, 'seqMouseR' ) :\n x0 = self.seqX + si * self.tw\n x1 = x0 + self.tw\n self.Canvas.coords ( self.seqMouseR, x0, y0, x1, y1 )\n self.Canvas.itemconfigure ( self.seqMouseR, state=Tkinter.NORMAL )\n else :\n self.Canvas.itemconfigure ( self.seqMouseR, state=Tkinter.HIDDEN )\n\n else :\n self.Canvas.itemconfigure ( self.seqRec, state=Tkinter.HIDDEN )\n\n if hasattr ( self, 'seqMouseR' ) :\n self.Canvas.itemconfigure ( self.seqMouseR, state=Tkinter.HIDDEN )\n\n\n self.last_x = event.x\n self.last_y = event.y\n\n\n def Canvas_Leave ( self, event ) :\n #self.Canvas.coords ( self.seqMouseLine, 0,0,0,0 )\n pass\n\n\n def Canvas_Config (self, event) :\n #print \"mod cfg \", event.width, event.height\n self.W = event.width\n self.H = event.height\n\n #self.Canvas.delete(\"all\")\n if 1 :\n if hasattr(self, 'backRec') :\n self.Canvas.coords (self.backRec, 0, 0, self.W, self.H)\n else :\n self.backRec = self.Canvas.create_rectangle(0, 0, self.W, self.H, outline=\"#eee\", fill=\"#eee\")\n #self.seqMouseLine = self.Canvas.create_line(0, 0, 0, 0, fill=\"#66a\")\n\n if hasattr ( self, 'seqRec' ) :\n self.Canvas.coords ( self.seqRec, 0, self.seqY, self.W, self.seqY+self.seqH )\n else :\n self.seqRec = self.Canvas.create_rectangle(0, self.seqY, self.W, self.seqY+self.seqH, outline=\"#ddd\", fill=\"#ddd\" )\n\n self.Canvas.tag_lower(self.seqRec)\n self.Canvas.tag_lower(self.backRec)\n\n\n def Canvas_Wheel ( self, event ) :\n\n if self.isInSeq (self.last_x, self.last_y) :\n\n self.seqX += event.delta * 10\n\n if 0 :\n self.mag = self.mag + event.delta\n if self.mag > 15 : self.mag = 15\n if self.mag < 2 : self.mag = 2\n status ( \"Mag: %d\" % self.mag )\n\n self.font = tkFont.Font(family='Courier', size=(self.mag), weight='normal')\n #self.boldFont = tkFont.Font(family='Courier', size=(self.mag+4), weight='bold')\n self.tw = self.font.measure ( \"a\" )\n\n #GetSegMod().seqX = self.seqX\n #self.UpdateSeqFont ()\n\n #self.UpdateSeqFont ()\n self.UpdateSeq ()\n\n # ['__doc__', '__module__', 'char', 'delta', 'height', 'keycode', 'keysym', 'keysym_num', 'num', 'send_event', 'serial', 'state', 'time', 'type', 'widget', 'width', 'x', 'x_root', 'y', 'y_root']\n #print dir(event)\n #print event.delta\n\n\n\n def ZoomMinus ( self ) :\n self.mag = self.mag - 1\n if self.mag > 15 : self.mag = 15\n if self.mag < 2 : self.mag = 2\n #print \"w \", event.delta, \" mag: \", self.mag\n\n self.font = tkFont.Font(family='Courier', size=(self.mag), weight='normal')\n #self.boldFont = tkFont.Font(family='Courier', size=(self.mag+4), weight='bold')\n self.tw = self.font.measure ( \"a\" )\n\n self.UpdateSeqFont ()\n self.UpdateSeq ()\n status ( \"Magnification: %d\" % self.mag )\n\n\n\n def ZoomPlus ( self ) :\n self.mag = self.mag + 1\n if self.mag > 15 : self.mag = 15\n if self.mag < 2 : self.mag = 2\n #print \"w \", event.delta, \" mag: \", self.mag\n\n self.font = tkFont.Font(family='Courier', size=(self.mag), weight='normal')\n #self.boldFont = tkFont.Font(family='Courier', size=(self.mag+4), weight='bold')\n self.tw = self.font.measure ( \"a\" )\n\n self.UpdateSeqFont ()\n self.UpdateSeq ()\n status ( \"Magnification: %d\" % self.mag )\n\n\n def ZoomBegin ( self ) :\n self.seqX = 10\n self.UpdateSeqFont ()\n self.UpdateSeq ()\n\n def ZoomEnd ( self ) :\n self.seqX = - ( len(self.seq) - 50 ) * self.tw\n self.UpdateSeqFont ()\n self.UpdateSeq ()\n\n\n\n\n def 
isSelected ( self, fmap ) :\n for sp in fmap.surfacePieces :\n if sp in Surface.selected_surface_pieces() :\n return True\n return False\n\n\n\n\n def S_sel (self) :\n\n # show sigma for a side chain\n\n selAts = chimera.selection.currentAtoms()\n if len ( selAts ) == 0 :\n return\n\n dmap = self.cur_dmap\n\n\n selAtom = selAts[0]\n r = selAtom.residue\n print \"Res: %s - %d.%s - %s - Atom: %s\" % (r.type, r.id.position, r.id.chainId, r.molecule.name, selAtom.name)\n\n if 1 or not hasattr ( r.molecule, 'bbats' ) :\n SetBBAts(r.molecule)\n r.molecule.bbats = True\n\n removeMods = []\n for m in chimera.openModels.list() :\n if \"RAD points\" in m.name :\n removeMods.append ( m )\n chimera.openModels.remove ( removeMods )\n\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(r.molecule.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n #allAtTree = None\n #print \"-\"\n\n import time\n start = time.time()\n\n #sigma = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=1, log=1, numPts=30, toRAD=2, dRAD=0.5 )\n\n if 0 :\n print \"_sigma____________________________\"\n sigma = RadAts ( [selAtom], dmap, allAtTree=allAtTree, show=1, log=1, numPts=30, toRAD=2, dRAD=0.1 )\n res = sigma * numpy.pi * numpy.sqrt(2.0)\n end = time.time()\n print \"%s - sigma: %.3f, res: %.3f, time: %f\" % ( selAtom.name, sigma, res, (end - start) )\n\n\n minD, maxD = qscores.MinMaxD ( dmap )\n print \" - mind: %.3f, maxd: %.3f\" % (minD, maxD)\n #sigma = 0.6\n\n sigma = float(self.sigma.get())\n\n start = time.time()\n qq = qscores.Qscore ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=1, log=1, numPts=8, toRAD=2.0, dRAD=0.5, minD=minD, maxD=maxD, fitg=0 )\n end = time.time()\n print \" - time: %f\" % ( (end - start) )\n\n start = time.time()\n qq = qscores.Qscore ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=0, log=1, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=1 )\n end = time.time()\n print \" - time: %f\" % ( (end - start) )\n\n start = time.time()\n qq = qscores.Qscore ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=0, log=1, numPts=20, toRAD=3.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=1 )\n end = time.time()\n print \" - time: %f\" % ( (end - start) )\n\n #CC, CCm, yds, err = rr\n\n print r\n\n\n\n def Q_sel (self) :\n\n # show sigma for a side chain\n\n selAts = chimera.selection.currentAtoms()\n if len ( selAts ) == 0 :\n return\n\n dmap = self.cur_dmap\n\n\n selAtom = selAts[0]\n r = selAtom.residue\n print \"\"\n print \"Res: %s - %d.%s - %s - Atom: %s\" % (r.type, r.id.position, r.id.chainId, r.molecule.name, selAtom.name)\n\n print \" - in map: %s\" % dmap.name\n\n if 1 or not hasattr ( r.molecule, 'bbats' ) :\n SetBBAts(r.molecule)\n r.molecule.bbats = True\n\n removeMods = []\n for m in chimera.openModels.list() :\n if \"RAD points\" in m.name :\n removeMods.append ( m )\n #chimera.openModels.remove ( removeMods )\n\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n\n if self.showH.get() :\n ats = self.cur_mol.atoms\n\n allAtTree = None\n #print \"-\"\n if 1 :\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(r.molecule.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n allPtsTree = None\n if 1 :\n allPts = [None] * len(ats) # numpy.array.zeros ( [numAts,3] )\n ati = 0\n for at in ats :\n 
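                # NOTE: this builds a second AdaptiveTree keyed on bare [x, y, z]
                # coordinate lists rather than atom objects; QscorePt2 below scores a
                # point in the map directly, so it needs a tree of points instead of
                # the atom-based allAtTree used by Qscore.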
p = at.coord()\n allPts[ati] = [ p.x, p.y, p.z ]\n ati += 1\n\n print \" - %d pts tree\" % len(ats)\n allPtTree = AdaptiveTree ( allPts, allPts, 1.0)\n\n\n #import grid\n #reload(grid)\n #agrid = grid.Grid ()\n #agrid.FromAtomsLocal ( ats, 2.0 )\n sigma = float(self.sigma.get())\n\n import time\n start = time.time()\n\n if 0 :\n print \"_sigma____________________________\"\n\n sigma = RadAts ( [selAtom], dmap, allAtTree=allAtTree, show=1, log=1, numPts=30, toRAD=2, dRAD=0.5 )\n res = sigma * numpy.pi * numpy.sqrt(2.0)\n\n end = time.time()\n print \"%s - sigma: %.3f, res: %.3f, time: %f\" % ( selAtom.name, sigma, res, (end - start) )\n\n elif 1 :\n print \"\"\n print \"_Q_score____________________________\"\n\n minD, maxD = qscores.MinMaxD ( dmap )\n print \" - mind: %.3f, maxd: %.3f\" % (minD, maxD)\n\n start = time.time()\n qs = 0\n qs = qscores.Qscore ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n #qs, yds, err = qs\n end = time.time()\n print \" - sigma (at): %.3f, Q-score: %.3f, time: %f\" % ( sigma, qs, (end - start) )\n\n start = time.time()\n atPt = selAtom.coord()\n atPt = [atPt.x, atPt.y, atPt.z]\n xfI = selAtom.molecule.openState.xform\n qs = qscores.QscorePt2 ( atPt, xfI, dmap, sigma, allPtTree=allPtTree, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n end = time.time()\n print \" - sigma (pt): %.3f, Q-score: %.3f, time: %f\" % ( sigma, qs, (end - start) )\n\n\n\n elif 0 :\n print \"\"\n print \"_Q_score____________________________\"\n\n minD, maxD = qscores.MinMaxD ( dmap )\n print \" - mind: %.3f, maxd: %.3f\" % (minD, maxD)\n\n\n if 0 :\n minD = numpy.min(M)\n print \" - min before masking: %.4f\" % minD\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, 0.5 )\n M = mdata.full_matrix ()\n minD = numpy.min(M)\n print \" - min after masking: %.4f\" % minD\n M = numpy.where ( M == 0.0, numpy.ones_like(M)*(minD-0.2), M )\n import _volume\n points = _volume.high_indices(M, minD-0.1)\n fpoints = points.astype(numpy.single)\n fpoint_weights = M[points[:,2],points[:,1],points[:,0]]\n minD = numpy.min(fpoint_weights)\n print \" - min of mask pts: %.4f\" % minD\n\n\n #sigma = 2.0 / (numpy.pi * numpy.sqrt(2.0))\n #sigma = 0.4\n\n start = time.time()\n qs = 0\n qs = qscores.Qscore ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.2, minD=minD, maxD=maxD, fitg=0 )\n #qs, yds, err = qs\n end = time.time()\n print \" - sigma: %.3f, Q-score: %.3f, time: %f\" % ( sigma, qs, (end - start) )\n\n\n if 0 :\n start = time.time()\n qs = 0\n #qs = qscores.Qscore ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.2, minD=minD, maxD=maxD, fitg=0 )\n qs = qscores.QscoreG ( [selAtom], dmap, sigma, agrid=agrid, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.2, minD=minD, maxD=maxD, fitg=0 )\n #qs, yds, err = qs\n end = time.time()\n print \" - sigma: %.3f, Q-score: %.3f, time: %f\" % ( sigma, qs, (end - start) )\n\n\n start = time.time()\n #qs = qscores.QscoreM ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n qs = qscores.QscoreM ( [selAtom], dmap, 0.6, agrid=agrid, allAtTree=None, show=1, log=0, toRAD=2.0, step=0.2, minD=minD, maxD=maxD, useMask=True )\n #qs = 
qscores.QscoreM ( selAtom.residue.atoms, dmap, 4.0, agrid=agrid, show=1, log=0, toRAD=2.0, step=0.2, minD=minD, maxD=maxD, useMask=True )\n #qs, yds, err = qs\n end = time.time()\n print \" - sigma: %.3f, Q-score: %.3f, time: %f (M)\" % ( sigma, qs, (end - start) )\n\n\n start = time.time()\n #qs = qscores.QscoreM ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n qs = qscores.QscoreM ( [selAtom], dmap, 0.6, agrid=None, allAtTree=None, show=1, log=0, toRAD=2.0, step=0.2, minD=minD, maxD=maxD, useMask=False )\n #qs = qscores.QscoreM ( selAtom.residue.atoms, dmap, 4.0, agrid=agrid, show=1, log=0, toRAD=2.0, step=0.2, minD=minD, maxD=maxD, useMask=True )\n #qs, yds, err = qs\n end = time.time()\n print \" - sigma: %.3f, Q-score: %.3f, time: %f (noM)\" % ( sigma, qs, (end - start) )\n\n\n if 0 :\n #def QscorePt ( atPt, xfI, dmap, sigma, allAtTree = None, log=0, numPts=8, toRAD=2.0, dRAD=0.5, minD=None, maxD=None, fitg=0 ) :\n\n pt = selAtom.coord().data()\n xfI = selAtom.molecule.openState.xform\n\n #pt = selAtom.xformCoord().data()\n #xfI = chimera.Xform()\n\n start = time.time()\n qs = qscores.QscorePt ( pt, xfI, dmap, sigma, allAtTree=allAtTree, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n #qs, yds, err = qs\n end = time.time()\n print \" - sigma: %.3f, Q-score Pt: %.3f, time: %f\" % ( sigma, qs, (end - start) )\n\n\n if 0 :\n print \"Atoms in %d.%s %s\" % (selAtom.residue.id.position, selAtom.residue.id.chainId, selAtom.residue.type)\n #print \"-\"\n\n avg, N = 0.0, 0.0\n #bbAts, scAts, baseAts, sugarAts = [], [], [], []\n\n for at in selAtom.residue.atoms :\n\n at.Q = qscores.Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )\n print \" - %s : %.2f\" % (at.name, at.Q)\n if 1 or at.isSC :\n avg += at.Q\n N += 1.0\n\n #if at.residue.isNA and at.isBB : bbAts.append ( at )\n #if at.residue.isNA and at.isSugar : sugarAts.append ( at )\n #if at.residue.isNA and at.isBase : baseAts.append ( at )\n\n #if at.residue.isProt and at.isBB : bbAts.append ( at )\n #if at.residue.isProt and at.isSC : scAts.append ( at )\n\n\n if selAtom.residue.isNA :\n print \"NA:\"\n print \" - backbone Q: %.2f\" % numpy.average ( [at.Q for at in selAtom.residue.bbAtoms] )\n #print \" - sugar Q: %.2f\" % numpy.average ( [at.Q for at in sugarAts] )\n print \" - base Q: %.2f\" % numpy.average ( [at.Q for at in selAtom.residue.scAtoms] )\n\n if selAtom.residue.isProt :\n print \"Protein:\"\n print \" - backbone Q: %.2f\" % numpy.average ( [at.Q for at in selAtom.residue.bbAtoms] )\n print \" - side chain Q: %.2f\" % numpy.average ( [at.Q for at in selAtom.residue.scAtoms] )\n\n if N > 0 :\n print \"All:\"\n #print \" - avg sc Q: %.2f\" % (avg/N)\n print \" - avg Q: %.2f\" % (avg/N)\n\n\n\n\n\n def Q_show (self) :\n\n # show sigma for a side chain\n\n selAts = chimera.selection.currentAtoms()\n if len ( selAts ) == 0 :\n return\n\n dmap = self.cur_dmap\n\n\n selAtom = selAts[0]\n r = selAtom.residue\n print \"\"\n print \"Res: %s - %d.%s - %s - Atom: %s\" % (r.type, r.id.position, r.id.chainId, r.molecule.name, selAtom.name)\n\n print \" - in map: %s\" % dmap.name\n\n if 1 or not hasattr ( r.molecule, 'bbats' ) :\n SetBBAts(r.molecule)\n r.molecule.bbats = True\n\n removeMods = []\n for m in chimera.openModels.list() :\n if \"RAD points\" in m.name :\n removeMods.append ( m )\n #chimera.openModels.remove ( removeMods )\n\n\n ats = [at for at in 
self.cur_mol.atoms if not at.element.name == \"H\"]\n if self.showH.get() :\n ats = self.cur_mol.atoms\n\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(r.molecule.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n sigma = float(self.sigma.get())\n\n #allAtTree = None\n #print \"-\"\n\n import time\n start = time.time()\n\n #sigma = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=1, log=1, numPts=30, toRAD=2, dRAD=0.5 )\n\n if 0 :\n print \"_sigma____________________________\"\n\n sigma = RadAts ( [selAtom], dmap, allAtTree=allAtTree, show=1, log=1, numPts=30, toRAD=2, dRAD=0.5 )\n res = sigma * numpy.pi * numpy.sqrt(2.0)\n\n end = time.time()\n print \"%s - sigma: %.3f, res: %.3f, time: %f\" % ( selAtom.name, sigma, res, (end - start) )\n\n elif 1 :\n print \"_Q_score____________________________\"\n\n minD, maxD = qscores.MinMaxD ( dmap )\n print \" - mind: %.3f, maxd: %.3f\" % (minD, maxD)\n\n\n if 0 :\n minD = numpy.min(M)\n print \" - min before masking: %.4f\" % minD\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, 0.5 )\n M = mdata.full_matrix ()\n minD = numpy.min(M)\n print \" - min after masking: %.4f\" % minD\n M = numpy.where ( M == 0.0, numpy.ones_like(M)*(minD-0.2), M )\n import _volume\n points = _volume.high_indices(M, minD-0.1)\n fpoints = points.astype(numpy.single)\n fpoint_weights = M[points[:,2],points[:,1],points[:,0]]\n minD = numpy.min(fpoint_weights)\n print \" - min of mask pts: %.4f\" % minD\n\n\n #sigma = 2.0 / (numpy.pi * numpy.sqrt(2.0))\n #sigma = 0.4\n\n qs, yds, err = 0,0,0\n\n if 0 :\n rr = qscores.Qscore ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=1, log=1, numPts=20, toRAD=2.0, dRAD=0.5, minD=minD, maxD=maxD, fitg=1 )\n qs, yds, err = rr\n\n elif 1 :\n rr = qscores.Qscore ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=0, log=1, numPts=30, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n qs, yds, err = rr\n\n else :\n qs = qscores.Qscore ( [selAtom], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n\n end = time.time()\n print \" - sigma: %.3f, Q-score: %.3f, time: %f\" % ( sigma, qs, (end - start) )\n\n print \"Atoms in %d.%s %s\" % (selAtom.residue.id.position, selAtom.residue.id.chainId, selAtom.residue.type)\n #print \"-\"\n\n\n if 0 :\n avg, N = 0.0, 0.0\n #bbAts, scAts, baseAts, sugarAts = [], [], [], []\n\n for at in selAtom.residue.atoms :\n at.Q = qscores.Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD )\n print \" - %s : %.2f\" % (at.name, at.Q)\n if 1 or at.isSC :\n avg += at.Q\n N += 1.0\n\n #if at.residue.isNA and at.isBB : bbAts.append ( at )\n #if at.residue.isNA and at.isSugar : sugarAts.append ( at )\n #if at.residue.isNA and at.isBase : baseAts.append ( at )\n\n #if at.residue.isProt and at.isBB : bbAts.append ( at )\n #if at.residue.isProt and at.isSC : scAts.append ( at )\n\n\n if selAtom.residue.isNA :\n print \"NA:\"\n print \" - backbone Q: %.2f\" % numpy.average ( [at.Q for at in selAtom.residue.bbAtoms] )\n #print \" - sugar Q: %.2f\" % numpy.average ( [at.Q for at in sugarAts] )\n print \" - base Q: %.2f\" % numpy.average ( [at.Q for at in selAtom.residue.scAtoms] )\n\n if selAtom.residue.isProt 
:\n print \"Protein:\"\n print \" - backbone Q: %.2f\" % numpy.average ( [at.Q for at in selAtom.residue.bbAtoms] )\n print \" - side chain Q: %.2f\" % numpy.average ( [at.Q for at in selAtom.residue.scAtoms] )\n\n if N > 0 :\n print \"All:\"\n #print \" - avg sc Q: %.2f\" % (avg/N)\n print \" - avg Q: %.2f\" % (avg/N)\n\n\n\n def CalcSelQ (self) :\n\n # show sigma for a side chain\n\n atoms = chimera.selection.currentAtoms()\n if len ( atoms ) == 0 :\n umsg ( \"No selected atoms found\" )\n return\n\n dmap = self.cur_dmap\n mol = atoms[0].molecule\n\n umsg ( \"Calculating Q-scores of %d atoms...\" % len(atoms) )\n\n\n #selAtom = selAts[0]\n #r = selAtom.residue\n #print \"Res: %s - %d.%s - %s - Atom: %s\" % (r.type, r.id.position, r.id.chainId, r.molecule.name, selAtom.name)\n\n sigma = float(self.sigma.get())\n\n #sigma = 0.4\n print \" - in map: %s\" % dmap.name\n print \" - mol: %s\" % mol.name\n print \" - sigma: %.2f\" % sigma\n\n if 1 or not hasattr ( mol.name, 'bbats' ) :\n SetBBAts(mol)\n mol.bbats = True\n\n ats = [at for at in mol.atoms if not at.element.name == \"H\"]\n if self.showH.get() :\n ats = mol.atoms\n\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(mol.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n minD, maxD = qscores.MinMaxD ( dmap )\n print \" - minD %.3f, maxD %.3f\" % (minD, maxD)\n\n import time\n start = time.time()\n\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Calculating Q-scores', modal = True)\n\n avg, avgBB, avgSC, numBB, numSC = 0.0, 0.0, 0.0, 0, 0\n\n import traceback\n\n nonhatoms = []\n try :\n\n for ai, at in enumerate ( atoms ) :\n\n if at.element.name == \"H\" :\n continue\n\n nonhatoms.append ( at )\n at.Q = qscores.Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n at.bfactor = at.Q\n avg += at.Q\n\n if at.isBB :\n avgBB += at.Q\n numBB += 1\n if at.isSC :\n avgSC += at.Q\n numSC += 1\n\n if (ai+1) % 10 == 0 :\n leftTime = qscores.TimeLeftStr (ai, len(atoms), time.time() - start)\n status ( \"Calculating Q scores - atom %d/%d - eta: %s\" % (ai+1, len(atoms), leftTime) )\n print \".\",\n #task.updateStatus( \"Calculating Q scores - atom %d/%d - %s in %s.%d.%s - eta: %s\" % (ai+1, len(atoms), at.name, at.residue.type, at.residue.id.position, at.residue.id.chainId, leftTime) )\n task.updateStatus( \"Calculating Q scores - atom %d/%d - eta: %s\" % (ai+1, len(atoms), leftTime) )\n\n except Exception, err:\n umsg ( \"Something went wrong...\" )\n print Exception, err\n traceback.print_exc()\n return\n\n\n finally :\n task.finished()\n\n\n for at in nonhatoms :\n print \" - atom: %s %d.%s %s : %.3f\" % (at.residue.type, at.residue.id.position, at.residue.id.chainId, at.name, at.Q)\n\n if numBB > 0 :\n print \"%d backbone atoms, Q=%.2f\" % (numBB, avgBB/float(numBB))\n if numSC > 0 :\n print \"%d sidechain atoms, Q=%.2f\" % (numSC, avgSC/float(numSC))\n\n avgq = avg / float(len(nonhatoms))\n if len(atoms) > 1 :\n umsg ( \"Q-score of %d atoms: %.3f\" % (len(nonhatoms), avgq) )\n else :\n umsg ( \"Q-score of %d atom: %.3f\" % (len(nonhatoms), avgq) )\n\n res = float(self.mapRes.get())\n cc1, cc2 = ResCC ( mol, atoms, res, dmap )\n\n cc1_, cc2_ = ResCC ( mol, nonhatoms, res, dmap )\n\n\n numClash = 0.0\n if 1 and allAtTree :\n for at in nonhatoms :\n anear = allAtTree.searchTree ( at.coord().data(), 2.0 )\n for nat in anear :\n if nat.residue != at.residue 
:\n v = at.coord() - nat.coord()\n if v.length < 1.8 :\n numClash += 1.0\n break\n\n clashScore = numClash / float(len(nonhatoms))\n\n if 0 :\n # for some ligands stats...\n R = atoms[0].residue\n rid = \"%s.%d.%s\" % (R.type, R.id.position, R.id.chainId)\n\n #print mol.name, rid, avgq, cc1, cc2, cc1_, cc2_, res\n\n print \"\\nMol Name\\tRes Id\\tQ\\tCC\\tCCm\\tCC(noh)\\tCCm(noh)\\tClash\\tClashes\"\n print \"%s\\t%s\\t%.3f\\t%.3f\\t%.3f\\t%.3f\\t%.3f\\t%.2f\\t%.0f (%.2f)\\n\" % (mol.name, rid, avgq, cc1, cc2, cc1_, cc2_, clashScore, numClash, res)\n\n if not os.path.isfile (\"/Users/greg/Desktop/txt.txt\") :\n fp = open ( \"/Users/greg/Desktop/txt.txt\", \"a\" )\n fp.write ( \"Mol Name\\tRes Id\\tQ\\tCC\\tCCm\\tCC(noh)\\tCCm(noh)\\tClash\\tClashes\\n\" )\n fp.close()\n fp = open ( \"/Users/greg/Desktop/txt.txt\", \"a\" )\n fp.write ( \"%s\\t%s\\t%.3f\\t%.3f\\t%.3f\\t%.3f\\t%.3f\\t%.2f\\t%.0f\\n\" % (mol.name, rid, avgq, cc1, cc2, cc1_, cc2_, clashScore, numClash) )\n fp.close()\n\n\n def CalcSelQOpen ( self ) :\n\n for m in chimera.openModels.list() :\n\n if type(m) != chimera.Molecule :\n continue\n\n resN = None\n for r in m.residues :\n\n if r.type == \"PTQ\" :\n #if r.type == \"PEE\" or r.type == \"ACB\" :\n #if r.type == \"F86\" :\n resN = r\n break\n\n if resN :\n\n print \"\\n\\n-------------- %s -------- %s.%d.%s\" % (m.name, resN.type, resN.id.position, resN.id.chainId)\n\n chimera.selection.clearCurrent ()\n chimera.selection.addCurrent ( resN.atoms )\n self.CalcSelQ ()\n\n\n\n\n\n def AProfs (self) :\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n chainId = self.chain.get()\n\n dmap = self.cur_dmap\n print \" - in map: %s\" % dmap.name\n\n if 1 or not hasattr ( mol, 'bbats' ) :\n SetBBAts(mol)\n mol.bbats = True\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(mol.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n #sigma = 0.4\n minD, maxD = qscores.MinMaxD ( dmap )\n print \" - mind: %.3f, maxd: %.3f\" % (minD, maxD)\n\n sigma = float(self.sigma.get())\n\n def doAt (at, arr) :\n rr = qscores.Qscore ( [at], dmap, sigma, allAtTree=allAtTree, show=0, log=0, numPts=10, toRAD=3.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=1 )\n Qscore, yds, err = rr\n #print len(yds)\n #if len(yds) == 31 :\n arr.append ( [Qscore,err] + yds.tolist() )\n if 0 :\n print \"%.3f\\t%.5f\\t%s.%d.%s\" % (at.Q, err, at.residue.type, at.residue.id.position, at.name),\n for y in yds : print \"\\t%f\" % y,\n print \"\"\n else :\n print \".\",\n\n\n bb_atn_q, sc_atn_q = {}, {}\n for at in mol.atoms :\n if at.residue.isProt and (at.name == 'C' or at.name == 'O' or at.name == 'N' or at.name == \"CA\") :\n if at.name in bb_atn_q :\n bb_atn_q[at.name].append ( [at.Q, at] )\n else :\n bb_atn_q[at.name] = [[at.Q, at]]\n\n atn = \"%s(%s)\" % (at.residue.type, at.name)\n if atn in sc_atn_q :\n sc_atn_q[atn].append ( [at.Q, at] )\n else :\n sc_atn_q[atn] = [[at.Q, at]]\n\n\n\n N = 60\n print \"N = %d\" % N\n\n # BB\n bb_c, bb_n, bb_o, bb_ca = [], [], [], []\n if 0 :\n for an, aa in [ [\"C\",bb_c], [\"N\",bb_n], [\"O\",bb_o], [\"CA\",bb_ca]] :\n print \"___\",an,\"___\";\n A = bb_atn_q[an];\n A.sort ( reverse=True, key=lambda x: x[0] )\n print \"%d - \" % len(A);\n i = 0\n for q, at in A[:N] :\n if q > 0.8 :\n doAt (at, aa)\n i += 1;\n print \"%d\" % i,\n print \"\"\n\n\n # SC\n asp_o, glu_o, arg_n, 
leu_c, val_c = [], [], [], [], []\n if 0 :\n for an, aa in [ [\"ASP(OD1)\",asp_o], [\"ASP(OD2)\",asp_o]] :\n print \"___\",an,\"___\"; A = sc_atn_q[an]; A.sort ( reverse=True, key=lambda x: x[0] )\n for q, at in A[0:N] :\n if q > 0.8 : doAt (at, aa)\n print \"\"\n for an, aa in [ [\"GLU(OE1)\",glu_o], [\"GLU(OE1)\",glu_o]] :\n print \"___\",an,\"___\"; A = sc_atn_q[an]; A.sort ( reverse=True, key=lambda x: x[0] )\n for q, at in A[0:N] :\n if q > 0.8 : doAt (at, aa)\n print \"\"\n for an, aa in [ [\"ARG(NH1)\",arg_n], [\"ARG(NH2)\",arg_n]] :\n print \"___\",an,\"___\"; A = sc_atn_q[an]; A.sort ( reverse=True, key=lambda x: x[0] )\n for q, at in A[0:N] :\n if q > 0.8 : doAt (at, aa)\n print \"\"\n for an, aa in [ [\"LEU(CD1)\",leu_c], [\"LEU(CD2)\",leu_c]] :\n print \"___\",an,\"___\"; A = sc_atn_q[an]; A.sort ( reverse=True, key=lambda x: x[0] )\n for q, at in A[0:N] :\n if q > 0.8 : doAt (at, aa)\n print \"\"\n for an, aa in [ [\"VAL(CG1)\",val_c], [\"VAL(CG2)\",val_c]] :\n print \"___\",an,\"___\"; A = sc_atn_q[an]; A.sort ( reverse=True, key=lambda x: x[0] )\n for q, at in A[0:N] :\n if q > 0.8 : doAt (at, aa)\n print \"\"\n\n # HOH, ion\n hoh_o, i_i = [], []\n if 1 :\n for an, aa in [ [\"HOH(O)\",hoh_o], [\"MG(MG)\",i_i]] :\n print \"___\",an,\"___\"; A = sc_atn_q[an]; A.sort ( reverse=True, key=lambda x: x[0] )\n for q, at in A[0:N] :\n if q > 0.8 : doAt (at, aa)\n print \"\"\n\n\n for r in mol.residues :\n if 0 :\n if r.type == \"ASP\" :\n for at in [r.atomsMap[\"OD1\"][0], r.atomsMap[\"OD2\"][0]] :\n if at.Q > 0.8 : doAt (at, asp_o)\n if r.type == \"GLU\" :\n for at in [r.atomsMap[\"OE1\"][0], r.atomsMap[\"OE2\"][0]] :\n if at.Q > 0.8 : doAt (at, glu_o)\n if 0 :\n if r.type == \"VAL\" :\n for at in [r.atomsMap[\"CG1\"][0], r.atomsMap[\"CG2\"][0]] :\n if at.Q > 0.8 : doAt (at, val_c)\n if r.type == \"LEU\" :\n for at in [r.atomsMap[\"CD1\"][0], r.atomsMap[\"CD2\"][0]] :\n if at.Q > 0.8 : doAt (at, leu_c)\n if 0 :\n if r.type == \"ARG\" :\n for at in [r.atomsMap[\"NH1\"][0], r.atomsMap[\"NH2\"][0]] :\n if at.Q > 0.8 : doAt (at, arg_n)\n\n #if r.type == \"LEU\" :\n # for at in [r.atomsMap[\"CD1\"][0], r.atomsMap[\"CD2\"][0]] :\n # if at.Q > 0.8 : doAt (at, leu_c)\n\n\n def outAt (arr, label, w=\"avg\") :\n\n arr.sort ( reverse=True, key=lambda x: x[0] )\n\n #K = 10\n #aa = numpy.array ( arr[0:K] )\n aa = numpy.array ( arr )\n\n if w==\"p\" :\n print \"Q\\tAvgD - \", label\n for qa in aa :\n for d in qa :\n print \"%f\\t\" % d,\n print \"\"\n return\n\n s = numpy.std(aa,axis=0)\n m = numpy.mean(aa,axis=0)\n\n print label, \"\\t\", aa.shape,\n\n #print label,\n if w == \"avg\" :\n for i in range(len(s)) :\n print \"\\t%f\" % m[i],\n else :\n for i in range(len(s)) :\n print \"\\t%f\" % s[i],\n\n print \"\"\n\n\n print \"\"\n print \"Res\\tQ\\tErr\",\n for yi in range(31) : print \"\\t%f\" % (yi*.1),\n print \"\"\n\n if 0 :\n for w in [\"p\", \"avg\", \"std\"] :\n outAt ( val_c, \"VAL(CG)\", w )\n outAt ( leu_c, \"LEU(CD)\", w )\n outAt ( arg_n, \"ARG(NH)\", w )\n outAt ( asp_o, \"ASP(OD)\", w )\n outAt ( glu_o, \"GLU(OE)\", w )\n print \"\"\n\n if 0 :\n for w in [\"p\", \"avg\", \"std\"] :\n outAt ( bb_c, \"C\", w )\n outAt ( bb_ca, \"CA\", w )\n outAt ( bb_n, \"N\", w )\n outAt ( bb_o, \"O\", w )\n\n if 1 :\n for w in [\"p\", \"avg\", \"std\"] :\n outAt ( hoh_o, \"Water(O)\", w )\n outAt ( i_i, \"Ion\", w )\n\n\n\n def Ligs ( self ) :\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n chainId = self.chain.get()\n\n dmap = 
self.cur_dmap\n print \" - in map: %s\" % dmap.name\n\n if 1 or not hasattr ( mol, 'bbats' ) :\n SetBBAts(mol)\n mol.bbats = True\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(mol.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n #sigma = 0.6\n minD, maxD = qscores.MinMaxD ( dmap )\n print \" - mind: %.3f, maxd: %.3f\" % (minD, maxD)\n\n\n remAts = []\n showAts = []\n for r in mol.residues :\n if not r.isProt and not r.isNA :\n #print at.residue.id.position, at.residue.type,\n for at in r.atoms :\n #print \" - %s - %.2f\" % (at.name, at.Q)\n\n if hasattr ( at, 'Q1' ) and hasattr ( at, 'Q2' ) :\n if at.Q > 0.8 and at.Q2 > 0.8 and at.Q1 > 0.8 :\n print \" -3- %s - %.2f, %.2f, %.2f\" % (at.name, at.Q, at.Q1, at.Q2)\n at.display = True\n showAts.append ( at )\n else :\n at.display = False\n remAts.append ( at )\n\n else :\n #print \" -1- %s - %.2f\" % (at.name, at.Q)\n if at.Q > 0.8 :\n at.display = True\n showAts.append ( at )\n else :\n at.display = False\n remAts.append ( at )\n\n print \"Showing %d, Hiding %d\" % ( len(showAts), len(remAts) )\n\n if 0 :\n for at in remAts :\n mol.deleteAtom ( at )\n\n\n\n\n def Scale ( self ) :\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n chainId = self.chain.get()\n\n dmap = self.cur_dmap\n print \" - scale map: %s\" % dmap.name\n\n\n def SetStep ( D, S ) :\n # Preserve index origin.\n index_origin = D.data.xyz_to_ijk((0,0,0))\n #print \" - origin 0 :\", index_origin\n D.data.set_step ( S )\n xyz_origin = [x0-x for x0,x in zip(D.data.ijk_to_xyz((0,0,0)),D.data.ijk_to_xyz(index_origin))]\n D.data.set_origin(xyz_origin)\n #print \" - origin 1 :\", xyz_origin\n\n\n xf0 = mol.openState.xform\n s0 = dmap.data.step\n\n max_Xf, max_Avg, max_S = None, None, None\n\n RES = 2.0 # float(self.mapRes.get())\n print \" - res: %.1f\" % RES\n\n vals = []\n\n avg, cc, ccm = FitMolToMap ( mol, dmap, RES )\n MapUp ( dmap, showMesh = False, color=(.7,.7,.7,1) )\n\n print \"Initial avg: %f, step: %.4f\" % (avg, dmap.data.step[0])\n vals.append ( [avg, cc, ccm, dmap.data.step[0], dmap.data.step[1], dmap.data.step[2]] )\n\n\n D = 0.0001\n for i in range ( 100 ) :\n\n S = dmap.data.step\n S_ = ( S[0] - D, S[1] - D, S[2] - D )\n SetStep ( dmap, S_ )\n\n avg, cc, ccm = FitMolToMap ( mol, dmap, RES )\n MapUp ( dmap, showMesh = False, color=(.7,.7,.7,1) )\n\n vals.append ( [avg, cc, ccm, S_[0], S_[1], S_[2]] )\n\n #print \" %f - %f\" % (S_[0], cc)\n\n if i % 10 == 0 :\n status ( \"Step: %f - %d/%d\" % (S_[0], i+1, 100) )\n print \".\",\n\n if max_Avg == None or max_Avg < avg :\n max_Avg = avg\n max_Xf = mol.openState.xform\n max_S = S_\n\n print \"\"\n\n vals.reverse ()\n\n SetStep ( dmap, s0 )\n MapUp ( dmap, showMesh = False, color=(.7,.7,.7,1) )\n mol.openState.xform = xf0\n\n for i in range ( 100 ) :\n\n S = dmap.data.step\n S_ = ( S[0] + D, S[1] + D, S[2] + D )\n SetStep ( dmap, S_ )\n\n avg, cc, ccm = FitMolToMap ( mol, dmap, RES )\n MapUp ( dmap, showMesh = False, color=(.7,.7,.7,1) )\n\n vals.append ( [avg, cc, ccm, S_[0], S_[1], S_[2]] )\n\n #print \" %f - %f\" % (S_[0], cc)\n\n if i % 10 == 0 :\n status ( \"Step: %f + %d/%d\" % (S_[0], i+1, 100) )\n print \".\",\n\n if max_Avg == None or max_Avg < avg :\n max_Avg = avg\n max_Xf = mol.openState.xform\n max_S = S_\n\n\n print \"\"\n print \"Max avg: %f, step: %.4f\" % (max_Avg, max_S[0])\n 
SetStep ( dmap, max_S )\n MapUp ( dmap, showMesh = False, color=(.7,.7,.7,1) )\n mol.openState.xform = max_Xf\n\n nout = os.path.splitext(dmap.data.path)[0] + \"_scales.txt\"\n fout = open ( nout, \"w\" )\n for avg, cc, ccm, s0, s1, s2 in vals :\n fout.write ( \"%f\\t%f\\t%f\\t%f\\t%f\\t%f\\n\" % (s0, s1, s2, avg, cc, ccm) )\n fout.close()\n print \" - wrote \", nout\n\n\n\n\n\n def Asn ( self ) :\n\n print \"ASN - show\"\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n chainId = self.chain.get()\n\n dmap = self.cur_dmap\n print \" - scale map: %s\" % dmap.name\n\n\n totAt, showAt = 0, 0\n\n tot = {}\n rids = []\n\n\n for r in self.cur_mol.residues :\n\n if r.id.chainId != chainId :\n continue\n\n rids.append ( [r.id.position, r] )\n\n rids.sort ()\n\n i = 0\n for ri, r in rids :\n if i > 2 :\n r2 = rids[i-2]\n if (r.type == \"SER\" or r.type == \"THR\") and r2[1].type == \"ASN\" :\n print \"%s - %d.%s\" % (r2[1].type, r2[1].id.position, r2[1].id.chainId)\n\n chimera.selection.addCurrent ( r2[1] )\n\n\n i += 1\n\n\n\n def DelSel ( self ) :\n\n mol = chimera.selection.currentMolecules()[0]\n\n for b in chimera.selection.currentBonds() :\n mol.deleteBond(b)\n\n for at in chimera.selection.currentAtoms() :\n mol.deleteAtom(at)\n\n\n def Take ( self ) :\n\n mols = []\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mols.append ( m )\n\n print \" - %s\" % mols[0].name\n print \" - %s\" % mols[1].name\n\n m1, m2 = mols\n\n chainId = self.chain.get()\n\n rids = {}\n for r in m1.residues :\n if r.id.chainId == chainId :\n rids[r.id.position] = r\n\n\n for r in m2.residues :\n if not r.id.position in rids :\n #print \" - %d %s %s\" % (r.id.position, r.type, r.id.chainId)\n chimera.selection.addCurrent ( r )\n\n\n return\n\n\n aMap = dict()\n for ri, r in enumerate ( m2.residues ) :\n nres = m1.newResidue (r.type, chimera.MolResId(chainId, r.id.position))\n for at in r.atoms :\n nat = m1.newAtom (at.name, chimera.Element(at.element.number))\n aMap[at] = nat\n nres.addAtom( nat )\n p = chimera.Point ( at.coord().x, at.coord().y, at.coord().z )\n nat.setCoord ( p )\n\n for bond in m2.bonds :\n try :\n nb = m1.newBond ( aMap[bond.atoms[0]], aMap[bond.atoms[1]] )\n nb.display = nb.Smart\n except :\n pass\n\n\n\n def Domains ( self ) :\n\n print \"dms\"\n\n mol = self.cur_mol\n rmap = {}\n for r in mol.residues :\n rmap[r.id.position] = r\n r.dms = None\n\n dms = []\n fp = open ( \"/Users/greg/Box Sync/20 Ribozyme - Zhaoming/L21RNA_DMS_0000.JustWT.txt\" )\n for l in fp :\n #print l,\n s = l.split()\n #print s\n try :\n rid = int(s[0])\n except :\n continue\n if rid in rmap :\n r = rmap[rid]\n r.dms = float(s[1])\n #print \"res %d - %g\" % (rid, r.dms)\n dms.append ( r.dms )\n else :\n print \"res %d - x\" % rid\n\n print \"min: %g\" % numpy.min ( dms )\n print \"max: %g\" % numpy.max ( dms )\n print \"avg: %g\" % numpy.mean ( dms )\n print \"std: %g\" % numpy.std ( dms )\n\n dmin = numpy.min ( dms )\n dmax = numpy.max ( dms )\n dmean = numpy.mean ( dms )\n dstd = numpy.std ( dms )\n\n chimera.selection.clearCurrent ()\n\n for r in mol.residues :\n #rmap[r.id.position] = r\n #r.dms = None\n if r.dms == None :\n r.ribbonColor = chimera.MaterialColor ( .7, .7, .7, 1.0 )\n else :\n\n R = numpy.array ( [1,0,0] )\n G = numpy.array ( [0,1,0] )\n\n f = (r.dms - dmin) / ( dmax - dmin )\n col = R * f + G * (1.0-f)\n\n if r.dms > 0.05 :\n col = R\n chimera.selection.addCurrent ( r )\n chimera.selection.addCurrent 
( rmap[r.id.position-1] )\n chimera.selection.addCurrent ( rmap[r.id.position+1] )\n\n else :\n col = G\n\n r.ribbonColor = chimera.MaterialColor ( col[0], col[1], col[2], 1.0 )\n\n\n def SS ( self ) :\n\n print \"color domains\"\n\n mol = self.cur_mol\n\n mfrom = None\n for m in chimera.openModels.list() :\n if type(m) == chimera.Molecule and m.display == True and m != mol :\n mfrom = m\n break\n\n\n if mfrom == None :\n rmap = {}\n for r in mol.residues :\n rmap[r.id.position] = r\n r.dms = None\n\n r.ribbonColor = chimera.MaterialColor ( .7, .7, .7, 1.0 )\n\n dms = []\n fp = open ( \"/Users/greg/Box Sync/_data/Ribozyme/sec.txt\" )\n for l in fp :\n #print l,\n s = l.split()\n #print s\n\n C = s[1].split(\",\")\n C = ( float(C[0]), float(C[1]), float(C[2]) )\n\n print s[0], C,\n\n r = s[2].split(\",\")\n for rs in r :\n print rs,\n be = rs.split(\"-\")\n for i in range ( int(be[0]), int(be[1])+1 ) :\n if not i in rmap :\n print \" - res %d not in rmap\" % i\n else :\n r = rmap[i]\n r.ribbonColor = chimera.MaterialColor ( C[0], C[1], C[2], 1.0 )\n\n print \"\"\n\n else :\n\n print \"from:\", mfrom.name\n print \"to:\", mol.name\n\n rmap = {}\n for r in mfrom.residues :\n rmap[r.id.chainId + \"%d\"%r.id.position] = r\n\n for r in mol.residues :\n rid = r.id.chainId + \"%d\"%r.id.position\n if not rid in rmap :\n print \"r %s not found in %s\" % (rid, mol.name)\n continue\n rf = rmap[rid]\n for at in r.atoms :\n if at.name in rf.atomsMap :\n atf = rf.atomsMap[at.name][0]\n at.setCoord ( atf.coord() )\n else :\n print \" - at %s in %s.%d.%s not found in %s.%d.%s\" % (at.name, r.type, r.id.position, r.id.chainId, rf.type, rf.id.position, rf.id.chainId)\n break\n\n\n\n def AddH ( self ) :\n\n print \"addh\"\n\n selAt = chimera.selection.currentAtoms()[0]\n\n print selAt.name\n\n aN = selAt\n aC1 = selAt.residue.atomsMap[\"CE1\"][0]\n aC2 = selAt.residue.atomsMap[\"CD2\"][0]\n\n v1 = aC1.coord() - aN.coord(); v1.normalize()\n v2 = aC2.coord() - aN.coord(); v2.normalize()\n\n avgV = v1 + v2\n avgV.normalize()\n\n nat = selAt.molecule.newAtom ( \"HNE2\", chimera.Element(1))\n selAt.residue.addAtom( nat )\n nat.drawMode = nat.EndCap\n nat.setCoord ( aN.coord() - avgV * 1.0 )\n nat.display = True\n if nat.element.name.upper() in atomColors : nat.color = atomColors[nat.element.name.upper()]\n\n nb = selAt.molecule.newBond ( aN, nat )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n\n\n\n\n\n\n\n def AddDiS ( self ) :\n\n print \"\"\n print \"AddDiS\"\n\n selAts = chimera.selection.currentAtoms()\n\n if len(selAts) != 2 :\n umsg ( \"Select two atoms\" )\n return\n\n at1, at2 = selAts\n if at1.name != \"OG\" or at1.residue.type != \"SER\" :\n umsg ( \"Check atoms\" )\n return\n\n if at2.name != \"OG\" or at2.residue.type != \"SER\" :\n umsg ( \"Check atoms\" )\n return\n\n mol1 = at1.molecule\n mol2 = at2.molecule\n\n if mol1 != mol2 :\n umsg ( \"Not same molecule\" )\n return\n\n nb = mol1.newBond ( at1, at2 )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n\n\n\n def AddResProt ( self ) :\n\n #startI = self.seqRes [ max(self.seqSel[0],0) ].id.position\n #endI = self.seqRes [ min(self.seqSel[1],len(self.seqRes)-1) ].id.position\n\n print \"\"\n print \"AddRes\"\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n chainId = self.chain.get()\n\n dmap = self.cur_dmap\n print \" - in map: %s\" % dmap.name\n\n startRi = self.seqRes[0].id.position + self.seqSel[0]\n endRi = self.seqRes[0].id.position + self.seqSel[1]\n numRes = endRi - startRi + 
1\n\n print \" - sel %d - %d\" % (self.seqSel[0], self.seqSel[1])\n #print \"res %d - %d\" % (startI, endI)\n print \" - res %d - %d, %d res\" % (startRi, endRi, numRes)\n\n seq = self.addRess.get().upper().strip().replace(\" \", \"\")\n print \" - seq:\", seq\n\n from chimera.resCode import protein1to3\n #from chimera.resCode import nucleic1t3\n for i in range ( len(seq) ) :\n if not seq[i] in protein1to3 :\n umsg ( \"Sequence position %d '%s' not known\" % (i+1, seq[i]) )\n return\n\n if len(seq) != numRes :\n umsg ( \"%s is %d, need %d\" % (seq, len(seq), numRes) )\n\n molbuild.BuildModLoop ( mol, startRi, endRi, seq, chainId )\n\n\n\n def Occ ( self ) :\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n #chainId = self.chain.get()\n\n for at in mol.atoms :\n #if at.residue.id.chainId == chainId :\n ats = at.residue.atomsMap[at.name]\n\n if len(ats) > 1 :\n alts = {}\n for at in ats :\n alts[at.altLoc] = at\n\n locs = alts.keys()\n locs.sort()\n #print at.name, at.residue.type, at.residue.id.position, locs\n\n sum = 0.0\n occ = 1.0 / float(len(locs))\n occ = round(occ * 100.0)/100.0\n for l in locs[:-1] :\n alts[l].occupancy = occ\n sum += occ\n alts[locs[-1]].occupancy = 1.0 - sum\n\n else :\n ats[0].occupancy = 1.0\n\n\n\n\n def RibD ( self ) :\n\n print self.cur_mol.name\n print self.cur_dmap.name\n\n minR, maxR = 1e9, -1e9\n\n L, H = numpy.array([1.0,0,0]), numpy.array([0,1.0,0])\n l, h = 2.0, 7.0\n\n for r in self.cur_mol.residues :\n points = _multiscale.get_atom_coordinates ( r.atoms, transformed = False )\n dvals = self.cur_dmap.interpolated_values ( points, self.cur_mol.openState.xform )\n dval = numpy.average(dvals)\n minR = min ( minR, dval )\n maxR = max ( maxR, dval )\n\n f = (dval - l) / (h-l)\n C = f * L + (1-f) * H\n r.ribbonColor = chimera.MaterialColor ( C[0], C[1], C[2], 1.0 )\n for at in r.atoms :\n at.color = r.ribbonColor\n\n print minR, maxR\n\n\n\n\n def RMSD ( self ) :\n\n mols = []\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mols.append ( m )\n\n if len(mols) != 2 :\n umsg ( \"Make at least two molecules visible\" )\n return\n\n m1, m2 = mols\n\n SetBBAts ( m1 )\n SetBBAts ( m2 )\n\n print \"\\nRMSD\"\n print \"%s : %s\" % (m1.name, m2.name)\n\n atids = {}\n rmap = {}\n for r in m1.residues :\n if r.isProt :\n rmap[\"%d.%s\"%(r.id.position, r.id.chainId)] = r\n for at in r.atoms :\n if len(r.atomsMap[at.name]) > 1 :\n # ignore alt conformations...\n continue\n else :\n #atId = \"%d.%s.%s.%s\" % (r.id.position,r.id.chainId,at.name,at.altLoc)\n atId = \"%d.%s.%s\" % (r.id.position,r.id.chainId,at.name)\n atids[atId] = at\n\n\n sums, N = {\"All\":0.0, \"BB\":0.0, \"SC\":0.0}, {\"All\":0.0, \"BB\":0.0, \"SC\":0.0}\n\n for r2 in m2.residues :\n\n if r2.isProt :\n\n rId = \"%d.%s\" % (r2.id.position, r2.id.chainId)\n if rId not in rmap :\n #print \" - res %s not in m1\" % rId\n continue\n\n r1 = rmap[rId]\n\n for at2 in r2.atoms :\n\n if len(r2.atomsMap[at2.name]) > 1 :\n # ignore alt conformations...\n continue\n\n if at2.element.name == \"H\" :\n continue\n\n at2Id = \"%d.%s.%s\" % (r2.id.position,r2.id.chainId,at2.name)\n\n if at2Id not in atids :\n #print \" - atom %s not in m1\" % (at2Id)\n continue\n\n at1 = atids[at2Id]\n\n #atPos = m2.openState.xform.inverse().apply ( at1.xformCoord() )\n v = at2.xformCoord() - at1.xformCoord()\n\n v2 = v.length * v.length\n\n sums[\"All\"] += v2; N[\"All\"] += 1.0\n if at2.isBB :\n sums[\"BB\"] += v2; N[\"BB\"] += 1.0\n 
else :\n sums[\"SC\"] += v2; N[\"SC\"] += 1.0\n\n\n for k in sums.keys() :\n #print \"%s\\t%.3f\\t%.3f\" % (k, sums[k], N[k] )\n print \"%s\\t%.3f\" % (k, numpy.sqrt(sums[k] / N[k]) )\n print \"\"\n\n\n\n def Rotas ( self, res ) :\n\n\n ctrRes = chimera.Vector(0,0,0)\n for at in res.atoms :\n ctrRes += at.coord().toVector()\n\n ctrRes = ctrRes / float ( len(res.atoms) )\n ctrRes = chimera.Point ( ctrRes[0], ctrRes[1], ctrRes[2] )\n #print \" - res %s %d.%s ctr \" % ( res.type, res.id.position, res.id.chainId ), ctrRes\n\n #print \" - in %s\" % self.cur_dmap.name\n\n treeAts, treeAtsAll = [], []\n #print \" - %d atoms in %s\" % ( len(res.molecule.atoms), res.molecule.name )\n for at in res.molecule.atoms :\n d = (at.coord() - ctrRes).length\n if d < 40.0 :\n treeAtsAll.append ( at )\n if at.residue != res :\n treeAts.append ( at )\n\n #print \" - %d atoms within 40 - %d all\" % ( len(treeAts), len(treeAtsAll) )\n\n points = _multiscale.get_atom_coordinates ( treeAts, transformed = False )\n atTree = AdaptiveTree ( points.tolist(), treeAts, 2.0)\n\n points = _multiscale.get_atom_coordinates ( treeAtsAll, transformed = False )\n atTreeAll = AdaptiveTree ( points.tolist(), treeAtsAll, 2.0)\n\n\n\n #print rmols\n\n rotas = []\n bbdep, rmols = getRotamers ( res, log=False )\n\n for ri, rmol in enumerate ( rmols ) :\n\n rotres = rmol.residues[0]\n rotres.rotamerProb = rmol.rotamerProb\n\n #print ri, rmol.rotamerProb\n\n to_ats = [ res.atomsMap['N'][0],res.atomsMap['CA'][0],res.atomsMap['CB'][0] ]\n rot_ats = [ rotres.atomsMap['N'][0],rotres.atomsMap['CA'][0],rotres.atomsMap['CB'][0] ]\n xf, rmsd = chimera.match.matchAtoms ( to_ats, rot_ats )\n\n\n clash = False\n rotres.clashes = False\n rotAts = []\n rotPos = []\n\n for ai, rat in enumerate ( rotres.atoms ) :\n\n atPos = xf.apply(rat.coord())\n rat.setCoord ( atPos )\n\n if rat.name != \"C\" and rat.name != \"N\" and rat.name != \"CA\" and rat.name != \"O\" :\n\n rotAts.append ( rat )\n rotPos.append ( atPos )\n nearAts = self.AtsWithinPt ( atPos.data(), 2.0, atTree )\n if len(nearAts) > 0 :\n rotres.clashes = True\n #for d, a in nearAts :\n # print \" - at %s - %.2f - at %s in %d.%s\" % (rat.name, d, a.name, a.residue.id.position, a.residue.id.chainId )\n # break\n break\n\n\n if rotres.clashes :\n continue\n\n #dvals = dmap.interpolated_values ( apos, r.molecule.openState.xform )\n\n if self.cur_dmap == None :\n umsg ( \"No map selected\" )\n return\n\n #rotres.CC, ccm = ccAts ( rotAts, self.cur_dmap, resolution=3.0, mol=res.molecule )\n #rotres.AvgD = avgdAts ( rotAts, self.cur_dmap, mol=res.molecule )\n\n mol = res.molecule\n dmap = self.cur_dmap\n\n molg = MyMolMapX2 ( rotAts, 3.0, dmap.data.step[0], chimera.Xform.identity() )\n fpoints, fpoint_weights = fit_points_g ( molg, 1e-2 )\n map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )\n #print map_values\n olap, rotres.CC, bbCCm = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n\n\n dvals = self.cur_dmap.interpolated_values ( rotPos, res.molecule.openState.xform )\n #print dvals\n rotres.AvgD = numpy.average(dvals)\n\n avgQ = 0\n minD, maxD = qscores.MinMaxD ( dmap )\n #print \"%d | \" % ri,\n for at in rotAts :\n Qs = qscores.Qscore ( [at], dmap, 0.6, allAtTree=atTreeAll, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, mol=mol )\n #print \"%s:%.3f \" % (at.name, ccm),\n avgQ += Qs\n\n\n rotres.Q = avgQ / float(len(rotAts))\n #print \" | %.3f\" % rotres.Q\n\n\n\n rotas.append ( rotres )\n #break\n\n return rotas\n\n\n\n def Rotas_ ( 
self, res ) :\n\n rotas = []\n\n bbdep, rmols = getRotamers ( res, log=False )\n\n #print rmols\n\n for ri, rmol in enumerate ( rmols ) :\n\n rotres = rmol.residues[0]\n rotres.rotamerProb = rmol.rotamerProb\n\n #print ri, rmol.rotamerProb\n\n to_ats = [ res.atomsMap['N'][0],res.atomsMap['CA'][0],res.atomsMap['CB'][0] ]\n rot_ats = [ rotres.atomsMap['N'][0],rotres.atomsMap['CA'][0],rotres.atomsMap['CB'][0] ]\n xf, rmsd = chimera.match.matchAtoms ( to_ats, rot_ats )\n\n for ai, rat in enumerate ( rotres.atoms ) :\n #if rat.name == \"C\" or rat.name == \"N\" or rat.name == \"CA\" or rat.name == \"O\" :\n # continue\n rat.setCoord ( xf.apply(rat.coord()) )\n\n rotas.append ( rotres )\n\n return rotas\n\n\n def ApplyRota ( self, res, rota ) :\n for at in res.atoms :\n if at.name == \"C\" or at.name == \"N\" or at.name == \"CA\" or at.name == \"O\" :\n continue\n\n rotaAt = rota.atomsMap[at.name][0]\n at.setCoord ( rotaAt.coord() )\n\n\n def HohRota ( self ) :\n\n res = chimera.selection.currentResidues()[0]\n print \"Res %d.%s %s\" % (res.id.position, res.id.chainId, res.type)\n\n rotas = self.Rotas ( res )\n\n print \"#\\tProb\\tCC\\tAvg.D.\\tQ\"\n\n\n #rotas.sort ( reverse=True, key=lambda r: r.CC )\n rotas.sort ( reverse=True, key=lambda r: r.Q )\n\n\n for ri, r in enumerate ( rotas ) :\n #print \" - %d, prob %.5f, cc \" % (ri, r.rotamerProb),\n print \"%d\\t%f\\t%f\\t%f\\t%f\" % (ri+1, r.rotamerProb, r.CC, r.AvgD, r.Q),\n\n if r.clashes :\n print \"--x--\"\n else :\n print \"\"\n\n\n if len(rotas) > 0 :\n #ri = int ( numpy.floor ( ( random.random() * len(rotas) ) ) )\n print \" - applying %d/%d\" % (1, len(rotas))\n self.ApplyRota ( res, rotas[0] )\n\n self.rotas = rotas\n self.rotaAt = 0\n self.rotaRes = res\n\n\n def HohRotaL ( self ) :\n\n if not hasattr ( self, 'rotas' ) :\n return\n\n self.rotaAt = max ( self.rotaAt - 1, 0 )\n rota = self.rotas[self.rotaAt]\n print \" - applying rota %d/%d - prob %f, cc %f\" % (self.rotaAt+1, len(self.rotas), rota.rotamerProb, rota.CC)\n self.ApplyRota ( self.rotaRes, rota )\n\n\n def HohRotaR ( self ) :\n\n if not hasattr ( self, 'rotas' ) :\n return\n\n self.rotaAt = min ( self.rotaAt+1, len(self.rotas)-1 )\n rota = self.rotas[self.rotaAt]\n print \" - applying rota %d/%d - prob %f, cc %f\" % (self.rotaAt+1, len(self.rotas), rota.rotamerProb, rota.CC)\n self.ApplyRota ( self.rotaRes, rota )\n\n\n\n\n\n\n def ResMap ( self ) :\n\n\n print \" - resmap - \"\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n chainId = self.chain.get()\n\n dmap = self.cur_dmap\n print \" - scale map: %s\" % dmap.name\n\n rmap = None\n for m in chimera.openModels.list() :\n if \"resmap\" in m.name :\n rmap = m\n\n print \"mol:\", self.cur_mol.name\n print \"resmap:\", rmap.name\n\n\n #points = _multiscale.get_atom_coordinates ( mol.atoms, transformed = False )\n\n molPath = os.path.splitext(mol.openedAs[0])[0]\n mapName = os.path.splitext(rmap.name)[0]\n\n nname = molPath + \"__R__\" + mapName + \".txt\"\n print \" - q vs resmap:\", nname\n\n fp = open ( nname, \"w\" )\n\n\n for at in mol.atoms :\n res = rmap.interpolated_values ( [at.coord().data()], mol.openState.xform )\n fp.write ( \"%f\\t%f\\n\" % (at.Q, res) )\n\n\n fp.close()\n print \" - done\"\n\n\n\n\n\n\n def BB_Sigma (self) :\n\n selAts = chimera.selection.currentAtoms()\n if len ( selAts ) == 0 :\n return\n\n dmap = self.cur_dmap\n\n\n a = selAts[0]\n r = a.residue\n print \"Res: %s - %d.%s - %s - Atom: %s\" % (r.type, r.id.position, r.id.chainId, 
r.molecule.name, a.name)\n\n if 1 or not hasattr ( r.molecule, 'bbats' ) :\n SetBBAts(r.molecule)\n r.molecule.bbats = True\n\n removeMods = []\n for m in chimera.openModels.list() :\n if \"RAD points\" in m.name :\n removeMods.append ( m )\n chimera.openModels.remove ( removeMods )\n\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(r.molecule.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n #allAtTree = None\n #print \"-\"\n\n import time\n start = time.time()\n\n sigma = RadAts ( r.bbAtoms, dmap, allAtTree=allAtTree, show=0, log=1, numPts=10, toRAD=2, dRAD=0.25 )\n\n end = time.time()\n\n print \"%s - rad: %.3f, time: %f\" % ( a.name, sigma, (end - start) )\n\n\n\n\n def ZScoreSel (self) :\n\n selAts = chimera.selection.currentAtoms()\n if len ( selAts ) == 0 :\n return\n\n dmap = self.cur_dmap\n\n\n a = selAts[0]\n r = a.residue\n print \"Res: %s - %d.%s - %s - Atom: %s\" % (r.type, r.id.position, r.id.chainId, r.molecule.name, a.name)\n\n if not hasattr ( r.molecule, 'bbats' ) :\n SetBBAts(r.molecule)\n r.molecule.bbats = True\n\n removeMods = []\n for m in chimera.openModels.list() :\n if \"RAD points\" in m.name :\n removeMods.append ( m )\n if \"SC \" in m.name :\n removeMods.append ( m )\n chimera.openModels.remove ( removeMods )\n\n\n scZ, cc = zRotSideChain ( r.molecule, r, 3.0, dmap, show=True )\n print \"- scZ %.3f, cc %.3f\" % (scZ, cc)\n #print \"%f\\t%f\\t%f\" % (r.sigma,scZ,cc)\n\n\n\n\n def RotaZ1 (self) :\n\n selAts = chimera.selection.currentAtoms()\n if len ( selAts ) == 0 :\n return\n\n dmap = self.cur_dmap\n\n\n a = selAts[0]\n r = a.residue\n print \"Res: %s - %d.%s - %s - Atom: %s\" % (r.type, r.id.position, r.id.chainId, r.molecule.name, a.name)\n\n if not hasattr ( r.molecule, 'bbats' ) :\n SetBBAts(r.molecule)\n r.molecule.bbats = True\n\n removeMods = []\n for m in chimera.openModels.list() :\n if \"RAD points\" in m.name :\n removeMods.append ( m )\n if \"SC \" in m.name :\n removeMods.append ( m )\n chimera.openModels.remove ( removeMods )\n\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(r.molecule.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n\n rZ = RadZ ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=1, numPts=10, toRAD=2 )\n\n\n #scZ, cc = zRotSideChain ( r.molecule, r, 3.0, dmap, show=True )\n print \"- radZ %.3f \" % (rZ)\n #print \"%f\\t%f\\t%f\" % (r.sigma,scZ,cc)\n\n\n\n\n def R1 (self) :\n\n selAts = chimera.selection.currentAtoms()\n if len ( selAts ) == 0 :\n return\n\n dmap = self.cur_dmap\n\n\n a = selAts[0]\n r = a.residue\n print \"Res: %s - %d.%s - %s - Atom: %s\" % (r.type, r.id.position, r.id.chainId, r.molecule.name, a.name)\n\n if not hasattr ( r.molecule, 'bbats' ) :\n SetBBAts(r.molecule)\n r.molecule.bbats = True\n\n removeMods = []\n for m in chimera.openModels.list() :\n if \"RAD points\" in m.name :\n removeMods.append ( m )\n if \"SC \" in m.name :\n removeMods.append ( m )\n chimera.openModels.remove ( removeMods )\n\n\n\n ress = []\n bbAtoms = []\n allAtoms = []\n for r in a.molecule.residues :\n if r.id.chainId == a.residue.id.chainId :\n ress.append ( r )\n bbAtoms.extend ( r.bbAtoms )\n allAtoms.extend ( r.atoms )\n\n avgD = avgdAts ( allAtoms, dmap )\n bbAvgD = avgdAts ( 
bbAtoms, dmap )\n print \" - avgd - all: %f, bb: %f\" % (avgD, bbAvgD)\n\n r = a.residue\n if len(r.scAtoms) > 0 :\n scAvgD = avgdAts ( r.scAtoms, dmap )\n r.SCBBr = scAvgD / bbAvgD\n print \" - residue %s.%d, %d side chain atoms, avgd: %.5f, r: %.5f\" % ( r.type, r.id.position, len(r.scAtoms), scAvgD, r.SCBBr/bbAvgD )\n else :\n r.SCBBr = None\n print \" - residue %s.%d - no side chain atoms\" % ( r.type, r.id.position )\n\n\n\n\n\n\n\n def AlignRes1 ( self ) :\n\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n chainId = self.chain.get()\n if len(chainId) == 0 :\n umsg (\"Select a chain first\")\n return\n\n if self.cur_dmap == None :\n umsg (\"Select a map first\")\n return\n\n\n #SetBBAts ( self.cur_mol )\n last_x = 0.0\n last_y = 0.0\n\n r0, exR0, xtR0 = None, None, None\n\n alAts = []\n if self.exType == \"ASP\" : alAts = [\"CG\",\"OD1\",\"OD2\"]\n if self.exType == \"LEU\" : alAts = [\"CG\",\"CD1\",\"CD2\"]\n if self.exType == \"GLU\" : alAts = [\"CD\",\"OE1\",\"OE2\"]\n if self.exType == \"TYR\" : alAts = [\"OH\",\"CE1\",\"CE2\",\"CD1\",\"CD2\",\"CG\",\"CB\"]\n\n\n for r in self.cur_mol.residues :\n if r.id.chainId == chainId and r.type == self.exType :\n print \" - res %s %d\" % (r.type, r.id.position)\n\n if r0 == None :\n r0 = r\n\n r.exMaps[0].display = True\n r.exMaps[1].display = False\n\n #r.xtMaps[0].display = False\n #r.xtMaps[1].display = False\n\n for at in r.atoms :\n at.display = at.name in alAts\n\n else :\n\n exR0 = r0.exMol.residues[0]\n exR = r.exMol.residues[0]\n ats0, ats = [], []\n for atName in alAts :\n ats0.append ( exR0.atomsMap[atName][0] )\n ats.append ( exR.atomsMap[atName][0] )\n\n for at in r.atoms :\n at.display = at.name in alAts\n\n #aCG0, aOD10, aOD20 = exR0.atomsMap['CG'][0], exR0.atomsMap['OD1'][0], exR0.atomsMap['OD2'][0],\n #aCG, aOD1, aOD2 = exR.atomsMap['CG'][0], exR.atomsMap['OD1'][0], exR.atomsMap['OD2'][0],\n\n #xf, rmsd = chimera.match.matchPositions ( pts_o, pts_c )\n #xf, rmsd = chimera.match.matchAtoms ( [aCG0, aOD10, aOD20], [aCG, aOD1, aOD2] )\n xf, rmsd = chimera.match.matchAtoms ( ats0, ats )\n print \" - rmsd: \", rmsd\n\n #from _multiscale import get_atom_coordinates\n #points = get_atom_coordinates ( atoms, transformed = True )\n\n #exR.xf0 = r.exMol.openState.xform\n\n mxf = r0.exMol.openState.xform\n mxf.multiply ( xf )\n r.exMol.openState.xform = mxf\n r.exMaps[0].openState.xform = mxf\n r.exMaps[1].openState.xform = mxf\n r.exMaps[0].display = True\n r.exMaps[1].display = False\n\n #r.xtMaps[0].display = False\n #r.xtMaps[1].display = False\n\n\n #break\n\n\n def AlignRes2 ( self ) :\n\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n chainId = self.chain.get()\n if len(chainId) == 0 :\n umsg (\"Select a chain first\")\n return\n\n if self.cur_dmap == None :\n umsg (\"Select a map first\")\n return\n\n\n #SetBBAts ( self.cur_mol )\n last_x = 0.0\n last_y = 0.0\n\n r0, exR0, xtR0 = None, None, None\n\n\n for r in self.cur_mol.residues :\n if r.id.chainId == chainId and r.type == \"ASP\" :\n print \" - res %s %d\" % (r.type, r.id.position)\n\n if r0 == None :\n r0 = r\n\n r.exMaps[0].display = False\n r.exMaps[1].display = False\n\n r.xtMaps[0].display = True\n r.xtMaps[1].display = False\n\n\n else :\n\n r.exMaps[0].display = False\n r.exMaps[1].display = False\n\n exR0 = r0.xtMol.residues[0]\n aCB0, aCG0, aOD10, aOD20 = exR0.atomsMap['CB'][0], exR0.atomsMap['CG'][0], exR0.atomsMap['OD1'][0], exR0.atomsMap['OD2'][0],\n\n exR = r.xtMol.residues[0]\n aCB, aCG, aOD1, aOD2 = 
exR.atomsMap['CB'][0], exR.atomsMap['CG'][0], exR.atomsMap['OD1'][0], exR.atomsMap['OD2'][0],\n\n #xf, rmsd = chimera.match.matchPositions ( pts_o, pts_c )\n xf, rmsd = chimera.match.matchAtoms ( [aCB0, aCG0, aOD10, aOD20], [aCB, aCG, aOD1, aOD2] )\n print \" - rmsd: \", rmsd\n\n #from _multiscale import get_atom_coordinates\n #points = get_atom_coordinates ( atoms, transformed = True )\n\n #exR.xf0 = r.exMol.openState.xform\n\n mxf = r0.xtMol.openState.xform\n mxf.multiply ( xf )\n r.xtMol.openState.xform = mxf\n r.xtMaps[0].openState.xform = mxf\n r.xtMaps[1].openState.xform = mxf\n r.xtMaps[0].display = True\n r.xtMaps[1].display = False\n\n\n #break\n\n\n\n def Avg ( self ) :\n\n print \" -- finding base map --- \"\n largestMap = None\n maxD = 0\n for m in OML(modelTypes = [VolumeViewer.volume.Volume]) :\n if m.display == True :\n d = numpy.sum ( m.data.size )\n if d > maxD :\n maxD = d\n largestMap = m\n\n print \" - largest map: \", largestMap.name\n dmap = largestMap\n dmap.display = False\n\n\n fmap = None\n avgMat = dmap.data.full_matrix()\n N = 0.0\n\n print \" ----------- Averaging... ---------------------\"\n\n for m in OML(modelTypes = [VolumeViewer.volume.Volume]) :\n if m.display == True and m != dmap :\n print m.name\n\n df_mat = self.Map2Map ( m, dmap )\n m.display = False\n N = N + 1.0\n avgMat = avgMat + df_mat\n\n\n print \" ----------- n=%f ---------------------\" % N\n\n avgMat = avgMat / N\n df_data = VolumeData.Array_Grid_Data ( avgMat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name=\"avg\" )\n\n MapFromData ( df_data, \"Avg\", dmap, False )\n MapFromData ( df_data, \"Avg\", dmap, True )\n\n\n #df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n #df_v.name = \"Avg\"\n #df_v.openState.xform = dmap.openState.xform\n\n #nv = self.ShrinkMap ( df_v, 1e-3 )\n\n\n\n def Map2Map ( self, densitiesFromMap, toGridOfMap, mask = False ) :\n\n fmap = toGridOfMap\n dmap = densitiesFromMap\n\n import _contour\n n1, n2, n3 = fmap.data.size[0], fmap.data.size[1], fmap.data.size[2]\n f_points = VolumeData.grid_indices( (n1,n2,n3), numpy.single ) # i,j,k indices\n _contour.affine_transform_vertices( f_points, fmap.data.ijk_to_xyz_transform )\n\n d_vals = dmap.interpolated_values ( f_points, fmap.openState.xform )\n df_mat = d_vals.reshape( (n3,n2,n1) )\n\n if mask :\n f_mat = fmap.data.full_matrix()\n f_mask = numpy.where ( f_mat > fmap.surface_levels[0], numpy.ones_like(f_mat), numpy.zeros_like(f_mat) )\n df_mat = df_mat * f_mask\n\n return df_mat\n\n\n\n\n def CloseExtracted ( self ) :\n\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n chainId = self.chain.get()\n if len(chainId) == 0 :\n umsg (\"Select a chain first\")\n return\n\n if self.cur_dmap == None :\n umsg (\"Select a map first\")\n return\n\n\n for r in self.cur_mol.residues :\n\n if hasattr ( r, \"exMaps\" ) :\n chimera.openModels.close ( r.exMaps ); del r.exMaps\n\n if hasattr ( r, \"xtMaps\" ) :\n chimera.openModels.close ( r.xtMaps ); del r.xtMaps\n\n if hasattr ( r, \"exMol\" ) :\n chimera.openModels.close ( [r.exMol] ); del r.exMol\n\n if hasattr ( r, \"xtMol\" ) :\n chimera.openModels.close ( [r.xtMol] ); del r.xtMol\n\n for m in chimera.openModels.list() :\n if m.name == \"Avg\" or m.name == \"Avg_mesh\" :\n chimera.openModels.close ( [m] )\n\n\n\n\n\n def Extract ( self ) :\n\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return\n\n chainId = self.chain.get()\n if len(chainId) == 0 :\n umsg (\"Select a chain first\")\n return\n\n if 
self.cur_dmap == None :\n umsg (\"Select a map first\")\n return\n\n\n #SetBBAts ( self.cur_mol )\n last_x = 0.0\n last_y = 0.0\n\n\n print \"Extracting - %s - %s - %s\" % (self.cur_dmap.name, self.cur_mol.name, chainId)\n\n #self.exType = \"TYR\"\n #self.exType = \"GLU\"\n #self.exType = \"ASP\"\n self.exType = \"LEU\"\n\n yzAts = { \"ASP\" : [\"CB\",\"CG\",\"OD1\"],\n \"GLU\" : [\"CG\",\"CD\",\"OE1\"],\n \"TYR\" : [\"CB\",\"CZ\",\"CD1\"],\n \"LEU\" : [\"CB\",\"CG\",\"CD1\"]\n }\n\n for r in self.cur_mol.residues :\n\n if r.id.chainId == chainId and r.type == self.exType :\n\n print \" - res %s %d\" % (r.type, r.id.position)\n\n self.ExtractRes ( r, self.cur_mol, self.cur_dmap, last_x, last_y, yzAts[self.exType] )\n\n #self.ExtendRes ( r, self.cur_mol, self.cur_dmap, last_x, -8.0, thrF=0.8 )\n\n last_x += 7.0\n\n #break\n\n\n\n def ExtractRes ( self, r, mol, dmap, atX, atY, xyAts ) :\n\n nmol, nres = CopyRess ( [r] )\n nmol.name = mol.name + \"_%s_%d\" % (r.type, r.id.position)\n chimera.openModels.add ( [nmol] )\n nmol.openState.xform = mol.openState.xform\n\n for at in nmol.atoms :\n #at.drawMode = 3\n if at.element.name.upper() in atomColors : at.color = atomColors[at.element.name.upper()]\n #at.radius = at.radius * 0.8\n\n mname = dmap.name + \"_%s_%d\" % (r.type, r.id.position)\n\n #aCB, aCG, aOD1 = r.atomsMap['CB'][0], r.atomsMap['CG'][0], r.atomsMap['OD1'][0]\n aCB, aCG, aOD1 = r.atomsMap[xyAts[0]][0], r.atomsMap[xyAts[1]][0], r.atomsMap[xyAts[2]][0]\n\n dmap, mmap = ExtractDen ( r.atoms, dmap, mname, boundRad=2.0, showMesh=True )\n r.exMol = nmol\n r.exMaps = [dmap, mmap]\n\n X = aOD1.coord() - aCB.coord(); X.normalize()\n Y = aCG.coord() - aCB.coord(); Y.normalize()\n Z = chimera.cross ( X, Y ); Z.normalize()\n X = chimera.cross ( Y, Z ); Y.normalize()\n\n xf = chimera.Xform.coordFrame ( X, Y, Z, aCB.coord(), True ).inverse()\n xf.premultiply ( chimera.Xform.translation(atX, atY, 0) )\n\n nmol.openState.xform = xf\n dmap.openState.xform = xf\n if mmap : mmap.openState.xform = xf\n\n\n def ExtendRes ( self, r, mol, dmap, atX, atY, thrF=0.75 ) :\n\n nmol, nres = CopyRess ( [r] )\n nmol.name = mol.name + \"_%s_%d_ext\" % (r.type, r.id.position)\n chimera.openModels.add ( [nmol] )\n nmol.openState.xform = mol.openState.xform\n\n for at in nmol.atoms :\n at.drawMode = 3\n if at.element.name.upper() in atomColors : at.color = atomColors[at.element.name.upper()]\n at.radius = at.radius * 0.8\n\n mname = dmap.name + \"_%s_%d_ext\" % (r.type, r.id.position)\n\n\n R = nres[0]\n R.O, R.N, R.C, R.CA = R.atomsMap[\"O\"][0], R.atomsMap[\"N\"][0], R.atomsMap[\"C\"][0], R.atomsMap[\"CA\"][0]\n R.CB, R.CG, R.OD1, R.OD2 = R.atomsMap[\"CB\"][0], R.atomsMap[\"CG\"][0], R.atomsMap[\"OD1\"][0], R.atomsMap[\"OD2\"][0]\n\n bones = []\n bones.append ( Bone(R.CA, R.N, R.CB) )\n bones.append ( Bone(R.CA, R.C, R.CB) )\n bones.append ( Bone(R.C, R.O, R.CA) )\n\n bones.append ( Bone(R.CA, R.CB, R.N) )\n bones.append ( Bone(R.CG, R.CB, R.OD1) )\n bones.append ( Bone(R.CG, R.OD1, R.OD2) )\n bones.append ( Bone(R.CG, R.OD2, R.OD1) )\n\n for bi, bo in enumerate ( bones ) :\n if GetMod ( \"bone_%d.mrc\" % bi ) != None : chimera.openModels.close ( \"bone_%d.mrc\" % bi )\n if GetMod ( \"bone_%d.mrc_mesh\" % bi ) != None : chimera.openModels.close ( \"bone_%d.mrc_mesh\" % bi )\n bo.dmap = BoneMap ( bo, dmap, 1.0, \"bone_%d.mrc\" % bi, show = False, showMesh=True )\n\n v1 = R.CB.coord() - R.CA.coord(); v1.normalize()\n v2 = R.CB.coord() - R.CG.coord(); v2.normalize()\n ang = numpy.arccos ( v1*v2 ) * 180.0/numpy.pi\n 
ax = chimera.cross ( v1, v2 ); ax.normalize()\n\n print \"CB-CG: %.2f\" % (-ang + 180)\n\n T = chimera.Xform.translation ( R.CB.coord().toVector() )\n T.multiply ( chimera.Xform.rotation ( ax, -ang + 180 ) )\n T.multiply ( chimera.Xform.translation ( R.CB.coord().toVector()*-1.0 ) )\n\n for an in [\"CG\", \"OD1\", \"OD2\"] :\n at = R.atomsMap[an][0]\n at.setCoord ( T.apply (at.coord()) )\n\n #MoldMap2 ( bones, rmaps[0], rmaps[1] )\n\n d1 = diha ( R.N, R.CB, R.CG, R.OD1 )\n d2 = diha ( R.N, R.CB, R.CG, R.OD2 )\n ang = d1 if numpy.abs(d1) < numpy.abs(d2) else d2\n print \"CG dihedral - \", d1, d2, \" -> \", ang\n ax = R.CG.coord() - R.CB.coord(); ax.normalize()\n\n T = chimera.Xform.translation ( R.CG.coord().toVector() )\n T.multiply ( chimera.Xform.rotation ( ax, -ang ) )\n T.multiply ( chimera.Xform.translation ( R.CG.coord().toVector()*-1.0 ) )\n\n for an in [\"OD1\", \"OD2\"] :\n at = R.atomsMap[an][0]\n at.setCoord ( T.apply (at.coord()) )\n\n dmap, dmesh = MapForAtoms ( R.atoms, dmap, mname, showMesh=True, thrF=thrF )\n MoldMap2 ( bones, dmap, dmesh )\n r.xtMol = nmol\n r.xtMaps = [dmap, dmesh]\n\n\n X = R.OD1.coord() - R.CB.coord(); X.normalize()\n Y = R.CG.coord() - R.CB.coord(); Y.normalize()\n Z = chimera.cross ( X, Y ); Z.normalize()\n X = chimera.cross ( Y, Z ); Y.normalize()\n\n xf = chimera.Xform.coordFrame ( X, Y, Z, R.CB.coord(), True ).inverse()\n xf.premultiply ( chimera.Xform.translation(atX, atY, 0) )\n\n nmol.openState.xform = xf\n dmap.openState.xform = xf\n if dmesh : dmesh.openState.xform = xf\n\n\n\n\n def asp ( self ) :\n\n N = 1\n\n framei = 0\n mpath = \"/Users/greg/Desktop/frames\"\n for f in os.listdir ( mpath ) :\n if f.endswith(\".png\") :\n os.remove( mpath + \"/\" + f )\n\n dmap, mol = VisMapMod()\n resolution = 3.0 * dmap.data.step[0]\n\n print \"Map: %s, mol: %s\" % (dmap.name, mol.name)\n res = chimera.selection.currentResidues()[0]\n print \" - res: %s %d.%s\" % (res.type, res.id.position, res.id.chainId)\n z = None\n\n nname = \"%s_%d\" % ( res.type, res.id.position )\n\n #for na in [\"ASP\",\"molded.mrc\",\"skinned.mrc\"] :\n # m = GetMod ( na )\n # if m != None :\n # chimera.openModels.close ( [m] )\n\n\n nmol = GetMod ( nname + \".pdb\" )\n if nmol == None :\n nmol, nres = CopyRess ( [res] )\n nmol.name = nname + \".pdb\"\n chimera.openModels.add ( [nmol] )\n nmol.openState.xform = mol.openState.xform\n\n xf = nmol.openState.xform\n #xf.multiply ( chimera.Xform.translation ( 0,0,5 ) )\n nmol.openState.xform = xf\n\n for at in nmol.atoms:\n at.drawMode = 3\n if at.element.name.upper() in atomColors :\n at.color = atomColors[at.element.name.upper()]\n at.radius = at.radius * 0.8\n\n nres = nmol.residues\n R = nres[0]\n\n R.O = R.atomsMap[\"O\"][0]\n R.N = R.atomsMap[\"N\"][0]\n R.C = R.atomsMap[\"C\"][0]\n R.CA = R.atomsMap[\"CA\"][0]\n R.CB = R.atomsMap[\"CB\"][0]\n R.CG = R.atomsMap[\"CG\"][0]\n R.OD1 = R.atomsMap[\"OD1\"][0]\n R.OD2 = R.atomsMap[\"OD2\"][0]\n\n\n bones = []\n bones.append ( Bone(R.CA, R.N, R.CB) )\n bones.append ( Bone(R.CA, R.C, R.CB) )\n bones.append ( Bone(R.C, R.O, R.CA) )\n\n bones.append ( Bone(R.CA, R.CB, R.N) )\n bones.append ( Bone(R.CG, R.CB, R.OD1) )\n bones.append ( Bone(R.CG, R.OD1, R.OD2) )\n bones.append ( Bone(R.CG, R.OD2, R.OD1) )\n\n for bi, bo in enumerate ( bones ) :\n if GetMod ( \"bone_%d.mrc\" % bi ) != None : chimera.openModels.close ( \"bone_%d.mrc\" % bi )\n if GetMod ( \"bone_%d.mrc_mesh\" % bi ) != None : chimera.openModels.close ( \"bone_%d.mrc_mesh\" % bi )\n bo.dmap = BoneMap ( bo, dmap, 1.0, 
\"bone_%d.mrc\" % bi, show = False, showMesh=True )\n\n\n v1 = R.CB.coord() - R.CA.coord(); v1.normalize()\n v2 = R.CB.coord() - R.CG.coord(); v2.normalize()\n ang = numpy.arccos ( v1*v2 ) * 180.0/numpy.pi\n print ang\n ax = chimera.cross ( v1, v2 ); ax.normalize()\n\n dmap.display = False\n mol.display = False\n\n NB = 2\n #N = 90\n toAng = -ang + 180\n dAng = toAng / float(N)\n\n print \"CB-CG: %.2f/%.2f deg\" % (toAng, dAng)\n\n rmaps = None\n\n for i in range ( N ) :\n\n print i,\n\n T = chimera.Xform.translation ( R.CB.coord().toVector() )\n #T.multiply ( chimera.Xform.rotation ( ax, -ang + 180 ) )\n T.multiply ( chimera.Xform.rotation ( ax, dAng ) )\n T.multiply ( chimera.Xform.translation ( R.CB.coord().toVector()*-1.0 ) )\n\n for an in [\"CG\", \"OD1\", \"OD2\"] :\n at = R.atomsMap[an][0]\n at.setCoord ( T.apply (at.coord()) )\n\n #SkinMap ( R.atoms, bones, NB, dmap, 2.0, \"skinned.mrc\", True)\n #MoldMap ( R.atoms, bones, dmap, \"molded.mrc\", showMesh=True )\n\n if rmaps == None :\n rmaps = MapForAtoms ( R.atoms, dmap, nname+\".mrc\", showMesh=True )\n # for m in rmaps :\n # if m != None :\n # m.openState.xform = nmol.openState.xform\n\n MoldMap2 ( bones, rmaps[0], rmaps[1] )\n\n\n if N > 1 :\n chimera.viewer.postRedisplay()\n self.toplevel_widget.update_idletasks ()\n chimera.printer.saveImage ( mpath + \"/%06d.png\" % framei )\n framei += 1\n\n print \"\"\n\n if 1 :\n\n d1 = diha ( R.N, R.CB, R.CG, R.OD1 )\n d2 = diha ( R.N, R.CB, R.CG, R.OD2 )\n ang = d1 if numpy.abs(d1) < numpy.abs(d2) else d2\n print \"CG dihedral - \", d1, d2, \" -> \", ang\n ax = R.CG.coord() - R.CB.coord(); ax.normalize()\n\n toAng = -ang\n dAng = toAng / float( max(N/2,1) )\n print \"CG dihedral -- %.2f/%.2f deg\" % (toAng, dAng)\n\n for i in range ( max(N/2,1) ) :\n\n print i,\n\n T = chimera.Xform.translation ( R.CG.coord().toVector() )\n T.multiply ( chimera.Xform.rotation ( ax, dAng ) )\n T.multiply ( chimera.Xform.translation ( R.CG.coord().toVector()*-1.0 ) )\n\n for an in [\"OD1\", \"OD2\"] :\n at = R.atomsMap[an][0]\n at.setCoord ( T.apply (at.coord()) )\n\n #print \"%d bones\" % len(bones)\n #PtsToMapSkinD ( R.atoms, bones, NB, dmap, 2.0, \"skinned.mrc\", True)\n #MoldMap ( R.atoms, bones, dmap, \"molded.mrc\", showMesh=True )\n MoldMap2 ( bones, rmaps[0], rmaps[1] )\n\n if N > 1 :\n chimera.viewer.postRedisplay()\n self.toplevel_widget.update_idletasks ()\n chimera.printer.saveImage ( mpath + \"/%06d.png\" % framei )\n framei += 1\n\n\n\n\n if N > 1 :\n args = [ \"/Users/greg/_mol/Chimera.app/Contents/Resources/bin/ffmpeg\", \"-r\", \"30\",\n \"-i\", mpath + \"/%06d.png\", \"-y\", \"-qscale\", \"1\", \"-b\", \"9000\", \"-vcodec\", \"mpeg4\", # mpeg4 libx264\n \"-f\", \"mov\", mpath+\"/__ares.mov\" ]\n\n print \"- running: \"\n for a in args : print a,\n print \"\"\n\n import subprocess\n subprocess.call ( args )\n print \"done!\\n\"\n\n\n\n\n\n\n\ndef CalcSCBBr ( mol, cid, dmap ) :\n\n print \"Calculating sc-bb ratios...\"\n\n ress = []\n bbAtoms = []\n allAtoms = []\n scAtoms = []\n for r in mol.residues :\n if cid == None or r.id.chainId == cid :\n ress.append ( r )\n bbAtoms.extend ( r.bbAtoms )\n allAtoms.extend ( r.atoms )\n scAtoms.extend ( r.scAtoms )\n\n bbAvgD, scAvgD = avgdAts ( bbAtoms, dmap ), avgdAts ( scAtoms, dmap )\n print \" - avgd - bb: %.3f, sc: %.3f\" % (bbAvgD, scAvgD)\n\n bbCC, bbCCm = ccAts ( bbAtoms, dmap, 2.0)\n print \" - all bb cc: %.3f, ccm: %.3f\" % (bbCC, bbCCm)\n\n cc, ccm = ccAts ( allAtoms, dmap, 2.0)\n print \" - all cc: %.3f, ccm: %.3f\" % (cc, ccm)\n\n\n dr, 
ccr, ccmr = [], [], []\n for r in ress :\n if len(r.scAtoms) > 0 :\n scAvgD = avgdAts ( r.scAtoms, dmap )\n #rbbAvgD = avgdAts ( r.bbAtoms, dmap )\n r.SCBBr = scAvgD / bbAvgD\n dr.append ( scAvgD / bbAvgD )\n\n scCC, scCCm = ccAts ( r.scAtoms, dmap, 2.0)\n ccr.append ( scCC/bbCC )\n ccmr.append ( scCCm/bbCCm )\n\n r.SCBBr = scCCm\n\n else :\n r.SCBBr = None\n\n print \" - avg-r d:%.3f, cc:%.3f, ccm: %.3f\" % ( numpy.average ( dr ), numpy.average ( ccr ), numpy.average ( ccmr ) )\n\n return cc, ccm, numpy.average ( dr ), numpy.average ( ccr ), numpy.average ( ccmr )\n\n\ndef ccAts ( atoms, dmap, resolution=3.0, mol=None ) :\n\n if mol == None :\n mol = atoms[0].molecule\n\n molg = MyMolMapX ( mol, atoms, resolution, dmap.data.step[0], chimera.Xform.identity() )\n fpoints, fpoint_weights = fit_points_g ( molg, 1e-3 )\n map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )\n olap, bbCC, bbCCm = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n return bbCC, bbCCm\n\n\ndef avgdAts ( atoms, dmap, mol=None ) :\n\n if mol == None :\n mol = atoms[0].molecule\n\n if len(atoms) < 1 :\n #print \" - no atoms\" % len(atoms)\n return 0\n\n from _multiscale import get_atom_coordinates\n apos = get_atom_coordinates(atoms, transformed = False)\n dvals = dmap.interpolated_values ( apos, mol.openState.xform )\n #print dvals\n return numpy.average(dvals)\n\n\n\n\n\ndef RadZ ( atoms, dmap, allAtTree = None, show=0, log=0, numPts=10, toRAD=2.0 ) :\n\n if len(atoms) == 0 :\n #print \" - no RAD atoms?\"\n return None\n\n #pts = []\n #for at in atoms :\n # p = at.coord()\n # pts.append ( [p[0], p[1], p[2]] )\n\n from _multiscale import get_atom_coordinates\n pts = get_atom_coordinates(atoms, transformed = False)\n\n d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )\n avg0 = numpy.average ( d_vals )\n\n\n #dRAD, toRAD, RAD = 0.2, 1.8, 0.1\n RAD = toRAD\n zscore = None\n\n outRad = RAD*0.9\n #outRad2 = outRad * outRad\n pts = []\n for at in atoms :\n npts = (numPts * RAD*RAD / (dRAD*dRAD)) if show else numPts\n npts = int ( npts )\n #print RAD, dRAD, numPts, \" -> \", npts\n outPts = SpherePts ( at.coord(), RAD, npts )\n for pt in outPts :\n if allAtTree != None :\n vPt = numpy.array ( [pt[0], pt[1], pt[2]] )\n opointsNear = allAtTree.searchTree ( [pt[0], pt[1], pt[2]], outRad )\n if len(opointsNear) > 0 :\n if 0 :\n clash = False\n for p in opointsNear :\n v = vPt - p.coord().data()\n sqSum = numpy.sum ( v * v )\n if sqSum < outRad2 :\n clash = True\n break\n if clash == False :\n pts.append ( [pt[0], pt[1], pt[2]] )\n else :\n pts.append ( [pt[0], pt[1], pt[2]] )\n else :\n pts.append ( [pt[0], pt[1], pt[2]] )\n\n if show :\n AddSpherePts ( pts, (.8,.2,.8,0.5), 0.1, \"RAD points %.1f\" % RAD )\n\n if len (pts) < 1 :\n if log :\n print \" - no points for RAD %.1f - %d.%s - \" % (RAD, atoms[0].residue.id.position, atoms[0].residue.type),\n print \"SC\" if atoms[0].isSC else \"BB\"\n\n else :\n d_vals = dmap.interpolated_values ( pts, atoms[0].molecule.openState.xform )\n avg = numpy.average ( d_vals )\n sdev = numpy.std ( d_vals )\n\n if sdev < 1e-4 : sdev = 1e-4\n zscore = (avg0 - avg) / sdev #(scores[0] - avg) / stdev\n #print \" - scores: avg %.4f, std %.4f, z-score %.4f\" % (avg, stdev, zscore )\n\n if log :\n print \" - q at rad %.2f, avg0 %.3f, avg %.3f, stdev %.4f, z %.3f, %d pts\" % (RAD, avg0, avg, sdev, zscore, len(pts))\n\n\n return zscore\n\n\n\n\n\n\ndef CurMolAndChain () :\n\n segModDialog = getdialog ()\n if segModDialog != None :\n\n if 
segModDialog.cur_mol == None :\n segModDialog.cur_mol = chimera.Molecule()\n segModDialog.cur_mol.name = \"Model\"\n #chimera.openModels.add ( [mol], noprefs = True )\n chimera.openModels.add ( [segModDialog.cur_mol] )\n segModDialog.struc.set ( segModDialog.cur_mol.name )\n\n try :\n segModDialog.cur_mol.openState.xform = chimera.openModels.list()[0].openState.xform\n except :\n pass\n\n chainId = segModDialog.chain.get()\n if len(chainId) == 0 :\n chainId = \"A\"\n segModDialog.chain.set ( chainId )\n\n return segModDialog.cur_mol, chainId\n\n return None, \"\"\n\n\ndef VisMapMod () :\n\n mol, map = None, None\n\n for m in OML(modelTypes = [chimera.Molecule]) :\n if m.display :\n mol = m\n\n for m in OML(modelTypes = [VolumeViewer.volume.Volume]) :\n if m.display :\n map = m\n\n return map, mol\n\n\n\ndef ZScoresVis ( ) :\n\n map, mol = VisMapMod()\n\n if mol != None and map != None :\n ZScores ( mol, map)\n else :\n print \"Did not find visible mol and map\"\n\n\n\ndef ZScores ( mol, map ) :\n\n resolution = 3.0 * map.data.step[0]\n print \"Mol: %s, Map: %s -- res %.1f\" % (mol.name, map.name, resolution)\n\n SetBBAts ( mol )\n\n\n cmap = {}\n for r in mol.residues :\n\n if r.id.chainId in cmap :\n cmap[r.id.chainId].append ( [r.id.position, r] )\n else :\n cmap[r.id.chainId] = [ [r.id.position, r] ]\n\n\n #ress = cmap['0']\n\n allBB, allSC = [], []\n\n for cid, ress in cmap.iteritems() :\n print \" - chain %s\" % cid\n\n ress.sort ()\n ares = [el[1] for el in ress]\n\n zscores = []\n if 0 :\n sses = SSEs ( ares )\n for el in sses :\n si, ei, ss, elRess = el\n zscore, ccs = zBB ( mol, elRess, resolution, map )\n #print ss, si, \"-\", ei, zscore\n if zscore != None :\n zscores.append ( zscore )\n for r in elRess :\n r.bbZ = zscore\n\n else :\n bbs = BBsegs ( self.seqRes )\n W = 3\n print \" - %d BB segments\" % len(bbs)\n for bb in bbs :\n print \" %d res, %d-%d\" % (len(bb),bb[0].id.position,bb[-1].id.position)\n\n for ri, r in enumerate ( bb ) :\n firstRi = max ( 0, ri-(W-1)/2 )\n lastRi = min ( len(bb)-1, ri+(W-1)/2 )\n ress = bb[firstRi:lastRi+1]\n zscore, ccs = zBB ( self.cur_mol, ress, resolution, map )\n if zscore != None :\n zscores.append ( zscore )\n\n avgBB = 0\n if len(zscores) > 0 :\n avgBB = numpy.average ( zscores )\n allBB.extend ( zscores )\n #print \" - BB - min %.2f max %.2f, avg %.2f\" % (min(zscores), max(zscores), avgBB )\n #else :\n # print \" - BB - no zscores?\"\n\n\n avgSC = 0\n zscores = CalcRotaZ ( map, mol, ares )\n if len(zscores) > 0 :\n avgSC = numpy.average(zscores)\n #print \" - SC - min %.2f max %.2f, avg %.2f\" % (min(zscores), max(zscores), numpy.average(zscores) )\n allSC.extend ( zscores )\n #else :\n # print \" - SC - no zscores?\"\n\n print \"Chain %s - %d res - avgBB %.2f, avgSC %.2f\" % ( cid, len(ares), avgBB, avgSC )\n\n\n print \"\"\n\n avgBB = 0\n if len(avgBB) > 0 :\n avgBB = numpy.average(allBB)\n print \"BB All - %d scores - min %.2f max %.2f, avg %.2f\" % (len(allBB), min(allBB), max(allBB), avgBB )\n else :\n print \"BB - no zscores?\"\n\n avgSC = 0\n if len(allSC) > 0 :\n avgSC = numpy.average(allSC)\n print \"SC All - %d scores - min %.2f max %.2f, avg %.2f\" % (len(allSC), min(allSC), max(allSC), avgSC )\n else :\n print \"SC - no zscores?\"\n\n print \"\"\n\n\n\n\n\ndef BBsegs ( ress ) :\n\n bbs = []\n\n firstRi, atRi = 0, 1\n for r in ress[1:] :\n if ress[atRi].id.position > ress[atRi-1].id.position + 1 or r.rtype == \"?\" :\n bbs.append ( ress[firstRi:atRi] )\n firstRi = atRi\n atRi += 1\n\n bbs.append ( ress[firstRi:atRi] )\n\n 
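# bbs now holds runs of consecutive residues, split wherever the numbering jumps or a residue of unknown type is reached\n    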
return bbs\n\n\n\n\ndef SSEs ( allRess ) :\n\n if len(allRess) < 1 :\n return []\n\n sses, ss = [], \"\"\n\n res, rStart = allRess[0], allRess[0]\n #print \" - at first res / pos: %d \" % res.id.position\n if res.isHelix :\n ss = \"H\"\n elif res.isSheet or res.isStrand :\n ss = \"E\"\n else :\n ss = \"_\"\n\n ress = [ res ]\n lastRes = rStart\n for res in allRess [1:] :\n\n if res.id.position > lastRes.id.position + 1 :\n print \" - gap at\", res.id.position\n sses.append ( [rStart.id.position, lastRes.id.position, ss, ress] )\n ress = []\n rStart = res\n if res.isHelix :\n ss = \"H\"\n elif res.isSheet or res.isStrand :\n ss = \"E\"\n else :\n ss = \"_\"\n\n if res.isHelix :\n if ss != \"H\" :\n #print \"%s -> H - at %d rid %d | %d->%d, %d res\" % (ss, i, res.id.position, rStart.id.position, lastRes.id.position, len(ress))\n sses.append ( [rStart.id.position, lastRes.id.position, ss, ress] )\n ress = []\n rStart = res\n ss = \"H\"\n elif res.isSheet or res.isStrand :\n if ss != \"E\" :\n #print \"%s -> E - at %d rid %d | %d->%d, %d res\" % (ss, i, res.id.position, rStart.id.position, lastRes.id.position, len(ress))\n sses.append ( [rStart.id.position, lastRes.id.position, ss, ress] )\n ress = []\n rStart = res\n ss = \"E\"\n else :\n if ss == \"H\" or ss == \"E\" :\n #print \"%s -> _ at %d rid %d | %d->%d, %d res\" % (ss, i, res.id.position, rStart.id.position, lastRes.id.position, len(ress))\n sses.append ( [rStart.id.position, lastRes.id.position, ss, ress] )\n ress = []\n rStart = res\n ss = \"_\"\n\n ress.append ( res )\n lastRes = res\n\n #print \"Done at rid %d - %s | %d->%d, %d res\" % ( res.id.position, ss, rStart.id.position, res.id.position, len(ress))\n sses.append ( [rStart.id.position, res.id.position, ss, ress] )\n return sses\n\n\n\n\ndef CalcRotaZ ( dmap, mol, ress ) :\n\n A = []\n resolution = 3.0 * dmap.data.step[0]\n\n for ri, res in enumerate ( ress ) :\n\n if 1 :\n if res.isProt :\n res.scZ, cc = zRotSideChain ( mol, res, resolution, dmap, show=False )\n elif res.isNA :\n res.scZ = zRotBase ( mol, res, resolution, dmap, show=False )\n else :\n print \"?_%d.%s_%s\" % (res.id.position, res.id.chainId, res.type)\n res.scZ = 0\n res.scQ = res.scZ\n\n else :\n res.scZ = zShakeSC ( mol, res, resolution, dmap, show=False )\n\n\n if res.scZ != None :\n A.append ( res.scZ )\n\n\n #avgA, stdA = numpy.average ( A ), numpy.std ( A )\n #umsg ( \"Avg side chain Z-score: %.3f\" % ( avgA ) )\n return A\n\n\n\ndef MoveSC () :\n\n map, mol = VisMapMod()\n resolution = 3.0 * map.data.step[0]\n\n print \"Map: %s, mol: %s\" % (map.name, mol.name)\n res = chimera.selection.currentResidues()[0]\n print \" - res: %s %d.%s\" % (res.type, res.id.position, res.id.chainId)\n z = None\n\n if 1 :\n if res.isProt :\n z, cc = zRotSideChain ( mol, res, resolution, map, True )\n elif res.isNA :\n z = zRotBase ( mol, res, resolution, map, True )\n\n else :\n z = zShakeSC ( mol, res, resolution, map, True )\n\n print z\n\n\ndef score3 (R) :\n\n selAts = chimera.selection.currentAtoms()\n if len ( selAts ) == 0 :\n return\n\n dmap = getdialog ().cur_dmap\n\n a = selAts[0]\n r = a.residue\n print \"Res: %s - %d.%s - %s - Atom: %s\" % (r.type, r.id.position, r.id.chainId, r.molecule.name, a.name)\n\n if not hasattr ( r.molecule, 'bbats' ) :\n SetBBAts(r.molecule)\n r.molecule.bbats = True\n\n removeMods = []\n for m in chimera.openModels.list() :\n if \"RAD points\" in m.name :\n removeMods.append ( m )\n chimera.openModels.remove ( removeMods )\n\n\n ats = [at for at in self.cur_mol.atoms if not 
at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(r.molecule.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n\n #allAtTree = None\n #print \"-\"\n\n import time\n start = time.time()\n\n #r.sdev = RadAts ( selAts, dmap, allAtTree=allAtTree, show=1, log=0, numPts=40, toRAD=2, dRAD=0.5 )\n r.sigma = RadAts ( r.scAtoms, dmap, allAtTree=allAtTree, show=0, log=1, numPts=20, toRAD=2, dRAD=0.5 )\n\n end = time. time()\n\n print \"%s - rad: %.3f, time: %f\" % ( a.name, r.sigma, (end - start) )\n\n scZ, cc = zRotSideChain ( r.molecule, r, R, dmap, show=False )\n print \" - cc %.3f, scZ %.3f \" % (cc, scZ)\n print \"%f\\t%f\\t%f\" % (r.sigma, cc, scZ)\n\n\n\n\ndef zShakeSC ( mol, res, resolution, dmap, show=False ) :\n\n atoms = res.scAtoms\n\n if len(atoms) < 1 :\n #print \" - no sc atoms\" % len(atoms)\n return None\n\n score0 = 0\n scores, scorest = [], []\n T = 1\n trange = [-T*1.0, 0.0, T*1.0]\n #trange = [-T*2.0, -T, 0.0, T, T*2.0]\n\n fout = None\n if show :\n fout = open (\"/Users/greg/Desktop/sc.txt\", \"w\")\n\n moved = False\n\n for xx in trange :\n for yy in trange :\n for zz in trange :\n\n v = chimera.Vector(xx,yy,zz)\n xfT = chimera.Xform.translation ( chimera.Vector(xx,yy,zz) )\n\n molg = MyMolMapX ( mol, atoms, resolution, dmap.data.step[0], xfT )\n\n fpoints, fpoint_weights = fit_points_g ( molg )\n map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )\n olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n\n if numpy.fabs(xx) < .01 and numpy.fabs(yy) < .01 and numpy.fabs(zz) < .01 :\n score0 = corr1\n else :\n scores.append ( corr1 )\n if fout :\n\n #if not moved :\n nmol, cress = CopyRess ( [res] )\n for nr in cress :\n for nat in nr.atoms :\n try :\n nat.setCoord ( xfT.apply ( nat.coord() ) )\n except :\n pass\n #chimera.openModels.add ( [nmol] )\n nmol.name = \"S_%.0f_%.0f_%.0f\" % (xx,yy,zz)\n moved = True\n\n scorest.append ( [corr1, [xx,yy,zz], nmol] )\n\n\n if fout :\n scorest.sort ()\n #scorest.reverse ()\n scorest = scorest[0:len(scorest)/2]\n if fout :\n fout.write ( \"%.0f,%.0f,%.0f\\t%f\\n\" % (0,0,0, score0) )\n for sc, t, nmol in scorest:\n fout.write ( \"%.0f,%.0f,%.0f\\t%f\\n\" % (t[0],t[1],t[2], sc) )\n chimera.openModels.add ( [nmol] )\n SetBBAts ( nmol )\n for at in nmol.atoms :\n at.display = at.isSC\n\n\n fout.close()\n\n if 1 :\n scores.sort ()\n #scores.reverse ()\n scores = scores[0:len(scores)/2]\n\n #print \"\"\n avg = numpy.average ( scores ) #numpy.average ( scores[1:] )\n stdev = numpy.std ( scores ) #numpy.std ( scores[1:] )\n if stdev < 1e-8 :\n #print \" - nostdev\"\n return None\n zscore = (score0 - avg) / stdev #(scores[0] - avg) / stdev\n #print \" - scores: avg %.4f, std %.4f, z-score %.4f\" % (avg, stdev, zscore )\n #fout.close()\n\n return zscore\n\n\n\n\ndef zRotSideChain ( mol, r, resolution, dmap, show=False ) :\n\n r.CA, r.CB, r.CG = None, None, None\n try :\n r.CA = r.atomsMap[\"CA\"][0]\n r.CB = r.atomsMap[\"CB\"][0]\n except :\n pass\n\n if \"CG\" in r.atomsMap :\n r.CG = r.atomsMap[\"CG\"][0]\n elif \"CG1\" in r.atomsMap :\n r.CG = r.atomsMap[\"CG1\"][0]\n elif \"CG2\" in r.atomsMap :\n r.CG = r.atomsMap[\"CG2\"][0]\n elif \"OG\" in r.atomsMap :\n r.CG = r.atomsMap[\"OG\"][0]\n elif \"SG\" in r.atomsMap :\n r.CG = r.atomsMap[\"SG\"][0]\n\n if r.CA == None or r.CB == None or r.CG == None :\n #print r.type, \" - no ats\"\n return None, None\n\n resolution = 3.0 * 
dmap.data.step[0]\n\n scores = []\n\n #molg = MyMolMap ( mol, r.atoms, resolution, dmap.data.step[0] )\n #fpoints, fpoint_weights = fit_points_g ( molg )\n #map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )\n #olap_0, corr1_0, corr2_0 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n\n rats = r.scAtoms\n nrats = []\n for at in rats :\n try :\n at.p0 = at.coord()\n nrats.append ( at )\n except :\n pass\n\n fout = None\n if show :\n fout = open (\"/Users/greg/Desktop/sc.txt\", \"w\")\n\n #for ri, rmol in enumerate ( rmols[0:10] ) :\n for deg in range (0, 360, 36) :\n\n RotAts ( nrats, r.CA, r.CB, deg )\n\n if fout :\n nmol, cress = CopyRess ( [r] )\n chimera.openModels.add ( [nmol] )\n nmol.name = \"SC %d %.0f\" % (r.id.position, deg)\n nr = nmol.residues[0]\n SetBBAts ( nmol )\n for at in nr.atoms :\n if at.isBB :\n at.display = False\n else :\n at.display = True\n\n corr = ResCC ( mol, nrats, resolution, dmap )\n scores.append ( corr )\n\n for at in nrats :\n at.setCoord ( at.p0 )\n\n if fout :\n for sci, sc in enumerate ( scores ):\n fout.write ( \"%d\\t%f\\n\" % (sci*36, sc) )\n\n fout.close()\n\n zscore1 = None\n if len(scores) > 3 :\n avg = numpy.average ( scores[1:] )\n stdev = numpy.std ( scores[1:] )\n zscore1 = ( (scores[0] - avg) / stdev ) if stdev > 1e-5 else 0\n #print \" -0- avg %.4f, std %.4f, z-score %.4f\" % (avg, stdev, zscore1 )\n #print scores\n #print \" -1- avg %.4f, std %.4f, z-score %.4f\" % (avg, stdev, zscore1 )\n\n\n return zscore1, scores[0]\n\n\n\n\ndef zRotBase ( mol, r, resolution, dmap, show=False ) :\n\n resolution = 3.0 * dmap.data.step[0]\n\n scores = []\n\n rats = r.scAtoms\n nrats = []\n for at in rats :\n try :\n if at.element.name == \"H\" :\n continue\n at.p0 = at.coord()\n nrats.append ( at )\n except :\n pass\n\n fout = None\n if show :\n fout = open (\"/Users/greg/Desktop/sc.txt\", \"w\")\n\n #for ri, rmol in enumerate ( rmols[0:10] ) :\n for deg in range (0, 360, 36) :\n\n RotAts ( nrats, r.atomsMap[\"C1'\"][0], r.baseAt, deg )\n\n if fout :\n nmol, cress = CopyRess ( [r] )\n chimera.openModels.add ( [nmol] )\n nmol.name = \"SC %d %.0f\" % (r.id.position, deg)\n nr = nmol.residues[0]\n SetBBAts ( nmol )\n for at in nr.atoms :\n if at.isBB :\n at.display = False\n else :\n at.display = True\n\n corr = ResCC ( mol, nrats, resolution, dmap )\n scores.append ( corr )\n\n for at in nrats :\n at.setCoord ( at.p0 )\n\n if fout :\n for sci, sc in enumerate ( scores ):\n fout.write ( \"%d\\t%f\\n\" % (sci*36, sc) )\n\n fout.close()\n\n zscore1 = None\n if len(scores) > 3 :\n avg = numpy.average ( scores[1:] )\n stdev = numpy.std ( scores[1:] )\n zscore1 = ( (scores[0] - avg) / stdev ) if stdev > 1e-5 else 0\n #print \" -1- avg %.4f, std %.4f, z-score %.4f\" % (avg, stdev, zscore1 )\n\n\n return zscore1\n\n\n\n\n\n\n\ndef MoveBB () :\n\n map, mol = VisMapMod()\n resolution = 3.0 * map.data.step[0]\n\n print \"Map: %s, mol: %s\" % (map.name, mol.name)\n z, cc = zBB ( mol, chimera.selection.currentResidues(), resolution, map, True )\n print z\n\n\n\ndef zBB ( mol, ress, resolution, dmap, show=False ) :\n\n atoms = []\n for r in ress :\n #if 'C' in r.atomsMap : atoms.append ( r.atomsMap['C'][0] )\n #if 'N' in r.atomsMap : atoms.append ( r.atomsMap['N'][0] )\n #if 'CA' in r.atomsMap : atoms.append ( r.atomsMap['CA'][0] )\n #if 'O' in r.atomsMap : atoms.append ( r.atomsMap['O'][0] )\n atoms.extend ( r.bbAtoms )\n atoms.extend ( r.scAtoms )\n\n if len(atoms) < 1 :\n #print \" - no atoms\" % len(atoms)\n return [0,0]\n\n score0 
= 0\n scores, scorest = [], []\n T = 2\n trange = [-T*1.0, 0.0, T*1.0]\n #trange = [-T*2.0, -T, 0.0, T, T*2.0]\n\n fout = None\n if show :\n fout = open (\"/Users/greg/Desktop/sse.txt\", \"w\")\n\n moved = False\n\n for xx in trange :\n for yy in trange :\n for zz in trange :\n\n v = chimera.Vector(xx,yy,zz)\n xfT = chimera.Xform.translation ( chimera.Vector(xx,yy,zz) )\n\n molg = MyMolMapX ( mol, atoms, resolution, dmap.data.step[0], xfT )\n\n fpoints, fpoint_weights = fit_points_g ( molg )\n map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )\n olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n\n if numpy.fabs(xx) < .01 and numpy.fabs(yy) < .01 and numpy.fabs(zz) < .01 :\n score0 = corr2\n else :\n scores.append ( corr2 )\n if fout :\n scorest.append ( [corr2, [xx,yy,zz]] )\n\n if not moved :\n nmol, cress = CopyRess ( ress )\n for nr in cress :\n for nat in nr.atoms :\n try :\n nat.setCoord ( xfT.apply ( nat.coord() ) )\n except :\n pass\n chimera.openModels.add ( [nmol] )\n nmol.name = \"T_%.0f_%.0f_%.0f\" % (xx,yy,zz)\n moved = True\n\n\n if fout :\n scorest.sort ()\n scorest.reverse ()\n scorest = scorest[len(scorest)/2:]\n if fout :\n fout.write ( \"%.0f,%.0f,%.0f\\t%f\\n\" % (0,0,0, score0) )\n for sc, t in scorest:\n fout.write ( \"%.0f,%.0f,%.0f\\t%f\\n\" % (t[0],t[1],t[2], sc) )\n\n fout.close()\n\n if 0 :\n scores.sort ()\n scores.reverse ()\n scores = scores[len(scores)/2:]\n\n #print \"\"\n avg = numpy.average ( scores ) #numpy.average ( scores[1:] )\n stdev = numpy.std ( scores ) #numpy.std ( scores[1:] )\n if stdev < 1e-8 :\n #print \" - nostdev\"\n return [0,0]\n zscore = (score0 - avg) / stdev #(scores[0] - avg) / stdev\n #print \" - scores: avg %.4f, std %.4f, z-score %.4f\" % (avg, stdev, zscore )\n #fout.close()\n\n return [zscore, score0]\n\n\n\n\n\n\n\ndef CopyRess ( res ) :\n\n nmol = chimera.Molecule()\n ress = [None] * len ( res )\n\n aMap = dict()\n for ri, r in enumerate ( res ) :\n nres = nmol.newResidue (r.type, chimera.MolResId(r.id.chainId, r.id.position))\n ress[ri] = nres\n for at in r.atoms :\n nat = nmol.newAtom (at.name, chimera.Element(at.element.number))\n aMap[at] = nat\n nres.addAtom( nat )\n p = chimera.Point ( at.coord().x, at.coord().y, at.coord().z )\n nat.setCoord ( p )\n nat.coord0 = chimera.Point ( at.coord().x, at.coord().y, at.coord().z )\n #if at.name == \"C\" or at.name == 'CA' or at.name == 'O' or at.name == \"N\" :\n # at.display = False\n\n\n\n for bond in res[0].molecule.bonds :\n try :\n nb = nmol.newBond ( aMap[bond.atoms[0]], aMap[bond.atoms[1]] )\n nb.display = nb.Smart\n except :\n pass\n\n for r in ress :\n r.CA, r.CB, r.CG = None, None, None\n try :\n r.CA = r.atomsMap[\"CA\"][0]\n r.CB = r.atomsMap[\"CB\"][0]\n r.CG = r.atomsMap[\"CG\"][0]\n except :\n pass\n\n return nmol, ress\n\n\n\ndef RotAts (rats, a1, a2, deg) :\n\n # phi: N -> CA\n p1, p2 = a1.coord(), a2.coord()\n v = p2 - p1; v.normalize()\n\n xf = chimera.Xform.translation ( p1.toVector() )\n xf.multiply ( chimera.Xform.rotation ( v, deg ) )\n xf.multiply ( chimera.Xform.translation ( p1.toVector() * -1.0 ) )\n\n #for at in res.atoms :\n # if at.name != 'C' and at.name != 'CA' and at.name != 'N' and at.name != 'CB' and at.name != 'O' :\n for at in rats :\n at.setCoord ( xf.apply (at.coord()) )\n\n\n\ndef MyMolMapX2 ( atoms, resolution, step=1.0, xf=None ) :\n\n from math import sqrt, pi\n\n pad = 3*resolution\n cutoff_range = 5 # in standard deviations\n sigma_factor = 1/(pi*sqrt(2)) # standard deviation / 
resolution\n\n from _multiscale import get_atom_coordinates\n xyz = get_atom_coordinates(atoms, transformed = False)\n\n # Transform coordinates to local coordinates of the molecule containing\n # the first atom. This handles multiple unaligned molecules.\n # Or if on_grid is specified transform to grid coordinates.\n #m0 = atoms[0].molecule\n\n #xf = m0.openState.xform\n #import Matrix as M\n #M.transform_points(xyz, M.xform_matrix(xf.inverse()))\n\n anum = [a.element.number for a in atoms]\n\n grid = bounding_grid(xyz, step, pad, [])\n grid.name = \"\"\n\n sdev = resolution * sigma_factor\n add_gaussians(grid, xyz, anum, sdev, cutoff_range, [])\n\n #return grid, molecules\n return grid\n\n\n\n\ndef molecule_grid_dataX (m0, atoms, resolution, step, pad, xfT, cutoff_range, sigma_factor, transforms = [], csys = None):\n\n from _multiscale import get_atom_coordinates\n xyz = get_atom_coordinates(atoms, transformed = True)\n\n # Transform coordinates to local coordinates of the molecule containing\n # the first atom. This handles multiple unaligned molecules.\n # Or if on_grid is specified transform to grid coordinates.\n #m0 = atoms[0].molecule\n xf = m0.openState.xform\n xf.multiply ( xfT )\n import Matrix as M\n M.transform_points(xyz, M.xform_matrix(xf.inverse()))\n if csys:\n xf.premultiply(csys.xform.inverse())\n tflist = M.coordinate_transform_list(transforms, M.xform_matrix(xf))\n\n anum = [a.element.number for a in atoms]\n\n molecules = set([a.molecule for a in atoms])\n if len(molecules) > 1:\n name = 'molmap res %.3g' % (resolution,)\n else:\n name = 'molmap %s res %.3g' % (m0.name, resolution)\n\n grid = bounding_grid(xyz, step, pad, tflist)\n grid.name = name\n\n sdev = resolution * sigma_factor\n add_gaussians(grid, xyz, anum, sdev, cutoff_range, tflist)\n\n #return grid, molecules\n return grid\n\n\ndef MyMolMapX ( m0, atoms, resolution, step, xf ) :\n\n #from MoleculeMap import molecule_grid_data\n from math import sqrt, pi\n #from chimera import openModels as om\n #from VolumeViewer import volume_from_grid_data\n\n atoms = tuple(atoms)\n\n pad = 3*resolution\n cutoff_range = 5 # in standard deviations\n sigma_factor = 1/(pi*sqrt(2)) # standard deviation / resolution\n transforms,csys = [], None\n display_threshold = 0.95\n\n return molecule_grid_dataX (m0, atoms, resolution, step, pad, xf, cutoff_range, sigma_factor, transforms, csys)\n\n\n\ndef MyMolMap ( m0, atoms, resolution, step ) :\n\n #from MoleculeMap import molecule_grid_data\n from math import sqrt, pi\n from chimera import openModels as om\n from VolumeViewer import volume_from_grid_data\n\n atoms = tuple(atoms)\n\n pad = 3*resolution\n cutoff_range = 5 # in standard deviations\n sigma_factor = 1/(pi*sqrt(2)) # standard deviation / resolution\n transforms,csys = [], None\n #display_threshold = 0.95\n\n return molecule_grid_data(m0, atoms, resolution, step, pad, None, cutoff_range, sigma_factor, transforms, csys)\n\n\n\n\ndef molecule_grid_data(m0, atoms, resolution, step, pad, on_grid,\n cutoff_range, sigma_factor,\n transforms = [], csys = None):\n\n\n\n from _multiscale import get_atom_coordinates\n xyz = get_atom_coordinates(atoms, transformed = True)\n\n # Transform coordinates to local coordinates of the molecule containing\n # the first atom. 
This handles multiple unaligned molecules.\n # Or if on_grid is specified transform to grid coordinates.\n #m0 = atoms[0].molecule\n xf = on_grid.openState.xform if on_grid else m0.openState.xform\n import Matrix as M\n M.transform_points(xyz, M.xform_matrix(xf.inverse()))\n if csys:\n xf.premultiply(csys.xform.inverse())\n tflist = M.coordinate_transform_list(transforms, M.xform_matrix(xf))\n\n anum = [a.element.number for a in atoms]\n\n molecules = set([a.molecule for a in atoms])\n if len(molecules) > 1:\n name = 'molmap res %.3g' % (resolution,)\n else:\n name = 'molmap %s res %.3g' % (m0.name, resolution)\n\n if on_grid:\n from numpy import float32\n grid = on_grid.region_grid(on_grid.region, float32)\n else:\n grid = bounding_grid(xyz, step, pad, tflist)\n grid.name = name\n\n sdev = resolution * sigma_factor\n add_gaussians(grid, xyz, anum, sdev, cutoff_range, tflist)\n\n #return grid, molecules\n return grid\n\n\n\n\ndef ResCC ( mol, rats, resolution, dmap ) :\n\n molg = MyMolMap ( mol, rats, resolution, dmap.data.step[0] )\n\n #if 0 :\n # fmap = VolumeViewer.volume.volume_from_grid_data ( molg )\n # fmap.name = \"res molmap!\"\n # fpoints, fpoint_weights = fit_points(fmap, False)\n # map_values = dmap.interpolated_values ( fpoints, fmap.openState.xform )\n # olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n # scores.append ( corr1 )\n # chimera.openModels.close ( [fmap] )\n #else :\n\n fpoints, fpoint_weights = fit_points_g ( molg, 0.1 )\n map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )\n olap, corr1, corr2 = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n return corr1, corr2\n\n\n\ndef fit_points_g (fdata, threshold = 0.3) :\n\n mat = fdata.full_matrix()\n\n import _volume\n points = _volume.high_indices(mat, threshold)\n fpoints = points.astype(numpy.single)\n fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]\n\n nz = numpy.nonzero( fpoint_weights )[0]\n if len(nz) < len (fpoint_weights) :\n fpoints = numpy.take( fpoints, nz, axis=0 )\n fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)\n\n transform_vertices ( fpoints, fdata.ijk_to_xyz_transform )\n\n if 0 : print \"FitPoints from %s with threshold %.4f, %d nonzero\" % (\n fmap.name, threshold, len(nz) )\n\n return fpoints, fpoint_weights\n\n\ndef fit_points (fmap, threshold = 1e-5) :\n\n mat = fmap.data.full_matrix()\n\n import _volume\n points = _volume.high_indices(mat, threshold)\n fpoints = points.astype(numpy.single)\n fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]\n\n nz = numpy.nonzero( fpoint_weights )[0]\n if len(nz) < len (fpoint_weights) :\n fpoints = numpy.take( fpoints, nz, axis=0 )\n fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)\n\n from _contour import affine_transform_vertices as transform_vertices\n transform_vertices ( fpoints, fmap.data.ijk_to_xyz_transform )\n #transform_vertices ( fpoints, Matrix.xform_matrix( fmap.openState.xform ) )\n\n if 0 : print \"FitPoints from %s with threshold %.4f, %d nonzero\" % (\n fmap.name, threshold, len(nz) )\n\n return fpoints, fpoint_weights\n\n\n\n\n\n# -----------------------------------------------------------------------------\n#\ndef bounding_grid(xyz, step, pad, transforms):\n\n xyz_min, xyz_max = point_bounds(xyz, transforms)\n origin = [x-pad for x in xyz_min]\n from math import ceil\n shape = [int(ceil((xyz_max[a] - xyz_min[a] + 2*pad) / step)) for a in (2,1,0)]\n from numpy import zeros, float32\n matrix = zeros(shape, float32)\n from VolumeData import 
Array_Grid_Data\n grid = Array_Grid_Data(matrix, origin, (step,step,step))\n return grid\n\n\n# -----------------------------------------------------------------------------\n#\ndef add_gaussians(grid, xyz, weights, sdev, cutoff_range, transforms = []):\n\n from numpy import zeros, float32, empty\n sdevs = zeros((len(xyz),3), float32)\n for a in (0,1,2):\n sdevs[:,a] = sdev / grid.step[a]\n\n import Matrix as M\n if len(transforms) == 0:\n transforms = [M.identity_matrix()]\n from _gaussian import sum_of_gaussians\n ijk = empty(xyz.shape, float32)\n matrix = grid.matrix()\n for tf in transforms:\n ijk[:] = xyz\n M.transform_points(ijk, M.multiply_matrices(grid.xyz_to_ijk_transform, tf))\n sum_of_gaussians(ijk, weights, sdevs, cutoff_range, matrix)\n\n from math import pow, pi\n normalization = pow(2*pi,-1.5)*pow(sdev,-3)\n matrix *= normalization\n\n\n\n# -----------------------------------------------------------------------------\n#\ndef point_bounds(xyz, transforms = []):\n\n from _multiscale import bounding_box\n if transforms :\n from numpy import empty, float32\n xyz0 = empty((len(transforms),3), float32)\n xyz1 = empty((len(transforms),3), float32)\n txyz = empty(xyz.shape, float32)\n import Matrix as M\n for i, tf in enumerate(transforms) :\n txyz[:] = xyz\n M.transform_points(txyz, tf)\n xyz0[i,:], xyz1[i,:] = bounding_box(txyz)\n xyz_min, xyz_max = xyz0.min(axis = 0), xyz1.max(axis = 0)\n else:\n xyz_min, xyz_max = bounding_box(xyz)\n\n return xyz_min, xyz_max\n\n\n\n\n\n# ---------------------------------------------------------------------------------\n\n\n\n\ndef SkinMap ( atoms, bones, N, dmap, atomRad, nname, showMesh = False ) :\n\n from _multiscale import get_atom_coordinates\n points = get_atom_coordinates ( atoms, transformed = True )\n\n import _contour\n points0 = numpy.copy ( points )\n _contour.affine_transform_vertices ( points0, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n\n\n nn3, nn2, nn1 = dmap.data.size\n\n npoints = VolumeData.grid_indices ( (int(nn1), int(nn2), int(nn3) ), numpy.single) # i,j,k indices\n _contour.affine_transform_vertices ( npoints, dmap.data.ijk_to_xyz_transform )\n _contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform ) )\n _contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform.inverse() ) )\n\n for bo in bones :\n bo.MakeFrame ()\n\n if N == 1 :\n for pi, p in enumerate ( npoints ) :\n\n cbone, minDist = None, 1e9\n for bo in bones :\n d = bo.DistToPoint ( p )\n\n if d < minDist :\n minDist = d\n cbone = bo\n\n pt = cbone.SkinPoint ( p )\n npoints[pi] = pt\n\n else :\n\n for pi, p in enumerate ( npoints ) :\n\n dbos = []\n for bo in bones :\n dbos.append ( [bo.DistToPoint ( p ), bo] )\n\n dbos.sort()\n\n totD = 0.0\n sp = numpy.array ( [0,0,0] )\n for i in range ( N ) :\n d, bo = dbos[i]\n sp = sp + numpy.array ( bo.SkinPoint ( p ) ) * d\n totD += d\n\n npoints[pi] = sp / totD\n\n\n _contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform ) )\n _contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n\n dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )\n nmat = dvals.reshape( (nn3,nn2,nn1) )\n ndata = VolumeData.Array_Grid_Data ( nmat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n\n mdata = VolumeData.zone_masked_grid_data ( ndata, points0, atomRad )\n\n MapFromData ( mdata, nname, dmap, False )\n if showMesh :\n MapFromData ( 
mdata, nname, dmap, True )\n\n\n\ndef ExtractDen ( atoms, dmap, nname, boundRad = 2.0, showMesh = False) :\n\n from _multiscale import get_atom_coordinates\n points1 = get_atom_coordinates ( atoms, transformed = False )\n #COM, U, S, V = prAxes ( points )\n\n bound = 4.0\n li,lj,lk = numpy.min ( points1, axis=0 ) - (bound, bound, bound)\n hi,hj,hk = numpy.max ( points1, axis=0 ) + (bound, bound, bound)\n\n nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )\n\n n1 = int ( numpy.ceil ( (hi - li + 1) / nstep[0] ) )\n n2 = int ( numpy.ceil ( (hj - lj + 1) / nstep[1] ) )\n n3 = int ( numpy.ceil ( (hk - lk + 1) / nstep[2] ) )\n\n O = chimera.Point ( li, lj, lk )\n #O = atoms[0].molecule.openState.xform.apply ( O )\n\n #print \" - new map origin:\", nO\n\n npoints = VolumeData.grid_indices ( (n1, n2, n3), numpy.single) # i,j,k indices\n S = dmap.data.step\n\n _contour.affine_transform_vertices ( npoints, ((S[0], 0.0, 0.0, O[0]), (0.0, S[1], 0.0, O[1]), (0.0, 0.0, S[1], O[2])) )\n #_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n\n dvals = dmap.interpolated_values ( npoints, atoms[0].molecule.openState.xform )\n nmat = dvals.reshape( (n3,n2,n1) )\n\n ndata = VolumeData.Array_Grid_Data ( nmat, O, nstep, dmap.data.cell_angles, name = nname )\n\n #_contour.affine_transform_vertices ( points1, Matrix.xform_matrix( atoms[0].molecule.openState.xform ) )\n #_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n mdata = VolumeData.zone_masked_grid_data ( ndata, points1, boundRad )\n\n dmap = MapFromData ( mdata, nname, dmap, False, color=(.7,.7,.7,.2) )\n dmap.openState.xform = atoms[0].molecule.openState.xform\n dmesh = None\n\n if showMesh :\n dmesh = MapFromData ( mdata, nname, dmap, True )\n dmesh.openState.xform = atoms[0].molecule.openState.xform\n\n return [dmap, dmesh]\n\n\n\n\n\n\ndef BoneMap ( bone, dmap, atomRad, nname, show = False, showMesh = False ) :\n\n #_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n #mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, atomRad )\n\n from _multiscale import get_atom_coordinates\n atoms = [bone.a1, bone.a2]\n points = get_atom_coordinates ( atoms, transformed = True )\n\n import _contour\n points1 = numpy.copy ( points )\n _contour.affine_transform_vertices ( points1, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n points0 = numpy.copy ( points1 )\n _contour.affine_transform_vertices ( points1, dmap.data.xyz_to_ijk_transform )\n\n bound = int ( numpy.ceil( atomRad / dmap.data.step[0] ) ) + 1\n li,lj,lk = numpy.min ( points1, axis=0 ) - (bound, bound, bound)\n hi,hj,hk = numpy.max ( points1, axis=0 ) + (bound, bound, bound)\n\n n1 = hi - li + 1\n n2 = hj - lj + 1\n n3 = hk - lk + 1\n\n #print \" - bounds - %d %d %d --> %d %d %d --> %d %d %d\" % ( li,lj,lk, hi,hj,hk, n1,n2,n3 )\n\n #nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )\n #dmat = dmap.full_matrix()\n\n nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )\n #nstep = (fmap.data.step[0]/2.0, fmap.data.step[1]/2.0, fmap.data.step[2]/2.0 )\n\n nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )\n nn2 = int ( round (dmap.data.step[1] * float(n2) / nstep[1]) )\n nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )\n\n O = dmap.data.origin\n #print \" - %s origin:\" % dmap.name, O\n nO = ( O[0] + float(li) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + 
float(lk) * dmap.data.step[2] )\n\n #print \" - new map origin:\", nO\n\n wmat = numpy.zeros ( (nn3,nn2,nn1), numpy.float32 )\n ndata = VolumeData.Array_Grid_Data ( wmat, nO, nstep, dmap.data.cell_angles )\n\n npoints = VolumeData.grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices\n npointsi = numpy.copy ( npoints )\n _contour.affine_transform_vertices ( npoints, ndata.ijk_to_xyz_transform )\n _contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform ) )\n _contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform.inverse() ) )\n\n for pi, p in enumerate ( npoints ) :\n\n i,j,k = npointsi[pi]\n d = bone.DistToPoint ( p )\n if d < atomRad :\n wmat[k,j,i] = 1.0\n else :\n wmat[k,j,i] = 1.0 / numpy.power (1+d-atomRad,8)\n\n _contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform ) )\n _contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n\n dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )\n nmat = dvals.reshape( (nn3,nn2,nn1) )\n\n bone.ndata = VolumeData.Array_Grid_Data ( nmat*wmat, nO, nstep, dmap.data.cell_angles, name = nname )\n bone.xfmod = dmap\n\n if show :\n\n from random import random as rand\n clr = ( rand()*.5+.1, rand()*.5+.1, rand()*.5+.1 )\n\n bone.dmap = MapFromData ( bone.ndata, nname, dmap, showMesh, color = clr )\n bone.dmap.openState.xform = dmap.openState.xform\n\n\n\n\n\ndef MoldMap ( atoms, bones, dmap, nname, showMesh = False ) :\n\n\n ndata = dmap.data\n nn3, nn2, nn1 = dmap.data.size\n nO = dmap.data.origin\n nmat = numpy.zeros ( (nn3,nn2,nn1), numpy.float32 )\n nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )\n\n if 1 :\n ndata = DataForAtoms ( atoms, dmap )\n\n npoints = VolumeData.grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices\n _contour.affine_transform_vertices ( npoints, ndata.ijk_to_xyz_transform )\n\n #_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform ) )\n #_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform.inverse() ) )\n\n for bone in bones :\n\n npointsc = numpy.copy ( npoints )\n\n _contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix( bone.Xf().inverse() ) )\n _contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix( bone.Xf0() ) )\n\n #_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( atoms[0].molecule.openState.xform ) )\n #_contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n\n _contour.affine_transform_vertices ( npointsc, bone.ndata.xyz_to_ijk_transform )\n\n p2mt = Matrix.xform_matrix ( chimera.Xform.identity() )\n #dvals, outvals = VolumeData.interpolate_volume_data ( npointsc, p2mt, bone.dmap.data.matrix(), method='linear' )\n dvals, outvals = VolumeData.interpolate_volume_data ( npointsc, p2mt, bone.ndata.matrix(), method='linear' )\n\n bmat = dvals.reshape( (nn3,nn2,nn1) )\n #nmat = nmat + bmat\n nmat = numpy.maximum ( nmat, bmat )\n\n #nmat = nmat / float ( len(bones) )\n\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles, name = nname )\n\n MapFromData ( ndata, nname, dmap, False )\n if showMesh :\n MapFromData ( ndata, nname, dmap, True )\n\n\n\n\ndef MoldMap2 ( bones, dmap, dmesh ) :\n\n\n ndata = dmap.data\n nn1, nn2, nn3 = dmap.data.size\n nO = dmap.data.origin\n nmat = numpy.zeros ( (nn3,nn2,nn1), numpy.float32 
)\n nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )\n\n npoints = VolumeData.grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices\n _contour.affine_transform_vertices ( npoints, ndata.ijk_to_xyz_transform )\n\n _contour.affine_transform_vertices ( npoints, Matrix.xform_matrix( dmap.openState.xform ) )\n _contour.affine_transform_vertices ( npoints, Matrix.xform_matrix (bones[0].a1.molecule.openState.xform.inverse()) )\n\n for bone in bones :\n\n npointsc = numpy.copy ( npoints )\n\n _contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix( bone.Xf().inverse() ) )\n _contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix( bone.Xf0() ) )\n\n _contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix (bone.a1.molecule.openState.xform) )\n _contour.affine_transform_vertices ( npointsc, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n\n _contour.affine_transform_vertices ( npointsc, bone.ndata.xyz_to_ijk_transform )\n\n p2mt = Matrix.xform_matrix ( chimera.Xform.identity() )\n #dvals, outvals = VolumeData.interpolate_volume_data ( npointsc, p2mt, bone.dmap.data.matrix(), method='linear' )\n dvals, outvals = VolumeData.interpolate_volume_data ( npointsc, p2mt, bone.ndata.matrix(), method='linear' )\n\n bmat = dvals.reshape( (nn3,nn2,nn1) )\n #nmat = nmat + bmat\n nmat = numpy.maximum ( nmat, bmat )\n\n #nmat = nmat / float ( len(bones) )\n\n #ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles, name = nname )\n dmap.data.full_matrix()[:,:,:] = nmat[:,:,:]\n dmap.data.values_changed()\n MapUp ( dmap, False )\n\n if dmesh != None :\n dmesh.data.full_matrix()[:,:,:] = nmat[:,:,:]\n dmesh.data.values_changed()\n MapUp ( dmesh, True )\n\n\n\n\ndef DataForAtoms ( atoms, dmap, nname = \"data for atoms\" ) :\n\n from _multiscale import get_atom_coordinates\n points = get_atom_coordinates ( atoms, transformed = True )\n\n points1 = numpy.copy ( points )\n _contour.affine_transform_vertices ( points1, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n #points0 = numpy.copy ( points1 )\n _contour.affine_transform_vertices ( points1, dmap.data.xyz_to_ijk_transform )\n\n bound = 5\n li,lj,lk = numpy.min ( points1, axis=0 ) - (bound, bound, bound)\n hi,hj,hk = numpy.max ( points1, axis=0 ) + (bound, bound, bound)\n\n n1 = hi - li + 1\n n2 = hj - lj + 1\n n3 = hk - lk + 1\n\n nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )\n\n nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )\n nn2 = int ( round (dmap.data.step[1] * float(n2) / nstep[1]) )\n nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )\n\n O = dmap.data.origin\n nO = ( O[0] + float(li) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + float(lk) * dmap.data.step[2] )\n\n nmat = numpy.zeros ( (nn3,nn2,nn1), numpy.float32 )\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles, name = nname )\n return ndata\n\n\n\ndef MapForAtoms ( atoms, dmap, nname, showMesh=False, thrF = 1.0 ) :\n\n ndata = DataForAtoms ( atoms, dmap, nname )\n\n m1 = MapFromData ( ndata, nname, dmap, False, thrF=thrF )\n m2 = None\n\n if showMesh :\n m2 = MapFromData ( ndata, nname, dmap, True, thrF=thrF )\n\n return [m1,m2]\n\n\ndef MapUp (dmap, showMesh = False, color=(.7,.7,.7,1)) :\n\n ro = VolumeViewer.volume.Rendering_Options()\n ro.smoothing_factor = .3\n ro.smoothing_iterations = 2\n ro.surface_smoothing = False\n ro.square_mesh = True\n ro.line_thickness = 1\n\n dmap.update_surface ( False, ro 
)\n for sp in dmap.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 :\n sp.display = False\n else :\n if showMesh :\n sp.color = (color[0]/2.0, color[1]/2.0, color[2]/2.0, 1.0)\n sp.displayStyle = sp.Mesh\n else :\n sp.color = (color[0], color[1], color[2], color[3])\n\n\ndef MapFromData ( ndata, nname, dmap, showMesh, thrF=1.0, color=(.7,.7,.7,1) ) :\n\n if showMesh :\n m = GetMod ( nname + \"_mesh\" )\n if m != None :\n chimera.openModels.close ( [m] )\n else :\n m = GetMod ( nname )\n if m != None :\n chimera.openModels.close ( [m] )\n\n\n nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n nv.openState.xform = dmap.openState.xform\n nv.name = nname\n if showMesh :\n nv.name = nname + \"_mesh\"\n nv.region = ( nv.region[0], nv.region[1], [1,1,1] )\n nv.surface_levels[0] = dmap.surface_levels[0] * thrF\n\n MapUp(nv, showMesh, color)\n return nv\n\n\n\ndef diha ( a1, a2, a3, a4 ) :\n #n1 = vnorm ( a1.coord(), a2.coord(), a3.coord() )\n #n2 = vnorm ( a2.coord(), a3.coord(), a4.coord() )\n #return numpy.arccos ( n2 * n1 * -1.0 ) * 180.0 / numpy.pi\n\n # http://math.stackexchange.com/questions/47059/how-do-i-calculate-a-dihedral-angle-given-cartesian-coordinates\n b1 = a2.coord() - a1.coord()\n b2 = a3.coord() - a2.coord()\n b3 = a4.coord() - a3.coord()\n\n n1 = chimera.cross ( b1, b2 ); n1.normalize()\n n2 = chimera.cross ( b2, b3 ); n2.normalize()\n m1 = chimera.cross ( n1, b2 ); m1.normalize()\n\n x = n1 * n2\n y = m1 * n2\n\n return -1.0 * numpy.arctan2 ( y, x) * 180.0 / numpy.pi\n\n\ndef angle ( a1, a2, a3 ) :\n n1 = a1.coord() - a2.coord()\n n2 = a3.coord() - a2.coord()\n return numpy.arccos ( (n2/n1.length) * (n1/n2.length) ) * 180.0 / numpy.pi\n\n\nclass Bone (object) :\n\n def __init__ (self, a1, a2, a3) :\n BoneInit ( self, a1, a2, a3 )\n\n def CS ( self ) :\n return CS ( a1.coord(), a2.coord(), a3.coord() )\n\n def CS0 ( self ) :\n return CS ( a1.coord0, a2.coord0, a3.coord0 )\n\n def Xf ( self ) :\n X,Y,Z = CS ( self.a1.coord(), self.a2.coord(), self.a3.coord() )\n return chimera.Xform.coordFrame ( X, Y, Z, self.a1.coord(), True )\n\n def Xf0 ( self ) :\n X,Y,Z = CS ( self.a1.coord0, self.a2.coord0, self.a3.coord0 )\n return chimera.Xform.coordFrame ( X, Y, Z, self.a1.coord0, True )\n\n def MakeFrame ( self ) :\n BoneMakeFrame ( self )\n\n def DistToPoint ( self, pt ) :\n return BoneDistToPoint ( self, pt )\n\n def SkinPoint ( self, pt ) :\n return BoneSkinPoint ( self, pt )\n\n\ndef BoneInit (bo, a1, a2, a3) :\n bo.a1, bo.a2, bo.a3 = a1, a2, a3\n bo.X0, bo.Y0, bo.Z0 = CS ( a1.coord0, a2.coord0, a3.coord0 )\n bo.F0 = chimera.Xform.coordFrame ( bo.X0, bo.Y0, bo.Z0, bo.a1.coord0, True )\n\ndef BoneMakeFrame ( bo ) :\n bo.X, bo.Y, bo.Z = CS ( bo.a1.coord(), bo.a2.coord(), bo.a3.coord() )\n bo.F = chimera.Xform.coordFrame ( bo.X, bo.Y, bo.Z, bo.a1.coord(), True )\n bo.F = bo.F.inverse()\n\n\ndef CS ( p1, p2, p3 ) :\n X = p2 - p1; X.normalize()\n Y = p3 - p1; Y.normalize()\n Z = chimera.cross ( X, Y ); Z.normalize()\n Y = chimera.cross ( Z, X ); Y.normalize()\n return X,Y,Z\n\n\ndef BoneDistToPoint ( bo, pt ) :\n\n pt = chimera.Point(pt[0], pt[1], pt[2])\n V = bo.a2.coord() - bo.a1.coord()\n v = pt - bo.a1.coord()\n t = V * v\n if t < 0.0 :\n return v.length\n elif t > 1.0 :\n return (pt-bo.a2.coord()).length\n else :\n lp = bo.a1.coord() + (V*t)\n return (pt-lp).length\n\n\ndef BoneSkinPoint ( bo, pt ) :\n\n #bo.X, bo.Y, bo.Z = CS ( bo.a1.coord(), bo.a2.coord(), bo.a3.coord() )\n #x = chimera.Xform.coordFrame ( bo.X, bo.Y, bo.Z, bo.a1.coord(), True )\n #x 
= x.inverse()\n #y = chimera.Xform.coordFrame ( bo.X0, bo.Y0, bo.Z0, bo.a1.coord0, True )\n\n pt = chimera.Point ( pt[0], pt[1], pt[2] )\n pt = bo.F.apply ( pt )\n pt = bo.F0.apply ( pt )\n return [pt[0], pt[1], pt[2]]\n\n\n\n\n\n# -----------------------------------------------------------------------------\n\n\n\n# -----------------------------------------------------------------------------\n#\n\n\ndef FitMolToMap ( mol, dmap, RES, doTranslate = True, doRotate = True ) :\n\n import FitMap\n\n fpoints = _multiscale.get_atom_coordinates ( mol.atoms, transformed = True )\n fpoint_weights = numpy.ones ( len(mol.atoms), numpy.float32 )\n\n darray = dmap.data.matrix()\n\n xyz_to_ijk_tf = dmap.data.xyz_to_ijk_transform\n\n dmm = Matrix.invert_matrix ( Matrix.xform_matrix ( dmap.openState.xform ) )\n\n mm = Matrix.multiply_matrices ( dmap.data.xyz_to_ijk_transform, dmm )\n\n\n map_values, outside = VolumeData.interpolate_volume_data(fpoints, mm, darray)\n\n #olap0, cc0, other = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n avg0 = numpy.average ( map_values )\n #print \" - 0 - \", avg0,\n\n move_tf, stats = FitMap.locate_maximum(fpoints, fpoint_weights,\n darray, mm,\n max_steps = 1000,\n ijk_step_size_min = 0.01,\n ijk_step_size_max = 0.5,\n optimize_translation = doTranslate,\n optimize_rotation = doRotate,\n metric = 'sum product',\n request_stop_cb = None)\n\n xf = chimera_xform ( move_tf )\n avg1 = stats['average map value']\n #print \" - 1 - \", avg1\n\n xfm = mol.openState.xform\n xfm.premultiply ( xf )\n mol.openState.xform = xfm\n\n molg = MyMolMapX ( mol, mol.atoms, RES, dmap.data.step[0], chimera.Xform.identity() )\n fpoints, fpoint_weights = fit_points_g ( molg, 0.22 )\n map_values = dmap.interpolated_values ( fpoints, mol.openState.xform )\n import FitMap\n mmolap, cc, ccm = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n #print \"Molmap - olap: %f, CC: %f, CCm: %f\" % (mmolap, mmcorr1, mmcorr2)\n\n return avg1, cc, ccm\n\n\n\n\n# ---------------------------------------------------\n\ndef getdialog ( create=False ) :\n\n from chimera import dialogs\n d = dialogs.find ( dlgName, create=False )\n return d\n\n\n\ndef close_dialog () :\n from chimera import dialogs\n\n\ndef setro (ro) :\n from chimera import dialogs\n d = dialogs.find ( \"volume viewer\", create=False )\n if d :\n d.surface_options_panel.set_gui_from_rendering_options (ro)\n #d.redisplay_needed_cb()\n\n\ndef vold () :\n from chimera import dialogs\n d = dialogs.find ( \"volume viewer\", create=False )\n d.surface_options_panel.line_thickness.set(2)\n d.redisplay_needed_cb()\n set_gui_from_rendering_options\n\n\n\ndef show_dialog () :\n\n from chimera import dialogs\n\n d = dialogs.find ( dlgName, create=False )\n if d :\n print \" - found old diag\"\n d.toplevel_widget.update_idletasks ()\n d.Close()\n d.toplevel_widget.update_idletasks ()\n\n dialogs.register (MapQ_Dialog.name, MapQ_Dialog, replace = True)\n\n d = dialogs.find ( dlgName, create=True )\n # Avoid transient dialog resizing when created and mapped for first time.\n d.toplevel_widget.update_idletasks ()\n d.enter()\n\n return d\n\n\n\ndef GetMod ( name ) :\n for m in chimera.openModels.list() :\n if m.name == name :\n return m\n return None\n\n\n\ndef SetBBAts ( mol ) :\n\n #if hasattr ( mol, \"bbats\" ) :\n # return\n #mol.bbats = True\n\n #print \" - setting bbAts in %s\" % mol.name\n for r in mol.residues :\n\n from chimera.resCode import nucleic3to1\n from chimera.resCode import protein3to1\n protein3to1['HSD'] = 
protein3to1['HIS']\n protein3to1['HSE'] = protein3to1['HIS']\n\n r.isProt = r.type in protein3to1\n r.isNA = r.type in nucleic3to1\n\n #if r.isProt : r.rtype = \"prot\"\n #lif r.isNA : r.rtype = \"na\"\n #else : r.rtype = \"?\"\n\n if 0 and r.isNA :\n try :\n if nucleic3to1[r.type] == \"G\" :\n r.baseAt = r.atomsMap[\"N9\"][0]\n elif nucleic3to1[r.type] == \"C\" :\n r.baseAt = r.atomsMap[\"N1\"][0]\n elif nucleic3to1[r.type] == \"A\" :\n r.baseAt = r.atomsMap[\"N9\"][0]\n elif nucleic3to1[r.type] == \"U\" :\n r.baseAt = r.atomsMap[\"N1\"][0]\n except :\n #print \" - baseAt not found - \"\n pass\n\n\n r.bbAtoms = []\n r.scAtoms = []\n\n if r.isProt :\n for a in r.atoms :\n if a.element.name == \"H\" :\n a.isBB, a.isSC = False, False\n continue\n n = a.name\n a.isBB = n==\"C\" or n==\"CA\" or n==\"O\" or n==\"N\" or n==\"OT1\" or n==\"OT2\"\n a.isSC = not a.isBB\n if a.isBB : r.bbAtoms.append ( a )\n else : r.scAtoms.append ( a )\n a.isSugar, a.isBase = False, False\n\n elif r.isNA :\n for a in r.atoms :\n if a.element.name == \"H\" :\n a.isBB, a.isSC = False, False\n continue\n n = a.name\n\n a.isBB = n==\"P\" or n==\"O1P\" or n==\"O2P\" or n==\"OP1\" or n==\"OP2\" or n==\"O5'\" or n==\"C5'\" or n==\"O3'\"\n a.isSugar = n==\"C1'\" or n==\"C2'\" or n==\"O4'\" or n==\"O2'\" or n==\"C3'\" or n==\"C4'\"\n\n a.isBB = a.isBB or a.isSugar\n a.isBase = not a.isBB\n a.isSC = a.isBase\n\n if 0 :\n if nucleic3to1[r.type] == \"G\" :\n a.isBase = n==\"N9\" or n==\"C8\" or n==\"N7\" or n==\"C5\" or n==\"C4\" or n==\"C6\" or n==\"O6\" or n==\"N1\" or n==\"C2\" or n==\"N2\" or n==\"N3\"\n\n elif nucleic3to1[r.type] == \"C\" :\n a.isBase = n==\"N1\" or n==\"C2\" or n==\"O2\" or n==\"N3\" or n==\"C4\" or n==\"N4\" or n==\"C5\" or n==\"C6\"\n\n elif nucleic3to1[r.type] == \"A\" :\n a.isBase = n==\"N9\" or n==\"C8\" or n==\"N7\" or n==\"C5\" or n==\"C4\" or n==\"N3\" or n==\"C2\" or n==\"N1\" or n==\"C6\" or n==\"N6\"\n\n elif nucleic3to1[r.type] == \"U\" :\n a.isBase = n==\"N1\" or n==\"C2\" or n==\"O2\" or n==\"N3\" or n==\"C4\" or n==\"O4\" or n==\"C5\" or n==\"C6\"\n\n else :\n #print \" -x- NA res %d.%s is ?\" % (r.id.position, r.type)\n break\n\n a.isSC = a.isBase\n a.isBB = a.isBB or a.isSugar\n\n #if nucleic3to1[r.type] == \"G\" :\n # r.isBase = n==\"\" or n==\"\" or n==\"\" or n==\"\" or n==\"\" or n==\"\" or n==\"\" or n==\"\" or n=\"\" or n=\"\" or n=\"\"\n # r.baseAt = r.atomsMap[\"N9\"][0]\n\n if a.isBB : r.bbAtoms.append ( a )\n else : r.scAtoms.append ( a )\n\n else :\n for a in r.atoms :\n a.isBB, a.isSC, a.isSugar, a.isBase = False, False, False, False\n\n\n\n#def GetVisibleMol () :\n# for m in chimera.openModels.list() :\n# if m.display == True and type(m) == chimera.Molecule :\n# return m\n# return None\n\nNA = {\n \"A\" : { \"baseAtoms\" : [\"\",\"\",\"\"] }\n}\n\n\nclass NA ( object ):\n\n type\n", "id": "4327734", "language": "Python", "matching_score": 12.614910125732422, "max_stars_count": 6, "path": "mapq/mapq.py" }, { "content": "\n# Copyright (c) 2020 <NAME> - <EMAIL>\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be 
included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport chimera\nimport os\nimport os.path\nimport Tkinter\nimport ttk\nimport tkFont\nfrom CGLtk import Hybrid\nimport VolumeData\nimport _multiscale\nimport MultiScale.surface\nimport _surface\nimport numpy\nimport _contour\nimport Matrix\nimport VolumeViewer\nfrom sys import stderr\nfrom time import clock\n\nfrom axes import prAxes\nimport regions\nimport graph\nfrom Segger import dev_menus, timing, seggerVersion\nfrom CGLutil.AdaptiveTree import AdaptiveTree\n\nfrom chimera.resCode import nucleic3to1\nfrom chimera.resCode import protein3to1\n\ndevMenus = True\n\nimport qscores\nreload (qscores)\n\nchargedIons = { \"MG\":2, \"NA\":1, \"CL\":-1, \"CA\":2, \"ZN\":2, \"MN\":2, \"FE\":3, \"CO\":2, \"NI\":2 }\n\natomColors = {'C' : chimera.MaterialColor (0.565,0.565,0.565),\n 'Cbb' : chimera.MaterialColor (0.2,0.6,0.2),\n 'S' : chimera.MaterialColor (1.000,1.000,0.188),\n 'O' : chimera.MaterialColor (1.000,0.051,0.051),\n 'N' : chimera.MaterialColor (0.188,0.314,0.973),\n 'P' : chimera.MaterialColor (1.0, 0.502, 0.0),\n 'H' : chimera.MaterialColor (0.9,.9,.9),\n ' ' : chimera.MaterialColor (0.2,1,.2),\n \"MG\" : chimera.MaterialColor (0,1,0),\n \"NA\" : chimera.MaterialColor (.7,.4,.9),\n \"CL\" : chimera.MaterialColor (.95,.59,.21), # orange\n \"CA\" : chimera.MaterialColor (0,1,0),\n \"ZN\" : chimera.MaterialColor (.52,.60,.25), # dark green\n \"MN\" : chimera.MaterialColor (0,1,0),\n \"FE\" : chimera.MaterialColor (.42,.48,.27), # turquise\n \"CO\" : chimera.MaterialColor (0,1,0),\n \"NI\" : chimera.MaterialColor (0,1,0)\n}\n\n\n\nfrom segment_dialog import current_segmentation, segmentation_map\n\n\ndef umsg ( txt ) :\n print txt\n status ( txt )\n\ndef status ( txt ) :\n txt = txt.rstrip('\\n')\n msg.configure(text = txt)\n msg.update_idletasks()\n\n\n# https://android.googlesource.com/toolchain/python/+/243b47fbef58ab866ee77567f2f52affd8ec8d0f/Python-2.7.3/Demo/tkinter/ttk/treeview_multicolumn.py\n\n\nclass SWIM_Dialog ( chimera.baseDialog.ModelessDialog ):\n\n title = \"SWIM (v1.0)\"\n name = \"swim\"\n\n if devMenus :\n buttons = (\"Thr\", \"Go\", \"Stats\", \"S1\", \"SN\", \"Options\")\n else :\n buttons = (\"Thr\", \"Go\", \"Stats\", \"Options\", \"Log\")\n #buttons = (\"Thr\", \"Go\", \"Stats\", \"Options\", \"Log\", \"Close\")\n\n\n help = 'https://github.com/gregdp/segger/blob/master/tutorials/Segger%20Tutorial%209%20-%20SWIM.pdf'\n\n def fillInUI(self, parent):\n\n self.group_mouse_mode = None\n\n tw = parent.winfo_toplevel()\n self.toplevel_widget = tw\n tw.withdraw()\n\n parent.columnconfigure(0, weight = 1)\n #parent.columnconfigure(1, weight = 1)\n\n row = 0\n\n menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)\n tw.config(menu = menubar)\n\n if 1 :\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w')\n\n #Tkinter.Grid.columnconfigure(parent, 0, weight=1)\n #Tkinter.Grid.columnconfigure(ff, 0, weight=1)\n\n\n l = Tkinter.Label(ff, text='Map: ')\n l.grid(column=0, row=0, sticky='w')\n\n 
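# menubutton for choosing the density map; its menu is rebuilt by self.MapMenu each time it is posted, and the chosen map name is shown via self.dmap\n            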
self.cur_dmap = None\n self.dmap = Tkinter.StringVar(parent)\n\n self.mb = Tkinter.Menubutton ( ff, textvariable=self.dmap, relief=Tkinter.RAISED )\n self.mb.grid (column=1, row=0, sticky='we', padx=2)\n self.mb.menu = Tkinter.Menu ( self.mb, tearoff=0, postcommand=self.MapMenu )\n self.mb[\"menu\"] = self.mb.menu\n\n ff.columnconfigure(1, weight=1)\n\n self.cur_dmap = None\n self.SetVisMap ()\n\n\n if 1 :\n row += 1\n\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w') # put we to stretch\n\n l = Tkinter.Label(ff, text='Model:', anchor=Tkinter.W)\n l.grid(column=0, row=0, sticky='w')\n\n self.struc = Tkinter.StringVar(parent)\n self.strucMB = Tkinter.Menubutton ( ff, textvariable=self.struc, relief=Tkinter.RAISED )\n self.strucMB.grid (column=1, row=0, sticky='we', padx=1)\n self.strucMB.menu = Tkinter.Menu ( self.strucMB, tearoff=0, postcommand=self.StrucMenu )\n self.strucMB[\"menu\"] = self.strucMB.menu\n\n ff.columnconfigure(1, weight=1)\n\n self.cur_mol = None\n self.cur_chains = []\n\n\n if 1 :\n\n row += 1\n\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='news')\n\n self.id_mg = {}\n\n self.tree = ttk.Treeview(ff)\n\n #self.tree[\"columns\"]=(\"one\",\"two\",\"three\")\n self.tree.column(\"#0\", width=50, minwidth=50, stretch=Tkinter.YES)\n #self.tree.column(\"one\", width=150, minwidth=150, stretch=Tkinter.NO)\n #self.tree.column(\"two\", width=400, minwidth=200)\n #self.tree.column(\"three\", width=80, minwidth=50, stretch=Tkinter.NO)\n\n self.tree.heading(\"#0\",text=\"Chain,Residue,Atom\",anchor=Tkinter.W)\n #self.tree.heading(\"one\", text=\"Date modified\",anchor=Tkinter.W)\n #self.tree.heading(\"two\", text=\"Type\",anchor=Tkinter.W)\n #self.tree.heading(\"three\", text=\"Size\",anchor=Tkinter.W)\n\n #self.tree.pack(side=Tkinter.TOP,fill=Tkinter.X)\n #self.tree.grid(column=0, row=0, sticky='nsew')\n #self.tree.pack(fill=Tkinter.BOTH, expand=1)\n #tree.place(x=0, y=0, relwidth=1, relheight=1)\n\n self.tree.grid(row = 0, column = 0, sticky='news')\n parent.columnconfigure(0, weight=1)\n parent.rowconfigure(row, weight = 1)\n ff.rowconfigure(0, weight = 1)\n ff.columnconfigure(0, weight=1)\n\n self.tree.bind('<<TreeviewSelect>>', self.select_mg_cb)\n self.tree.bind('<<TreeviewOpen>>', self.open_mg_cb)\n self.tree.bind('<<TreeviewClose>>', self.close_mg_cb)\n\n\n if 1 :\n row += 1\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w')\n\n b = Tkinter.Button(ff, text=\"R\", command=self.RefreshTree)\n b.grid (column=0, row=0, sticky='w', padx=0, pady=1)\n\n b = Tkinter.Button(ff, text=\"Sel\", command=self.SelectSel)\n b.grid (column=1, row=0, sticky='w', padx=0, pady=1)\n\n b = Tkinter.Button(ff, text=\"All\", command=self.SelectAll)\n b.grid (column=2, row=0, sticky='w', padx=0, pady=1)\n\n b = Tkinter.Button(ff, text=\"W&I\", command=self.SelW)\n b.grid (column=3, row=0, sticky='w', padx=1, pady=1)\n\n b = Tkinter.Button(ff, text=\"Show\", command=self.ShowSel)\n b.grid (column=5, row=0, sticky='w', padx=0, pady=1)\n\n b = Tkinter.Button(ff, text=\"Only\", command=self.ShowSelOnly)\n b.grid (column=6, row=0, sticky='w', padx=0, pady=1)\n\n b = Tkinter.Button(ff, text=\"All\", command=self.ShowAll)\n b.grid (column=7, row=0, sticky='w', padx=0, pady=1)\n\n b = Tkinter.Button(ff, text=\"Z\", command=self.Zone)\n b.grid (column=8, row=0, sticky='w', padx=1, pady=1)\n\n self.zoneRad = Tkinter.StringVar(ff)\n #self.addRess.set ( \"vsgtngtkrf\" )\n self.zoneRad.set ( \"5\" )\n e = Tkinter.Entry(ff, width=2, 
textvariable=self.zoneRad)\n e.grid(column=9, row=0, sticky='w', padx=1, pady=1)\n\n #b = Tkinter.Button(ff, text=\"Avg\", command=self.Average)\n #b.grid (column=1, row=0, sticky='w', padx=0, pady=1)\n\n #b = Tkinter.Button(ff, text=\"Open\", command=self.Open)\n #b.grid (column=2, row=0, sticky='w', padx=0, pady=1)\n\n\n\n\n\n if 1 :\n\n row += 1\n dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=row,column=0,columnspan=1, pady=2, sticky='we')\n\n row += 1\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w')\n\n um = Hybrid.Checkbutton(ff, 'Place with Mouse (Ctrl+Click)', False)\n um.button.grid(column = 1, row=0, sticky = 'w', padx=5)\n self.use_mouse = um.variable\n um.callback(self.bind_placement_button_cb)\n\n\n b = Tkinter.Label(ff, text=\"Add To Chain:\")\n b.grid (column=8, row=0, sticky='w', padx=0, pady=1)\n\n self.addToChain = Tkinter.StringVar(ff)\n #self.addRess.set ( \"vsgtngtkrf\" )\n self.addToChain.set ( \"\" )\n e = Tkinter.Entry(ff, width=2, textvariable=self.addToChain)\n e.grid(column=9, row=0, sticky='w', padx=5, pady=1)\n\n\n\n um = Hybrid.Checkbutton(ff, 'Find Peak', False)\n um.button.grid(column = 10, row=0, sticky = 'w', padx=5)\n self.use_mouse_max = um.variable\n self.use_mouse_max.set(True)\n #um.callback(self.bind_placement_button_cb)\n\n\n if 1 :\n\n row += 1\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w')\n\n\n self.guessOpt = Tkinter.StringVar()\n self.guessOpt.set ( 'guess' )\n\n l = Tkinter.Label(ff, text=' ' )\n l.grid(column=0, row=0, sticky='w')\n\n c = Tkinter.Radiobutton(ff, text=\"Guess\", variable=self.guessOpt, value = 'guess')\n c.grid (column=1, row=0, sticky='w')\n\n\n c = Tkinter.Radiobutton(ff, text=\"Add:\", variable=self.guessOpt, value = 'opt')\n c.grid (column=2, row=0, sticky='w')\n\n\n self.addStr = Tkinter.StringVar(ff)\n #self.addRess.set ( \"vsgtngtkrf\" )\n self.addStr.set ( \"W\" )\n e = Tkinter.Entry(ff, width=5, textvariable=self.addStr)\n e.grid(column=3, row=0, sticky='w', padx=5, pady=1)\n\n b = Tkinter.Label(ff, text=\" e.g. 
Mg, Ca, Na, Zn, Fe, W (Water)\")\n b.grid (column=4, row=0, sticky='w', padx=0, pady=1)\n\n\n\n #um = Hybrid.Checkbutton(ff, 'Guess', False)\n #um.button.grid(column = 9, row=0, sticky = 'w', padx=5)\n #self.use_mouse_guess = um.variable\n #self.use_mouse_guess.set(True)\n #um.callback(self.bind_placement_button_cb)\n\n\n #b = Tkinter.Button(ff, text=\"SWIM\", command=self.Place)\n #b.grid (column=5, row=0, sticky='w', padx=5)\n\n\n\n\n\n\n if devMenus :\n\n row += 1\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w')\n\n\n #b = Tkinter.Button(ff, text=\"Stats\", command=self.Hoh)\n #b.grid (column=2, row=0, sticky='w', padx=5)\n\n #b = Tkinter.Button(ff, text=\"ShoW\", command=self.HohShow)\n #b.grid (column=46, row=0, sticky='w', padx=5)\n\n\n #b = Tkinter.Button(ff, text=\"Thr\", command=self.Thr)\n #b.grid (column=4, row=0, sticky='w', padx=5)\n\n #b = Tkinter.Button(ff, text=\"WEx\", command=self.HohE)\n #b.grid (column=5, row=0, sticky='w', padx=5)\n\n #b = Tkinter.Button(ff, text=\"Asn\", command=self.Asn)\n #b.grid (column=51, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Dw\", command=self.HohD)\n b.grid (column=6, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Di\", command=self.HohD2)\n b.grid (column=7, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Da\", command=self.AllD)\n b.grid (column=8, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Dwn\", command=self.HohDn)\n b.grid (column=9, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Comb\", command=self.Combine)\n b.grid (column=10, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(ff, text=\"Dup\", command=self.Duplicates)\n b.grid (column=11, row=0, sticky='w', padx=5)\n\n #b = Tkinter.Button(ff, text=\"RMSD\", command=self.RMSD)\n #b.grid (column=10, row=0, sticky='w', padx=5)\n\n\n if devMenus :\n row += 1\n ff = Tkinter.Frame(parent)\n ff.grid(column=0, row=row, sticky='w')\n\n b = Tkinter.Label(ff, text=\"Map Res:\")\n b.grid (column=1, row=0, sticky='w', padx=0, pady=1)\n\n self.mapRes = Tkinter.StringVar(ff)\n #self.addRess.set ( \"vsgtngtkrf\" )\n self.mapRes.set ( \"\" )\n e = Tkinter.Entry(ff, width=5, textvariable=self.mapRes)\n e.grid(column=2, row=0, sticky='w', padx=5, pady=1)\n\n b = Tkinter.Label(ff, text=\"A\")\n b.grid (column=3, row=0, sticky='w', padx=0, pady=1)\n\n b = Tkinter.Button(ff, text=\"Est.\", command=self.GuessRes)\n b.grid (column=4, row=0, sticky='w', padx=5)\n\n b = Tkinter.Label(ff, text=\"using\")\n b.grid (column=5, row=0, sticky='w', padx=0, pady=1)\n\n self.mapResN = Tkinter.StringVar(ff)\n #self.addRess.set ( \"vsgtngtkrf\" )\n self.mapResN.set ( \"20\" )\n e = Tkinter.Entry(ff, width=5, textvariable=self.mapResN)\n e.grid(column=6, row=0, sticky='w', padx=5, pady=1)\n\n b = Tkinter.Label(ff, text=\"atoms\")\n b.grid (column=7, row=0, sticky='w', padx=0, pady=1)\n\n b = Tkinter.Button(ff, text=\"vW&I\", command=self.SelVisW)\n b.grid (column=8, row=0, sticky='w', padx=1, pady=1)\n\n\n if 1 :\n\n row += 1\n op = Hybrid.Popup_Panel(parent)\n df = op.frame\n df.grid(column = 0, row = row, sticky = 'w')\n #df.grid_remove()\n #ff.columnconfigure(0, weight=1)\n self.optionsPanel = op.panel_shown_variable\n\n orow = 0\n dummyFrame = Tkinter.Frame(df, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=0,column=orow,columnspan=1, pady=2, sticky='we')\n\n if 1 :\n orow += 1\n ff = Tkinter.Frame(df)\n ff.grid(column=0, row=orow, sticky='w')\n\n b = Tkinter.Label(ff, 
text=\"Distance ranges (in Angstroms):\")\n b.grid (column=0, row=0, sticky='w', padx=0, pady=1)\n\n orow += 1\n ff = Tkinter.Frame(df)\n ff.grid(column=0, row=orow, sticky='w')\n\n b = Tkinter.Label(ff, text=\" Ion distances: from \")\n b.grid (column=1, row=0, sticky='w', padx=0, pady=1)\n\n self.ionMinD = Tkinter.StringVar(ff)\n #self.addRess.set ( \"vsgtngtkrf\" )\n self.ionMinD.set ( \"1.8\" )\n e = Tkinter.Entry(ff, width=5, textvariable=self.ionMinD)\n e.grid(column=2, row=0, sticky='w', padx=5, pady=1)\n\n\n b = Tkinter.Label(ff, text=\"A to \")\n b.grid (column=3, row=0, sticky='w', padx=0, pady=1)\n\n self.ionMaxD = Tkinter.StringVar(ff)\n #self.addRess.set ( \"vsgtngtkrf\" )\n self.ionMaxD.set ( \"2.4\" )\n e = Tkinter.Entry(ff, width=5, textvariable=self.ionMaxD)\n e.grid(column=4, row=0, sticky='w', padx=5, pady=1)\n\n b = Tkinter.Label(ff, text=\"A\")\n b.grid (column=5, row=0, sticky='w', padx=0, pady=1)\n\n\n\n #b = Tkinter.Label(ff, text=\"Distances (in Angstroms):\")\n #b.grid (column=0, row=0, sticky='w', padx=0, pady=1)\n\n b = Tkinter.Label(ff, text=\" Water distances: from \")\n b.grid (column=1, row=1, sticky='w', padx=0, pady=1)\n\n self.waterMinD = Tkinter.StringVar(ff)\n #self.addRess.set ( \"vsgtngtkrf\" )\n self.waterMinD.set ( \"2.4\" )\n e = Tkinter.Entry(ff, width=5, textvariable=self.waterMinD)\n e.grid(column=2, row=1, sticky='w', padx=5, pady=1)\n\n\n b = Tkinter.Label(ff, text=\"A to\")\n b.grid (column=3, row=1, sticky='w', padx=0, pady=1)\n\n self.waterMaxD = Tkinter.StringVar(ff)\n #self.addRess.set ( \"vsgtngtkrf\" )\n self.waterMaxD.set ( \"3.3\" )\n e = Tkinter.Entry(ff, width=5, textvariable=self.waterMaxD)\n e.grid(column=4, row=1, sticky='w', padx=5, pady=1)\n\n b = Tkinter.Label(ff, text=\"A\")\n b.grid (column=5, row=1, sticky='w', padx=0, pady=1)\n\n\n if 1 :\n orow += 1\n ff = Tkinter.Frame(df)\n ff.grid(column=0, row=orow, sticky='w')\n\n um = Hybrid.Checkbutton(ff, 'Put water/ion only when Q-score >', False)\n um.button.grid(column = 1, row=0, sticky = 'w', padx=5)\n self.useQScore = um.variable\n #um.callback(self.bind_placement_button_cb)\n self.useQScore.set(False)\n\n #b = Tkinter.Label(ff, text=\"Put water/ion only when Q-score >\")\n #b.grid (column=1, row=0, sticky='w', padx=0, pady=1)\n\n self.placeQ = Tkinter.StringVar(ff)\n #self.addRess.set ( \"vsgtngtkrf\" )\n self.placeQ.set ( \"0.9\" )\n e = Tkinter.Entry(ff, width=5, textvariable=self.placeQ)\n e.grid(column=2, row=0, sticky='w', padx=5, pady=1)\n\n self.qsigma = Tkinter.StringVar(ff)\n self.qsigma.set ( \"0.6\" )\n\n if 1 :\n b = Tkinter.Label(ff, text=\" sigma:\")\n b.grid (column=3, row=0, sticky='w', padx=0, pady=1)\n\n e = Tkinter.Entry(ff, width=5, textvariable=self.qsigma)\n e.grid(column=4, row=0, sticky='w', padx=5, pady=1)\n\n\n if 1 :\n orow += 1\n ff = Tkinter.Frame(df)\n ff.grid(column=0, row=orow, sticky='w')\n\n b = Tkinter.Label(ff, text=\"Near atoms named: \")\n b.grid (column=3, row=0, sticky='w', padx=0, pady=1)\n\n self.selNearAts = Tkinter.StringVar(ff)\n #self.addRess.set ( \"vsgtngtkrf\" )\n self.selNearAts.set ( \"N7\" )\n e = Tkinter.Entry(ff, width=5, textvariable=self.selNearAts)\n e.grid(column=4, row=0, sticky='w', padx=5, pady=1)\n\n b = Tkinter.Button(ff, text=\"Sel\", command=self.SelNear)\n b.grid (column=8, row=0, sticky='w', padx=1, pady=1)\n\n\n\n\n self.optionsPanel.set(False)\n #self.selPanel.set(False)\n\n\n\n row += 1\n dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n 
dummyFrame.grid(row=row,column=0,columnspan=1, pady=2, sticky='we')\n\n\n row += 1\n global msg\n msg = Tkinter.Label(parent, width = 10, anchor = 'w', justify = 'left', fg=\"red\")\n msg.grid(column=0, row=row, sticky='ew')\n self.msg = msg\n msg.configure(text = \"Press Help below for more information\")\n\n\n self.SelectedMgId = None\n self.SetVisMol ()\n\n\n\n callbacks = (self.mouse_down_cb, self.mouse_drag_cb, self.mouse_up_cb)\n #callbacks = (self.mouse_down_cb)\n from chimera import mousemodes\n mousemodes.addFunction('mark swim', callbacks, self.mouse_mode_icon())\n\n if 1 :\n # bind, unbind in case it was left bound before...\n from chimera import mousemodes\n print \" - unbinding mouse...\"\n button, modifiers = ('3', ['Ctrl'])\n def_mode = mousemodes.getDefault(button, modifiers)\n mousemodes.setButtonFunction(button, modifiers, def_mode)\n self.bound_button = None\n\n\n\n\n\n def Options ( self ) :\n self.optionsPanel.set (not self.optionsPanel.get())\n\n\n def Log ( self ) :\n import Idle\n Idle.start_shell()\n\n def bind_placement_button_cb(self) :\n\n if self.use_mouse.get() :\n print \" - binding mouse...\"\n button, modifiers = ('1', ['Ctrl'])\n from chimera import mousemodes\n mousemodes.setButtonFunction(button, modifiers, 'mark swim')\n self.bound_button = (button, modifiers)\n elif self.bound_button:\n print \" - unbinding mouse...\"\n button, modifiers = self.bound_button\n from chimera import mousemodes\n def_mode = mousemodes.getDefault(button, modifiers)\n mousemodes.setButtonFunction(button, modifiers, def_mode)\n self.bound_button = None\n\n\n def mouse_mode_icon(self) :\n\n import os.path\n icon_path = os.path.join(os.path.dirname(__file__), 'marker.gif')\n from PIL import Image\n image = Image.open(icon_path)\n from chimera import chimage\n from chimera import tkgui\n icon = chimage.get(image, tkgui.app)\n return icon\n\n def mouse_down_cb(self, viewer, event) :\n\n print \" mouse - \"\n\n #print event.x, event.y\n if 0 :\n print dir(event)\n print event.char\n print event.keycode\n print event.keysym\n print event.keysym_num\n print event.num\n print event.state\n\n hits = []\n import VolumePath.tracer as tracer\n\n if 1 :\n from VolumeViewer import volume_list\n hits.extend(tracer.volume_maxima(event.x, event.y, volume_list()))\n print \"vol\"\n\n if 0 :\n from VolumeViewer import volume_list\n hits.extend(VolumePath.tracer.volume_plane_intercepts(event.x, event.y, volume_list()))\n\n if 0 :\n from Surface import surface_models\n hits.extend(tracer.surface_intercepts(event.x, event.y, surface_models()))\n print \"surf\"\n\n for C, vol in hits :\n print \" --> \", vol.name, \" --> %.1f, %.1f, %.1f\" % (C[0], C[1], C[2])\n self.PlaceAt ( C, vol )\n\n\n\n\n\n #grabbed = (self.move_markers.get() and self.grab_marker(event.x, event.y))\n #if not grabbed:\n # self.add_marker_at_screen_xy(event.x, event.y)\n\n\n\n def mouse_drag_cb(self, viewer, event):\n shift_mask = 1\n shift = (event.state & shift_mask)\n capslock_mask = 2\n capslock = (event.state & capslock_mask)\n #self.move_or_resize_marker(event.x, event.y, shift, capslock):\n\n\n def mouse_up_cb(self, viewer, event):\n #self.ungrab_marker()\n #self.pause_marker_placement = False\n #print \"mouse up\"\n pass\n\n\n# To distinguish between water and ions, we use the criteria in the Undowser paper. The paper does not describe exact distances, but the criteria is as follows:\n# 1. 
A placed water that clashes with two or more atoms of the same polarity, and with no nonpolars (C) or opposite polars (O and N), is almost certainly an ion.\n# 2. If the 'placed water' clashes (is too close) to negative atoms it is a positive ion\n# 3. If the 'placed water' clashes with positive atoms, it is a negative ion\n# 4. A doubly charged ion (e.g. Mg++) almost always interacts with at least one fully charged atom (e.g. phosphate or carboxyl O)\n# 5. A singly charged ion (e.g. Na+) often interacts with just partial charges (e.g. OH, backbone CO)\n# We add to these criteria our previous observations in a high-resolution X-ray structure (pdb:3ajo), the following distances are observed:\n# 1. Water atom to nearby polar atoms: 2.8A +/- ~0.4\n# 2. Ion to nearby charged/polar atoms: 2.2A +/- ~0.2\n# Thus we:\n# - Place ion when distance is 2.2A +/- 0.2 and nearby atoms include\n# * Charged atoms (place double charged ion such as Mg++)\n# * Single or opposite polars (O and N) (place single charged atom such as Na+)\n# - Place water when distance is 2.8 +/- 0.4 and nearby atoms include\n# * Charged atom or polar atom\n\n\n\n def GuessAtom ( self, mol, P, atTree = None, nearAtMap = None, doMsg=True, checkNewAtoms=None ) :\n\n # mol - molecule to add new ions/waters to\n # P - point on which to consider adding new ion/water\n # atTree - tree of atoms to consider for collisions or nearby\n # nearAtMap - only place ions/waters near these atoms\n # doMsg - create message explaining criteria used to place ion or water\n\n nearAts = None\n if atTree :\n nearAts = self.AtsWithinPt ( P, 6.0, atTree )\n else :\n #nearAts = [None] * len(mol.atoms)\n nearAts = []\n P = chimera.Point ( P[0], P[1], P[2] )\n for i, at in enumerate(mol.atoms) :\n V = P - at.coord()\n if V.length < 6.0 :\n nearAts.append ( [V.length, at] )\n\n newAtsMap = {}\n if checkNewAtoms :\n for at in checkNewAtoms :\n V = P - at.coord()\n if V.length < 6.0 :\n nearAts.append ( [V.length, at] )\n newAtsMap[at] = 1\n\n\n\n\n #minDistW, maxDistW = 2.5, 3.3\n #minDistI, maxDistI = 1.9, 2.5\n\n minDistW, maxDistW = float(self.waterMinD.get()), float(self.waterMaxD.get())\n minDistI, maxDistI = float(self.ionMinD.get()), float(self.ionMaxD.get())\n\n\n #minDistW, maxDistW = 2.4, 3.2\n #minDistI, maxDistI = 2.0, 2.4\n\n # these are nearby protein atoms with ion distances that are typically charged - put Mg++\n chargedAtomsIon = []\n\n # these are already placed ions within ion/water distances - should place water, not anotheer ion\n ionAtomsIon, ionAtomsWater = [], []\n\n # these are nearby protein atoms that are polar positive (e.g. N) within ion distances - put Cl-\n posPolarAtomsIon = []\n\n # these are nearby protein atoms that are polar negative (e.g. O) within ion distances - put Na+/Mg++\n negPolarAtomsIon = []\n\n # these are nearby charged or polar atoms (e.g. 
O, N, S in Cys) within water distances - put water\n chargedAtomsWater, polarAtomsWater = [], []\n\n # these are non-polar, non-charged atoms that are too close; don't put anything\n collidingAtoms = []\n\n # is near at least atom in nearAtMap\n isNearAtMap = False\n\n # is near newAtsMap - put waters but not ions\n isNearNewAtMap = False\n\n # closest to this chain\n closestChainId, closestChainD = None, 1e9\n\n # iterate over nearby atoms, adding them to the above lists as appropriate\n for dist, at in nearAts :\n\n if at.element.name == \"H\" :\n continue\n\n #if round(dist*10.0)/10.0 < minDistI :\n if dist < (minDistI-0.1) :\n collidingAtoms.append ( [dist, at] )\n break\n\n if at.element.name == \"C\" or (at.element.name == \"S\" and at.residue.type == \"MET\") :\n if dist < minDistI : # used to be 2.6?\n collidingAtoms.append ( [dist, at] )\n break\n\n #if hasattr ( at, 'Q' ) and at.Q < 0.1 :\n # continue\n\n #if at.altLoc != '' :\n # continue\n\n # only consider points near atoms in nearAtMap, or near new atoms\n # note each atom should only be added to one list\n if nearAtMap != None and at in nearAtMap :\n isNearAtMap = True\n\n if at in newAtsMap :\n isNearNewAtMap = True\n\n if dist < closestChainD :\n closestChainId = at.residue.id.chainId\n closestChainD = dist\n\n if at.residue.type.upper() in chargedIons :\n if dist <= maxDistI :\n ionAtomsIon.append ( [dist, at] )\n continue\n if dist < maxDistW :\n ionAtomsWater.append ( [dist, at] )\n continue\n\n chargedAt = False\n polarAt = False\n if at.residue.type == 'HIS' and (at.name == \"ND1\" or at.name == \"NE2\") :\n chargedAt = True\n if at.residue.type == \"ASP\" and (at.name == \"OD1\" or at.name == \"OD2\") :\n chargedAt = True\n if at.residue.type == \"GLU\" and (at.name == \"OE1\" or at.name == \"OE2\") :\n chargedAt = True\n if at.residue.type == \"LYS\" and (at.name == \"NZ\") :\n chargedAt = True\n if at.residue.type == \"ARG\" and (at.name == \"NH1\" or at.name == \"NH2\") :\n chargedAt = True\n\n if chargedAt :\n if dist <= maxDistI :\n chargedAtomsIon.append ( [dist, at] )\n continue\n if dist <= maxDistW :\n chargedAtomsWater.append ( [dist, at] )\n continue\n\n #elif at.residue.type == \"HOH\" :\n # if dist <= maxDistI :\n # waterAtomsIon.append ( [dist, at] )\n # continue\n # if dist <= maxDistW :\n # waterAtomsWater.append ( [dist, at] )\n # continue\n\n else :\n # if not charged, check if polar\n if at.element.name == \"N\" :\n polarAt = True\n if dist <= maxDistI :\n posPolarAtomsIon.append ( [dist, at] )\n continue\n if at.element.name == \"O\" or (at.element.name == \"S\" and at.residue.type == \"CYS\") :\n polarAt = True\n if dist <= maxDistI :\n negPolarAtomsIon.append ( [dist, at] )\n continue\n\n if polarAt :\n if dist >= minDistW and dist <= maxDistW :\n polarAtomsWater.append ( [dist, at] )\n\n msg = \"\"\n if doMsg :\n if len(collidingAtoms) > 0 :\n msg = \"Clash with:\"\n for d, at in collidingAtoms :\n msg += \" \" + self.At (at, d)\n\n else :\n msg += \"Near: \"\n if len(chargedAtomsIon) > 0 :\n msg += \"\\n\\nCharged Atoms (at Ion distance):\"\n for d, at in chargedAtomsIon :\n msg += \" \\n\" + self.At (at, d)\n\n if len(negPolarAtomsIon) > 0 :\n msg += \"\\n\\nNegative Polar Atoms (at Ion distance):\"\n for d, at in negPolarAtomsIon :\n msg += \" \\n\" + self.At (at, d)\n\n if len(posPolarAtomsIon) > 0 :\n msg += \"\\n\\nPositive Polar Atoms (at Ion distance):\"\n for d, at in posPolarAtomsIon :\n msg += \" \\n\" + self.At (at, d)\n\n if len(chargedAtomsWater) > 0 :\n msg += \"\\n\\nCharged 
Atoms (at Water distance):\"\n for d, at in chargedAtomsWater :\n msg += \" \\n\" + self.At (at, d)\n\n if len(polarAtomsWater) > 0 :\n msg += \"\\n\\nPolar Atoms (at Water distance):\"\n for d, at in polarAtomsWater :\n msg += \" \\n\" + self.At (at, d)\n\n if len(ionAtomsIon) > 0 :\n msg += \"\\n\\nIon (at Ion distance):\"\n for d, at in ionAtomsIon :\n msg += \" \\n\" + self.At (at, d)\n\n if len(ionAtomsWater) > 0 :\n msg += \"\\n\\nIon (at Water distance):\"\n for d, at in ionAtomsWater :\n msg += \" \\n\" + self.At (at, d)\n\n\n # use string set by user for type, if in list...\n ionType = \"ZN\"\n adds = self.addStr.get()\n if adds.upper() in chargedIons :\n ionType = adds.upper()\n\n\n atName, atRes = None, None\n clr = None\n placedType = \"\"\n\n if isNearAtMap == False and nearAtMap != None and not isNearNewAtMap :\n # a nearAtMap was given so only take points close to those atoms\n # in this case it was not close to any of them...\n pass\n elif len(collidingAtoms) == 0 :\n if len(ionAtomsIon) > 0 or ( len(ionAtomsWater) > 0 and len(negPolarAtomsIon)+len(posPolarAtomsIon) == 0 ) :\n # an ion at ion/water-distance away, likely water\n atName, atRes = \"O\", \"HOH\"\n placedType = \"\"\n clr = (1,0,0)\n elif len(chargedAtomsIon) > 0 :\n # charged atoms at ion distances, likely 2+ ion\n atName, atRes = ionType, ionType\n placedType = \"2+ ion\"\n clr = (.4,.4,.6)\n elif 1 and len(chargedAtomsWater) > 1 :\n # at least 2 charged atoms at water distances, likely 2+ ion\n atName, atRes = ionType, ionType\n placedType = \"2+ ion\"\n clr = (.4,.4,.6)\n elif len(negPolarAtomsIon)+len(posPolarAtomsIon) > 1 and len(negPolarAtomsIon) > 0 :\n # multiple polar atoms (at least 1 negative), likely 2+ ion\n atName, atRes = ionType, ionType\n placedType = \"2+ ion\"\n clr = (.4,.4,.6)\n elif len (negPolarAtomsIon) > 0 :\n # negative polar atom and no other ion around -> 1+ ion\n if 1 :\n atName, atRes = \"NA\", \"NA\"\n placedType = \"1+ ion\"\n clr = (.7,.4,.9)\n else :\n # in case we want to stick to 2+?\n atName, atRes = ionType, ionType\n placedType = \"2+ ion\"\n clr = (.4,.4,.6)\n elif len (posPolarAtomsIon) > 0 :\n # positive polar atom and no other ion around -> 1- ion\n atName, atRes = \"CL\", \"CL\"\n placedType = \"1- ion\"\n clr = (0,1,0)\n elif len(polarAtomsWater) > 0 or len(chargedAtomsWater) > 0 :\n atName, atRes = \"O\", \"HOH\"\n placedType = \"\"\n clr = (1,0,0)\n\n # don't put ions if they are near new atoms (ion or water) but not near\n # other model atoms, i.e. 
only put waters in that case...\n if isNearNewAtMap and not isNearAtMap :\n if atName != \"O\" :\n atName, atRes = \"O\", \"HOH\"\n clr = (1,0,0)\n placedType = \"\"\n\n\n msgFull = msg\n msg = \"\"\n\n if doMsg :\n if atName != None :\n msg = \"Placed %s %s/%s\" % (placedType, atName, atRes)\n else :\n if len(collidingAtoms) > 0 :\n msg = msgFull\n else :\n msg = \"Not placed - Not near any atoms (check distances in Options)\"\n\n return msg, msgFull, atName, atRes, closestChainId, clr\n\n\n def At ( self, at, d ) :\n rt = at.residue.type\n #if rt in protein3to1 :\n # rt = protein3to1[rt]\n #elif rt in nucleic3to1 :\n # rt = nucleic3to1[rt]\n\n return \" %.1fA to atom %s (element %s) in residue %s %d, chain %s\" % (d, at.name, at.element.name, rt, at.residue.id.position, at.residue.id.chainId)\n\n\n\n def AtsWithin (self, ats, R, atTree) :\n\n nearAts = []\n R2 = R * R\n for at in ats :\n pt = at.coord()\n vPt = numpy.array ( pt.data() )\n opointsNear = atTree.searchTree ( [pt[0], pt[1], pt[2]], R )\n if len(opointsNear) > 0 :\n for p in opointsNear :\n try :\n v = vPt - p.coord().data()\n except :\n continue\n sqSum = numpy.sum ( v * v )\n if sqSum < R2 :\n nearAts.append (p)\n\n return nearAts\n\n\n def AtsWithinXf (self, ats, R, atTree) :\n\n nearAts = []\n R2 = R * R\n for at in ats :\n pt = at.xformCoord()\n vPt = numpy.array ( pt.data() )\n opointsNear = atTree.searchTree ( [pt[0], pt[1], pt[2]], R )\n if len(opointsNear) > 0 :\n for p in opointsNear :\n try :\n v = vPt - p.xformCoord().data()\n except :\n continue\n sqSum = numpy.sum ( v * v )\n if sqSum < R2 :\n nearAts.append (p)\n\n return nearAts\n\n\n def AtsWithinPt (self, pt, R, atTree) :\n\n nearAts = []\n R2 = R * R\n\n vPt = numpy.array ( [pt[0], pt[1], pt[2]] )\n opointsNear = atTree.searchTree ( [pt[0], pt[1], pt[2]], R )\n if len(opointsNear) > 0 :\n for p in opointsNear :\n try :\n v = vPt - p.coord().data()\n except :\n continue\n sqSum = numpy.sum ( v * v )\n if sqSum < R2 :\n nearAts.append ( [numpy.sqrt(sqSum), p] )\n\n return nearAts\n\n\n def AtsWithinPtXf (self, pt, R, atTree) :\n\n nearAts = []\n R2 = R * R\n\n vPt = numpy.array ( [pt[0], pt[1], pt[2]] )\n opointsNear = atTree.searchTree ( [pt[0], pt[1], pt[2]], R )\n if len(opointsNear) > 0 :\n for p in opointsNear :\n try :\n v = vPt - p.xformCoord().data()\n except :\n continue\n sqSum = numpy.sum ( v * v )\n if sqSum < R2 :\n nearAts.append ( [numpy.sqrt(sqSum), p] )\n\n return nearAts\n\n\n\n def PlaceAt ( self, pt, dmap ) :\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n #chainId = self.chain.get()\n\n #aname, chainId = self.addRess.get().split(\".\")\n aname = self.addStr.get()\n chainId = self.addToChain.get()\n if len(chainId) > 1 :\n umsg ( \"Enter a single character in 'Add To Chain' field\" )\n return\n\n\n\n P = chimera.Point(pt[0], pt[1], pt[2])\n P = dmap.openState.xform.inverse().apply(P)\n\n if self.use_mouse_max.get() :\n pts, avgMapV = PtsToMax ( [ [P[0], P[1], P[2]] ], dmap )\n maxPt = pts[0]\n V = maxPt - P\n #print \" - diff to max: %.3f\" % V.length\n P = maxPt\n\n P = dmap.openState.xform.apply(P)\n P = mol.openState.xform.inverse().apply(P)\n\n\n if self.guessOpt.get() == 'guess' :\n\n #print \" - guessing...\"\n\n msg, msgFull, atName, resName, closestChainId, clr = self.GuessAtom (mol, [P[0],P[1],P[2]] )\n\n if chainId == None or len(chainId) == 0 :\n chainId = closestChainId\n\n atRi = 0\n for r in mol.residues :\n if r.id.chainId == chainId and r.id.position > atRi :\n atRi = 
r.id.position\n\n atRi += 1\n\n print \"\"\n umsg ( \"Placing %s in chain %s, position %d, for map %s\" % (aname, chainId, atRi, dmap.name) )\n\n\n status ( msg )\n print \"\"\n print msg\n print \"\"\n print msgFull\n print \"\"\n\n #print msg\n\n if atName != None :\n\n nres = mol.newResidue (resName, chimera.MolResId(chainId, atRi))\n nat = mol.newAtom (atName, chimera.Element(atName))\n\n nres.addAtom( nat )\n nat.setCoord ( P )\n #nat.drawMode = nat.Ball\n nat.color = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 )\n nat.display = True\n\n nat.radius = 1.46\n if atName.lower() == \"o\" :\n nat.drawMode = nat.EndCap\n else :\n #print \"ball\"\n nat.drawMode = nat.Ball\n\n #nat.drawMode = nat.EndCap\n\n\n else :\n\n if len(chainId) == 0 :\n chainId = \"_\"\n\n atRi = 0\n for r in mol.residues :\n if r.id.chainId == chainId and r.id.position > atRi :\n atRi = r.id.position\n\n atRi += 1\n\n print \"\"\n umsg ( \"Placing %s in chain %s, position %d, for map %s\" % (aname, chainId, atRi, dmap.name) )\n\n nres, nat = None, None\n if aname.lower() == \"w\" :\n nres = mol.newResidue (\"HOH\", chimera.MolResId(chainId, atRi))\n nat = mol.newAtom (\"O\", chimera.Element('O'))\n else :\n nres = mol.newResidue (aname, chimera.MolResId(chainId, atRi))\n nat = mol.newAtom (aname, chimera.Element(aname))\n\n\n nres.addAtom( nat )\n nat.setCoord ( P )\n nat.display = True\n nat.radius = 1.46\n if aname.lower() == \"w\" :\n nat.drawMode = 2 # nat.EndCap\n nat.color = chimera.MaterialColor( 1.0, 0.0, 0.0, 1.0 )\n else :\n if aname.upper() in atomColors :\n nat.color = atomColors[aname.upper()]\n else :\n nat.color = chimera.MaterialColor( 0.0, 1.0, 0.0, 1.0 )\n nat.drawMode = 3 # nat.Ball\n\n self.RefreshTree ()\n\n\n def SetVisMap ( self ) :\n dmap = None\n mlist = chimera.openModels.list(modelTypes = [VolumeViewer.volume.Volume])\n for m in mlist :\n if m.display and not \"sel_masked\" in m.name :\n dmap = m\n break\n\n if dmap == None :\n if len(mlist) > 0 :\n dmap = mlist[0]\n\n if dmap != None :\n self.dmap.set ( dmap.name + \" (%d)\" % dmap.id )\n self.cur_dmap = dmap\n\n\n def MapMenu ( self ) :\n\n self.mb.menu.delete ( 0, 'end' ) # Clear menu\n from VolumeViewer import Volume\n mlist = chimera.openModels.list(modelTypes = [Volume])\n for m in mlist :\n self.mb.menu.add_radiobutton ( label=m.name + \" (%d)\"%m.id, variable=self.dmap,\n command=lambda m=m: self.MapSelected(m) )\n\n def SetMapMenu (self, dmap):\n\n mname = dmap.name if dmap else ''\n self.dmap.set(mname)\n self.cur_dmap = dmap\n #print \"Set map menu to \", dmap.name\n\n def MapSelected ( self, dmap ) :\n\n self.cur_dmap = dmap\n # if dmap: dmap.display = True\n print \"Map: %s\" % dmap.name\n\n\n\n def SetVisMol ( self ) :\n mol = None\n mlist = chimera.openModels.list(modelTypes = [chimera.Molecule])\n for m in mlist :\n if m.display :\n mol = m\n break\n\n if mol == None :\n if len(mlist) > 0 :\n mol = mlist[0]\n\n if mol != None :\n self.struc.set ( mol.name + \" (%d)\" % mol.id )\n self.cur_mol = mol\n SetBBAts ( mol )\n print \"Mol: %s\" % self.cur_mol.name\n\n self.RefreshTree()\n\n\n def StrucMenu ( self ) :\n self.strucMB.menu.delete ( 0, 'end' ) # Clear menu\n mlist = chimera.openModels.list(modelTypes = [chimera.Molecule])\n for m in mlist :\n self.strucMB.menu.add_radiobutton ( label=m.name+\" (%d)\"%m.id, variable=self.struc,\n command=lambda m=m: self.StrucSelected(m) )\n\n\n def StrucSelected ( self, mol ) :\n\n self.cur_mol = mol\n print \"Selected \", mol.name, \" - \", mol.id\n if mol :\n\n mlist = 
chimera.openModels.list(modelTypes = [chimera.Molecule])\n for m in mlist :\n m.display = False\n\n mol.display = True\n SetBBAts ( mol )\n\n #print \"Mol: %s\" % self.cur_mol.name\n\n self.RefreshTree()\n\n\n def select_mg_cb (self, event):\n\n\n #print \"Sel:\", self.tree.selection()\n #print \"Focus:\", self.tree.focus()\n\n\n to = self.tree.focus()\n\n if to in self.toChain :\n #print \" -- Chain:\", self.toChain[to]\n pass\n elif to in self.toRes :\n res = self.toRes[to]\n try :\n pass\n #print \" -- Res: %d.%s.%s\" % (res.id.position, res.type, res.id.chainId)\n except :\n pass\n\n elif to in self.toRess :\n ress = self.toRess[to]\n #print \" -- %d res\" % len(ress)\n\n\n return\n\n if self.SelectedMgId != self.tree.focus() :\n self.SelectedMgId = self.tree.focus()\n import Segger.ar_mg_dialog\n reload ( Segger.ar_mg_dialog )\n Segger.ar_mg_dialog.show_dialog().Refresh ()\n\n\n def GetSelAtoms ( self ) :\n\n atoms = []\n for to in self.tree.selection () :\n\n if to in self.toChain :\n #print \" -- Chain:\", self.toChain[to]\n for res in self.cur_mol.residues :\n if res.id.chainId == self.toChain[to] :\n atoms.extend ( res.atoms )\n elif to in self.toRes :\n res = self.toRes[to]\n #print \" -- Res: %d.%s.%s\" % (res.id.position, res.type, res.id.chainId)\n atoms.extend ( res.atoms )\n elif to in self.toRess :\n ress = self.toRess[to]\n #print \" -- %d res\" % len(ress)\n for res in ress :\n try :\n atoms.extend ( res.atoms )\n except :\n status ( \"Atoms not found, molecule may have changed\" )\n pass\n\n return atoms\n\n\n\n def SelectSel ( self ) :\n print \" - selecting...\"\n\n if self.cur_mol == None :\n umsg ( \"Select a molecule first\" )\n return\n\n ats = self.GetSelAtoms ()\n umsg ( \"Selecting %d atoms\" % len(ats) )\n\n chimera.selection.clearCurrent ()\n chimera.selection.addCurrent ( ats )\n\n\n\n def SelectAll ( self ) :\n print \" - selecting all\"\n\n if self.cur_mol == None :\n umsg ( \"Select a molecule first\" )\n return\n\n ats = self.GetSelAtoms ()\n umsg ( \"Selecting %d atoms\" % len(self.cur_mol.atoms) )\n\n chimera.selection.clearCurrent ()\n chimera.selection.addCurrent ( self.cur_mol.atoms )\n\n\n def SelW ( self ) :\n print \" - selecting w&i\"\n\n if self.cur_mol == None :\n umsg ( \"Select a molecule first\" )\n return\n\n #ats = self.GetSelAtoms ()\n #umsg ( \"Selecting %d atoms\" % len(self.cur_mol.atoms) )\n\n from chimera.resCode import protein3to1\n from chimera.resCode import nucleic3to1\n\n chimera.selection.clearCurrent ()\n\n for at in self.cur_mol.atoms :\n #if at.residue.type in protein3to1 :\n # continue\n #elif at.residue.type in nucleic3to1 :\n # continue\n #else :\n # chimera.selection.addCurrent ( at )\n\n if at.residue.type.upper() in chargedIons :\n chimera.selection.addCurrent ( at )\n elif at.residue.type.upper() == \"HOH\" :\n chimera.selection.addCurrent ( at )\n\n\n\n def SelNear ( self ) :\n print \" - selecting w&i\"\n\n if self.cur_mol == None :\n umsg ( \"Select a molecule first\" )\n return\n\n nearNs = self.selNearAts.get().split(',')\n print \" - near: \", nearNs\n\n #ats = self.GetSelAtoms ()\n #umsg ( \"Selecting %d atoms\" % len(self.cur_mol.atoms) )\n\n from chimera.resCode import protein3to1\n from chimera.resCode import nucleic3to1\n\n ats = [at for at in self.cur_mol.atoms if at.element.name != \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d atoms / %d ats\" % ( len(ats), len(self.cur_mol.atoms) )\n atTree = AdaptiveTree ( points.tolist(), ats, 2.0)\n\n\n 
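        # For every placed water/ion below, query the AdaptiveTree for heavy atoms
        # within 3A and select both atoms when a neighbor's name matches one of the
        # names entered in the 'Near atoms named' field (e.g. N7).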
chimera.selection.clearCurrent ()\n\n for at in self.cur_mol.atoms :\n if at.residue.type.upper() in chargedIons or at.residue.type.upper() == \"HOH\" :\n\n nearAts = self.AtsWithin ( [at], 3.0, atTree )\n for nat in nearAts :\n if nat.name in nearNs :\n chimera.selection.addCurrent ( at )\n chimera.selection.addCurrent ( nat )\n\n\n\n\n def SelVisW ( self ) :\n print \" - selecting w&i visible\"\n\n if self.cur_mol == None :\n umsg ( \"Select a molecule first\" )\n return\n\n #ats = self.GetSelAtoms ()\n #umsg ( \"Selecting %d atoms\" % len(self.cur_mol.atoms) )\n\n from chimera.resCode import protein3to1\n from chimera.resCode import nucleic3to1\n\n chimera.selection.clearCurrent ()\n\n for at in self.cur_mol.atoms :\n #if at.residue.type in protein3to1 :\n # continue\n #elif at.residue.type in nucleic3to1 :\n # continue\n #else :\n # chimera.selection.addCurrent ( at )\n\n if at.display == True :\n if 0 and at.residue.type.upper() in chargedIons :\n chimera.selection.addCurrent ( at )\n elif at.residue.type.upper() == \"HOH\" :\n chimera.selection.addCurrent ( at )\n\n\n\n def ShowSel ( self ) :\n print \" - showing sel...\"\n\n if self.cur_mol == None :\n umsg ( \"Select a molecule first\" )\n return\n\n #SetBBAts ( self.cur_mol )\n\n for m in chimera.openModels.list () :\n if type(m) == chimera.Molecule and m != self.cur_mol :\n m.display = False\n\n ats = self.GetSelAtoms ()\n umsg ( \"Showing %d atoms\" % len(self.cur_mol.atoms) )\n\n for at in ats :\n r = at.residue\n if r.type in protein3to1 or r.type in nucleic3to1 :\n r.ribbonDisplay = True\n for at in r.atoms :\n at.display = False\n else :\n at.display = True\n self.ColorAt ( at )\n\n for bond in self.cur_mol.bonds :\n bond.display = bond.Smart\n\n\n\n def ShowSelOnly ( self ) :\n\n print \" - showing sel...\"\n\n if self.cur_mol == None :\n umsg ( \"Select a molecule first\" )\n return\n\n SetBBAts ( self.cur_mol )\n\n for m in chimera.openModels.list () :\n if type(m) == chimera.Molecule and m != self.cur_mol :\n m.display = False\n\n ats = self.GetSelAtoms ()\n umsg ( \"Showing %d atoms\" % len(self.cur_mol.atoms) )\n\n atm = {}\n resm = {}\n for at in ats :\n atm[at] = 1\n resm[at.residue] = 1\n\n for res in self.cur_mol.residues :\n\n if res in resm :\n if res.isProt or res.isNA :\n res.ribbonDisplay = True\n for at in res.atoms :\n at.display = False\n else :\n for at in res.atoms :\n if at in atm :\n at.display = True\n self.ColorAt ( at )\n else :\n at.display = False\n\n else :\n if res.isProt or res.isNA :\n res.ribbonDisplay = False\n for at in res.atoms :\n at.display = False\n\n for bond in self.cur_mol.bonds :\n bond.display = bond.Smart\n\n\n def ColorAt ( self, at ) :\n if at.element.name.upper() in chargedIons :\n at.drawMode = at.Ball\n at.radius = 1.46\n else :\n at.drawMode = at.EndCap\n try :\n at.color = atomColors[at.element.name.upper()]\n except :\n at.color = atomColors[' ']\n\n def ShowAll ( self ) :\n\n print \" - showing all...\"\n\n if self.cur_mol == None :\n umsg ( \"Select a molecule first\" )\n return\n\n SetBBAts ( self.cur_mol )\n\n\n for res in self.cur_mol.residues :\n if res.isProt or res.isNA :\n res.ribbonDisplay = True\n for at in res.atoms :\n at.display = False\n else :\n for at in res.atoms :\n at.display = True\n self.ColorAt ( at )\n\n\n for bond in self.cur_mol.bonds :\n bond.display = bond.Smart\n\n\n\n\n def open_mg_cb (self, event):\n #print \"open\"\n #print self.tree.selection()\n #print self.tree.focus()\n pass\n\n def close_mg_cb (self, event):\n #print \"close\"\n #print 
self.tree.selection()\n #print self.tree.focus()\n pass\n\n def mg_b1 (self, event):\n #print \"b1\"\n #print self.tree.selection()\n pass\n\n def mg_b1_up (self, event):\n #print \"b1 up\"\n #print self.tree.selection()\n pass\n\n\n def Zone ( self ) :\n\n print \"Zone:\", self.zoneRad.get()\n\n try :\n rad = float ( self.zoneRad.get() )\n except :\n umsg ( \"Enter a number for zone radius\" )\n return\n\n atoms = chimera.selection.currentAtoms()\n if len(atoms) == 0 :\n umsg ( \"Nothing selected\" )\n return\n\n if self.cur_dmap == None :\n umsg ( \"Select a Map\" )\n return\n\n dmap = self.cur_dmap\n m = atoms[0].molecule\n\n from _multiscale import get_atom_coordinates\n points = get_atom_coordinates ( atoms, transformed = True )\n\n nname = os.path.splitext(dmap.name)[0] + \"_Z%.0f_\" % rad + \".mrc\"\n cmap = PtsToMap ( points, dmap, rad, nname, clr=(.7,.7,.7,.2) )\n\n umsg ( \"Made zone map: \" + nname )\n dmap.display = False\n\n M = dmap.data.full_matrix()\n sdev = numpy.std(M)\n avg = numpy.average(M)\n\n cmap.surface_levels = [avg + 2.0 * sdev]\n chimera.runCommand ( \"vol #%d style surface region all step 1\" % cmap.id )\n\n\n #chimera.openModels.add ( [cmap] )\n\n #dpath = os.path.splitext(m.openedAs[0])[0] + \"_chain_\" + cid + \".mrc\"\n #print \" -> \", dpath\n #cmap.write_file ( dpath, \"mrc\" )\n\n\n\n def RefreshTree ( self ) :\n\n \"\"\" Updates treeview with water/ion atoms in self.cur_mol \"\"\"\n\n if 0 :\n # Level 1\n at1=self.tree.insert(\"\", 1, \"\", text=\"1\" )\n self.tree.insert(\"\", 2, \"\", text=\"2\")\n\n # Level 2\n self.tree.insert(at1, \"end\", \"\", text=\"1.1\", values=(\"t1.1\"))\n self.tree.insert(at1, \"end\", \"\", text=\"1.2\", values=(\"t1.2\"))\n self.tree.insert(at1, \"end\", \"\", text=\"1.3\", values=(\"t1.3\"))\n\n #from os import listdir\n #from os.path import isfile, join\n\n self.tree.delete(*self.tree.get_children())\n\n #print \"Refresh with: %s\" % self.cur_mol.name\n\n SetBBAts ( self.cur_mol )\n\n cress = {}\n ress = []\n\n for res in self.cur_mol.residues :\n\n if res.id.chainId in cress :\n cress[res.id.chainId].append ( res )\n else :\n cress[res.id.chainId] = [ res ]\n\n if res.isProt or res.isNA :\n continue\n\n ress.append ( [res.id.position, res] )\n\n if 0 :\n ress.sort ( reverse=False, key=lambda x: int(x[0]) )\n for ri, res in ress :\n tRes = self.tree.insert(\"\", \"end\", \"\", text=\"%d.%s - %d atoms\" % (ri, res.type, len(res.atoms)) )\n\n else :\n\n self.toRes = {}\n self.toRess = {}\n self.toChain = {}\n\n cids = cress.keys()\n cids.sort()\n for ci in cids :\n ress = cress[ci]\n protRes, naRes, molRes = [], [], []\n for r in ress :\n if r.isProt :\n protRes.append ( [r.id.position, r] )\n elif r.isNA :\n naRes.append ( [r.id.position, r] )\n else :\n molRes.append ( [r.id.position, r] )\n\n label = \"Chain %s\" % ci\n if 0 :\n if nMol != 0 and nProt == 0 and nNA == 0 :\n # only Ligands\n label = \"Chain %s: %d Molecules\" % (ci, nMol)\n elif nProt != 0 and nMol == 0 and nNA == 0 :\n label = \"Chain %s: %d Protein residues\" % (ci, nProt)\n elif nNA != 0 and nMol == 0 and nProt == 0 :\n label = \"Chain %s: %d Nucleotides\" % (ci, nNA)\n else :\n label = \"Chain %s: %d Residues, %d Nucleotides, %d Other\" % (ci, nProt, nNA, nMol)\n\n chainTO = self.tree.insert(\"\", \"end\", \"\", text=label )\n self.toChain[chainTO] = ci\n\n if len(molRes) > 0 :\n molTO = self.tree.insert(chainTO, \"end\", \"\", text=\"%d molecules\" % len(molRes) )\n self.toRess[molTO] = [r for ri, r in molRes]\n\n molRes.sort ()\n for ri, res in 
molRes :\n resTO = self.tree.insert(molTO, \"end\", \"\", text=\"%d.%s - %d atoms\" % (ri, res.type, len(res.atoms)) )\n self.toRes[resTO] = res\n\n if len(protRes) > 0 :\n protTO = self.tree.insert(chainTO, \"end\", \"\", text=\"%d residues\" % len(protRes) )\n self.toRess[protTO] = [r for ri, r in protRes]\n\n protRes.sort ()\n for ri, res in protRes :\n resTO = self.tree.insert(protTO, \"end\", \"\", text=\"%d.%s - %d atoms\" % (ri, res.type, len(res.atoms)) )\n self.toRes[resTO] = res\n\n if len(naRes) > 0 :\n naTO = self.tree.insert(chainTO, \"end\", \"\", text=\"%d nucleotides\" % len(naRes) )\n self.toRess[naTO] = [r for ri, r in naRes]\n\n naRes.sort ()\n for ri, res in naRes :\n resTO = self.tree.insert(naTO, \"end\", \"\", text=\"%d.%s - %d atoms\" % (ri, res.type, len(res.atoms)) )\n self.toRes[resTO] = res\n\n self.tree.item(chainTO, open=False)\n\n\n def Average ( self ) :\n\n for eid in self.tree.selection() :\n print eid\n e = self.id_mg [eid] # {'fname':fname, 'fpath':fpath, 'vdata':vdata }\n #print e['fpath']\n path, fname = os.path.split ( e['fpath'] )\n\n #from ar_mg_proc import avg_frames\n\n avg_frames ( e['vdata'] )\n\n\n\n\n\n def HohE ( self ) :\n\n print \"hoh - figure in Q-scores paper\"\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n chainId = self.chain.get()\n chimera.selection.clearCurrent()\n\n s = {184:1,280:1,278:1,183:1,236:1,281:1,357:1,282:1,399:1}\n s = {184:1,280:1,278:1,183:1,236:1,281:1,357:1}\n\n for res in mol.residues :\n if res.type == \"HOH\" or res.type == \"MG\" :\n if res.id.position in s :\n for at in res.atoms :\n at.display = True\n if res.id.position == 183 or res.id.position == 184 :\n at.drawMode = at.Sphere\n chimera.selection.addCurrent ( at )\n else :\n for at in res.atoms :\n at.display = False\n\n\n\n def HohD_ ( self ) :\n\n print \"hoh-D - distances between HOH atoms using same residue numbers\"\n\n mols = []\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mols.append ( m )\n\n m1, m2 = mols\n\n print \"M1: %s\" % m1.name\n print \"M2: %s\" % m2.name\n\n atm1 = {}\n for at in m1.atoms :\n if at.residue.type == \"HOH\" :\n aid = \"%d.%s.%s\" % (at.residue.id.position, at.residue.id.chainId, at.name)\n atm1[aid] = at\n\n ds = []\n rm, N = 0.0, 0.0\n for at2 in m2.atoms :\n if at2.residue.type == \"HOH\" :\n aid = \"%d.%s.%s\" % (at2.residue.id.position, at2.residue.id.chainId, at2.name)\n at1 = atm1[aid]\n\n p1 = at1.xformCoord() # m2.openState.xform.inverse().apply (at1.xformCoord())\n p2 = at2.xformCoord()\n v = p1 - p2\n ds.append ( v.length )\n rm += v.length * v.length\n N += 1.0\n\n rmsd = numpy.sqrt (rm/N)\n print \"%.0f atoms, min: %2f, max: %.2f, avg: %.2f, rmsd: %.2f\" % (N, min(ds), max(ds), numpy.average(ds), rmsd)\n\n\n ds = []\n rm, N = 0.0, 0.0\n nsame = 0\n for at2 in m2.atoms :\n if at2.residue.type == \"HOH\" and at2.Q >= 0.7 :\n aid = \"%d.%s.%s\" % (at2.residue.id.position, at2.residue.id.chainId, at2.name)\n at1 = atm1[aid]\n\n p1 = at1.xformCoord() # m2.openState.xform.inverse().apply (at1.xformCoord())\n p2 = at2.xformCoord()\n v = p1 - p2\n ds.append ( v.length )\n rm += v.length * v.length\n N += 1.0\n if v.length < 0.25 :\n nsame += 1\n\n rmsd = numpy.sqrt (rm/N)\n print \"%.0f atoms, min: %2f, max: %.2f, avg: %.2f, rmsd: %.2f -- %d same\" % (N, min(ds), max(ds), numpy.average(ds), rmsd, nsame)\n\n\n\n\n def HohD ( self ) :\n\n print \"hoh-D - distances between HOH atoms - nearest search\"\n\n mols = 
[]\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mols.append ( m )\n\n m1, m2 = mols\n\n print \"M1: %s\" % m1.name\n print \"M2: %s\" % m2.name\n\n\n ats = [at for at in m1.atoms if at.residue.type == \"HOH\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d HOH atoms / %d ats\" % ( len(ats), len(m1.atoms) )\n atTree = AdaptiveTree ( points.tolist(), ats, 2.0)\n\n\n Ds = {}\n\n def addD ( t, d ) :\n if not t in Ds :\n Ds[t] = numpy.zeros ( 16 )\n i = int ( numpy.round(d*5.0) )\n if i < 31 :\n Ds[t][i] += 1\n\n num = 0\n sum, N = 0.0, 0.0\n for at2 in m2.atoms :\n if at2.residue.type == \"HOH\" :\n\n nearAts = self.AtsWithin ( [at2], 3.0, atTree )\n for nat in nearAts :\n d = (nat.coord() - at2.coord()).length\n addD ( \"-\", d )\n if d <= 1 :\n num += 1\n sum += d*d\n N += 1.0\n\n\n print \"\"\n print \"Distances:\"\n\n s = \"\"\n for i in range ( 16 ) :\n s = s + \"\\t%.2f\" % (i/5.0)\n print s\n\n for t, dists in Ds.iteritems () :\n s = t\n for n in dists :\n s = s + \"\\t%d\" % n\n print s\n\n print \"\"\n print \"%d within 1A, rmsd: %.6f\" % (num,numpy.sqrt(sum/N))\n\n\n\n def HohD2 ( self ) :\n\n print \"hoh-D - distances between HOH atoms - nearest search\"\n\n mols = []\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mols.append ( m )\n\n m1, m2 = mols\n\n num, pp, rmsd = self.wiDistNum (\"HOH\", m1, m2)\n\n def wiAtoms ( self, m1, tp ) :\n\n ats1 = []\n for at in m1.atoms :\n itype = None\n rtype = at.residue.type\n if rtype.upper() in chargedIons :\n itype = \"%d\" % chargedIons[rtype.upper()]\n else :\n itype = rtype\n\n if itype.upper() == tp.upper() or rtype.upper() == tp.upper() :\n ats1.append ( at )\n\n return ats1\n\n\n def wiDistNum ( self, tp, m1, m2 ) :\n\n print \"M1: %s\" % m1.name,\n print \"M2: %s\" % m2.name\n\n ats1 = self.wiAtoms ( m1, tp )\n if len(ats1) == 0 :\n print \" - no atoms/res of type %s in %s\" % (tp, m1.name)\n return 0.0, 0.0, 0.0\n\n ats2 = self.wiAtoms ( m2, tp )\n if len(ats2) == 0 :\n print \" - no atoms/res of type %s in %s\" % (tp, m2.name)\n return 0.0, 0.0, 0.0\n\n points = _multiscale.get_atom_coordinates ( ats1, transformed = False )\n #print \" - search tree: %d %s atoms / %d ats\" % ( len(ats1), tp, len(m1.atoms) )\n atTree = AdaptiveTree ( points.tolist(), ats1, 2.0)\n\n Ds = {}\n\n def addD ( t, d ) :\n if not t in Ds :\n Ds[t] = numpy.zeros ( 16 )\n i = int ( numpy.round(d*5.0) )\n if i < 31 :\n Ds[t][i] += 1\n\n num = 0\n sum, N = 0.0, 0.0\n for at2 in ats2 :\n nearAts = self.AtsWithin ( [at2], 3.0, atTree )\n minD = 1e9\n for nat in nearAts :\n d = (nat.coord() - at2.coord()).length\n addD ( \"-\", d )\n if d <= 1.0 :\n if d < minD :\n minD = d\n if minD < 1e8 :\n num += 1\n sum += minD*minD\n N += 1.0\n\n #print \"\"\n #print \"Distances:\"\n\n s = \"\"\n for i in range ( 16 ) :\n s = s + \"\\t%.2f\" % (i/5.0)\n #print s\n\n for t, dists in Ds.iteritems () :\n s = t\n for n in dists :\n s = s + \"\\t%d\" % n\n #print s\n\n pp = 100.0 * float(num) / float ( min(len(ats1),len(ats2)) )\n pp = 100.0 * float(num) / float ( len(ats2) )\n rmsd = numpy.sqrt(sum/N) if N > 0 else 0.0\n\n #print \"\"\n print \"%d/%d|%d %.0f%% within 1A, rmsd: %.6f\" % (num, len(ats1), len(ats2), pp, rmsd)\n\n return num, pp, rmsd\n\n\n\n\n\n\n def Duplicates ( self ) :\n\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n\n print m.name\n SetBBAts ( m )\n\n ats 
= []\n for at in m.atoms :\n if at.residue.isProt or at.residue.isNA :\n continue\n ats.append ( at )\n\n print \" - %d ats\" % len(ats)\n points = _multiscale.get_atom_coordinates ( ats, transformed = True )\n print \" - search tree: %d ion atoms / %d ats\" % ( len(ats), len(m.atoms) )\n atTree = AdaptiveTree ( points.tolist(), ats, 2.0)\n\n for at in m.atoms :\n nats = self.AtsWithinXf ( [at], 1.0, atTree )\n for nat in nats :\n if nat != at :\n v = nat.xformCoord() - at.xformCoord()\n print \" - %s.%s.%d -- %s.%s.%d -- %.2f\" % (at.name, at.residue.id.chainId, at.residue.id.position, nat.name, nat.residue.id.chainId, nat.residue.id.position, v.length)\n\n\n\n\n def Combine ( self ) :\n\n dmap = self.cur_dmap\n xf = dmap.openState.xform.inverse()\n\n print \"hoh-D - distances between HOH atoms - nearest search -- \"\n\n cids = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\n nmol = None\n\n mols = []\n mi = 0\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mols.append ( m )\n\n atTree = None\n if nmol != None :\n SetBBAts ( nmol )\n ats = []\n for at in nmol.atoms :\n if at.residue.isProt or at.residue.isNA :\n continue\n ats.append ( at )\n\n points = _multiscale.get_atom_coordinates ( ats, transformed = True )\n print \" - search tree: %d ion atoms / %d ats\" % ( len(ats), len(nmol.atoms) )\n atTree = AdaptiveTree ( points.tolist(), ats, 2.0)\n\n print \"%s %d -> %s\" % (m.name, mi, cids[mi])\n #nmol = self.AddChain ( nmol, m, \"A\", cids[mi], xf )\n #nmol = self.AddChain ( nmol, m, \"A\", cids[mi].lower(), xf, atTree )\n nmol = self.AddChain ( nmol, m, \"A\", cids[mi], xf, atTree )\n mi += 1\n\n\n\n\n def AddChain ( self, toMol, fromMol, cid, ncid, xf, atTree=None ) :\n\n if toMol == None :\n toMol = chimera.Molecule()\n toMol.name = \"combine\"\n chimera.openModels.add ( [toMol] )\n\n aMap = dict()\n from random import random as rand\n clr = ( rand(), rand(), rand() )\n\n for res in fromMol.residues :\n if res.id.chainId == cid :\n\n clash = False\n for at in res.atoms :\n atc = at.xformCoord()\n if atTree != None :\n nats = self.AtsWithinPtXf ( atc, 1.0, atTree )\n if len(nats) > 0 :\n clash = True\n break\n\n if clash :\n print \" - not adding clashing res %s.%d.%s\" % (res.type, res.id.position, res.id.chainId)\n continue\n\n nres = toMol.newResidue (res.type, chimera.MolResId(ncid, res.id.position))\n # print \"New res: %s %d\" % (nres.id.chainId, nres.id.position)\n for at in res.atoms :\n nat = toMol.newAtom (at.name, chimera.Element(at.element.number))\n # todo: handle alt\n aMap[at] = nat\n nres.addAtom( nat )\n nat.setCoord ( xf.apply (at.xformCoord()) )\n nat.drawMode = nat.Sphere\n nat.color = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 )\n nat.display = False\n nat.altLoc = at.altLoc\n nat.occupancy = at.occupancy\n nat.bfactor = at.bfactor\n\n nres.isHelix = res.isHelix\n nres.isHet = res.isHet\n nres.isSheet = res.isSheet\n nres.isStrand = res.isStrand\n nres.ribbonDisplay = True\n nres.ribbonDrawMode = 2\n nres.ribbonColor = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 );\n\n for bond in fromMol.bonds :\n try :\n nb = toMol.newBond ( aMap[bond.atoms[0]], aMap[bond.atoms[1]] )\n nb.display = nb.Smart\n except :\n pass\n\n return toMol\n\n\n def IonD ( self ) :\n\n print \"hoh-D - distances between HOH atoms - nearest search -- \"\n\n mols = []\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mols.append ( m )\n\n m1, m2 = mols\n\n print \"M1: %s\" % m1.name\n print \"M2: 
%s\" % m2.name\n\n ats = []\n for at in m1.atoms :\n if at.residue.type.upper() in chargedIons :\n ats.append ( at )\n\n points = _multiscale.get_atom_coordinates ( ats, transformed = True )\n print \" - search tree: %d ion atoms / %d ats\" % ( len(ats), len(m1.atoms) )\n atTree = AdaptiveTree ( points.tolist(), ats, 2.0)\n\n\n Ds = {}\n\n def addD ( t, d ) :\n if not t in Ds :\n Ds[t] = numpy.zeros ( 16 )\n i = int ( numpy.round(d*5.0) )\n if i < 31 :\n Ds[t][i] += 1\n\n\n nums, numd = {}, {}\n\n selAts = {}\n\n for at2 in m2.atoms :\n if at2.residue.type.upper() in chargedIons :\n\n #print at2.element.name, at2.residue.type\n\n nearAts = self.AtsWithinXf ( [at2], 3.0, atTree )\n for nat in nearAts :\n d = (nat.xformCoord() - at2.xformCoord()).length\n\n if nat.element.name == at2.element.name :\n addD ( nat.element.name, d )\n\n if d <= 1 :\n selAts[at2] = 1\n selAts[nat] = 1\n if nat.element.name in nums :\n nums[nat.element.name] += 1\n numd[nat.element.name] += d\n else :\n nums[nat.element.name] = 1\n numd[nat.element.name] = d\n\n\n sats = []\n for m in mols :\n SetBBAts (m)\n for at in m.atoms :\n if at.residue.isProt or at.residue.isNA :\n continue\n if at.residue.type == \"HOH\" :\n continue\n if at not in selAts :\n sats.append ( at )\n chimera.selection.clearCurrent ()\n chimera.selection.addCurrent ( sats )\n\n\n print \"\"\n print \"Distances:\"\n\n s = \"\"\n for i in range ( 16 ) :\n s = s + \"\\t%.2f\" % (i/5.0)\n print s\n\n for t, dists in Ds.iteritems () :\n s = t\n for n in dists :\n s = s + \"\\t%d\" % n\n print s\n\n print \"\"\n for aname, num in nums.iteritems() :\n print \" - %s - %d within 1A, avgd %0.2f\" % (aname, num, numd[aname]/float(num) )\n\n\n def AllD ( self ) :\n\n print \"hoh-D - distances between HOH atoms - nearest search\"\n\n mols = []\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mols.append ( m )\n\n m1, m2 = mols\n\n print \"M1: %s\" % m1.name\n print \"M2: %s\" % m2.name\n\n ionOrW = { \"MG\":2, \"NA\":1, \"CA\":2, \"ZN\":2, \"MN\":2, \"FE\":3, \"CO\":2, \"NI\":2, \"HOH\":0 }\n\n ats = []\n for at in m1.atoms :\n if at.residue.type.upper() in ionOrW :\n ats.append ( at )\n\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d ion atoms / %d ats\" % ( len(ats), len(m1.atoms) )\n atTree = AdaptiveTree ( points.tolist(), ats, 2.0)\n\n\n Ds = {}\n\n def addD ( t, d ) :\n if not t in Ds :\n Ds[t] = numpy.zeros ( 16 )\n i = int ( numpy.round(d*5.0) )\n if i < 31 :\n Ds[t][i] += 1\n\n num = 0\n for at2 in m2.atoms :\n if at2.residue.type.upper() in ionOrW :\n\n nearAts = self.AtsWithin ( [at2], 3.0, atTree )\n for nat in nearAts :\n d = (nat.coord() - at2.coord()).length\n addD ( \"-\", d )\n if d <= 1 :\n num += 1\n\n\n print \"\"\n print \"Distances:\"\n\n s = \"\"\n for i in range ( 16 ) :\n s = s + \"\\t%.2f\" % (i/5.0)\n print s\n\n for t, dists in Ds.iteritems () :\n s = t\n for n in dists :\n s = s + \"\\t%d\" % n\n print s\n\n print \"\"\n print \"%d within 1A\" % num\n\n\n\n def HohDn ( self ) :\n\n print \"hoh-D - distances between HOH atoms - nearest search\"\n\n m1 = None\n mols = []\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == chimera.Molecule :\n mols.append ( m )\n\n #if len(mols) < 2 :\n # umsg ( \"Make at least two molecules visible\" )\n # return\n\n #m1, mols = mols[0], mols[1:]\n\n print \"\\nUsing %s as ref - %d HOH\" % (m1.name, len([r for r in m1.residues if r.type == \"HOH\"]))\n\n for at in 
m1.atoms :\n if at.residue.type == \"HOH\" :\n at.nclose = 0\n at.aclose = []\n\n for m2 in mols :\n\n print \" %s - %d HOH\" % (m2.name, len([r for r in m2.residues if r.type == \"HOH\"]))\n\n ats = [at for at in m2.atoms if at.residue.type == \"HOH\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d HOH atoms / %d ats\" % ( len(ats), len(m2.atoms) )\n atTree = AdaptiveTree ( points.tolist(), ats, 2.0)\n\n for at in m2.atoms :\n if at.residue.type == \"HOH\" :\n at.display = False\n at.drawMode = 2\n\n for at1 in m1.atoms :\n if at1.residue.type == \"HOH\" :\n\n at1xc = m2.openState.xform.inverse().apply(at1.xformCoord())\n atsNear = atTree.searchTree ( at1xc.data(), 3.0 )\n num = 0\n for nat in atsNear :\n d = (nat.coord() - at1xc).length\n if d <= 1.0 :\n num += 1\n at1.aclose.append (nat)\n if num > 0 :\n at1.nclose += 1\n\n ns = [0] * (len(mols)+1)\n for at in m1.atoms :\n if at.residue.type == \"HOH\" :\n ns[at.nclose] += 1\n at.drawMode = 2\n if at.nclose == len(mols) :\n at.display = True\n else :\n at.display = False\n\n print \"\"\n print \"Numbers:\"\n print ns, \" / \", numpy.sum(ns)\n\n\n\n\n\n\n\n def Stats0 ( self ) :\n\n print \"\"\n print \"Test solvent atoms for Q-scores (make distributions) and\"\n print \"distances to other atoms\"\n print \"\"\n\n for m in chimera.openModels.list() :\n if type(m) == chimera.Molecule :\n SetBBAts ( m )\n num={}\n for r in m.residues :\n if r.isProt or r.isNA :\n continue\n if r.type in num :\n num[r.type] += 1\n else :\n num[r.type] = 1\n print m.name\n for t, n in num.iteritems() :\n print \" - \", t, n\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n print \" - in mol: %s\" % mol.name\n\n #chainId = self.chain.get()\n\n dmap = self.cur_dmap\n print \" - scale map: %s\" % dmap.name\n\n points = _multiscale.get_atom_coordinates ( mol.atoms, transformed = False )\n print \" - search tree: %d ats\" % ( len(mol.atoms) )\n atTree = AdaptiveTree ( points.tolist(), mol.atoms, 2.0)\n\n Ds = {}\n Qs = {}\n\n def addD ( t, d ) :\n if not t in Ds :\n Ds[t] = numpy.zeros ( 31 )\n i = int ( numpy.round(d*5.0) )\n if i < 31 :\n Ds[t][i] += 1\n\n def addQ ( t, q ) :\n if not t in Qs :\n Qs[t] = numpy.zeros ( 11 )\n i = int ( max (numpy.floor(q*10.0), 0) )\n if i > 10 :\n i = 10\n Qs[t][i] += 1\n\n\n for r in self.cur_mol.residues :\n\n #if r.id.chainId != chainId :\n # continue\n\n #rid = \"%d.%s\" % (r.id.position, r.id.chainId)\n\n #if not r.isProt and not r.isNA :\n if r.type == \"HOH\" :\n\n #at = r.atoms[0]\n\n at = None\n for a in r.atoms :\n if a.element.name == \"O\" :\n at = a\n\n if at == None :\n #print \" - O not found in HOH %d.%s\" % (r.id.position, r.id.chainId)\n continue\n\n #totAt += 1\n\n if 1 :\n addQ ( 'HOH', at.Q )\n if at.Q < 0.7 :\n deletAts[at] = 1\n #continue\n pass\n\n nearAts = self.AtsWithin ( [at], 6.0, atTree )\n for nat in nearAts :\n\n if nat == at or nat.element.name == \"H\" :\n continue\n\n d = (nat.coord() - at.coord()).length\n\n if d < 2.0 and nat.residue.isProt :\n print \" - Hoh res %d.%s may overlap %s.%s.%d.%s - d: %.2f\" % (at.residue.id.position, at.residue.id.chainId, nat.name, nat.residue.type, nat.residue.id.position, nat.residue.id.chainId, d)\n deletAts[at] = 1\n pass\n\n #if d < 2.0 and nat.residue.id.chainId != at.residue.id.chainId :\n # print \" - hoh res %d may overlap at %s.%s.%d.%s\" % (at.residue.id, nat.name, nat.residue.type, nat.residue.id.position, nat.residue.id.chainId)\n\n #if d < 
2.0 :\n # print \" - Hoh res %d may overlap at %s.%s.%d.%s - d: %.2f\" % (at.residue.id.position, nat.name, nat.residue.type, nat.residue.id.position, nat.residue.id.chainId, d)\n\n if d < 2.0 and nat.residue.type == \"HOH\" and at != nat :\n print \" - Hoh res %d.%s may overlap %s.%s.%d.%s - d: %.2f - \" % (at.residue.id.position, at.residue.id.chainId, nat.name, nat.residue.type, nat.residue.id.position, nat.residue.id.chainId, d)\n deletAts[at] = 1\n pass\n\n if nat.element.name == \"O\" :\n if nat.residue.type == \"HOH\" :\n addD ( \"HOH-HOH\", d )\n\n\n print \"\"\n print \"Distances:\"\n\n s = \"\"\n for i in range ( 31 ) :\n s = s + \"\\t%.2f\" % (i/5.0)\n print s\n\n for t, dists in Ds.iteritems () :\n s = t\n for n in dists :\n s = s + \"\\t%d\" % n\n print s\n\n\n\n\n\n def Stats ( self ) :\n\n print \"\"\n print \"Test solvent atoms for Q-scores (make distributions) and\"\n print \"distances to other atoms\"\n print \"\"\n\n if 0 :\n for m in chimera.openModels.list() :\n if type(m) == chimera.Molecule :\n SetBBAts ( m )\n num={}\n for r in m.residues :\n if r.isProt or r.isNA :\n continue\n if r.type in num :\n num[r.type] += 1\n else :\n num[r.type] = 1\n print m.name\n for t, n in num.iteritems() :\n print \" - \", t, n\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n print \" - in mol: %s\" % mol.name\n\n\n dmap = self.cur_dmap\n if dmap == None :\n umsg (\"Select a map first\")\n return []\n\n\n print \" - in map: %s\" % dmap.name\n minD, maxD = qscores.MinMaxD ( dmap )\n\n self.Log()\n\n umsg ( \"Making statistics on ions and waters...\" )\n\n ats = [at for at in mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(mol.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 2.0 )\n\n #points = _multiscale.get_atom_coordinates ( mol.atoms, transformed = False )\n #print \" - search tree: %d ats\" % ( len(mol.atoms) )\n #atTree = AdaptiveTree ( points.tolist(), mol.atoms, 2.0)\n\n Ds = {}\n Qs = {}\n Hoh_Hoh, Hoh_O = [], []\n Mg_Hoh, Mg_O, Mg_N = [], [], []\n\n def addD ( t, d ) :\n if not t in Ds :\n Ds[t] = numpy.zeros ( 21 )\n i = int ( numpy.round(d*5.0) )\n if i < 21 :\n Ds[t][i] += 1\n\n def addQ ( t, q ) :\n if not t in Qs :\n Qs[t] = numpy.zeros ( 11 )\n i = int ( max (numpy.floor(q*10.0), 0) )\n if i > 10 :\n i = 10\n Qs[t][i] += 1\n\n\n SetBBAts ( mol )\n doRes = []\n doAts = {}\n for res in mol.residues :\n # only looking for ions or water molecules which should have just one heavy atom\n if res.isProt or res.isNA :\n continue\n rats = [at for at in res.atoms if not at.element.name == \"H\"]\n if len(rats) == 1 :\n at = rats[0]\n doRes.append ( [res, at] )\n doAts[at] = 1\n\n if len(doRes) == 0 :\n umsg ( \"No water or ions found in %s?\" % mol.name )\n return\n\n numSame = 0\n deletAts = {}\n atI = 0\n avgQs = {}\n for res, atom in doRes:\n\n if 1 or not hasattr ( atom, 'Q' ) :\n #at.Q = 0.0\n atom.Q = qscores.Qscore ( [atom], dmap, 0.6, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n\n if atom.Q < 0.9 :\n #deletAts[atom] = 1\n #continue\n pass\n\n rtype = \"H2O\" if res.type.upper() == \"HOH\" else res.type.upper()\n\n addQ ( rtype, atom.Q )\n\n if 0 :\n if not rtype in avgQs :\n avgQs[rtype] = [ atom.Q, 1.0 ]\n else :\n avgQs[rtype][0] += atom.Q\n avgQs[rtype][1] += 1.0\n else :\n itype = None\n if rtype.upper() in chargedIons :\n 
itype = \"%d\" % chargedIons[rtype.upper()]\n else :\n itype = rtype\n\n if not itype in avgQs :\n avgQs[itype] = [ atom.Q, 1.0 ]\n else :\n avgQs[itype][0] += atom.Q\n avgQs[itype][1] += 1.0\n\n #if at.residue.id.position == 200 and at.residue.id.chainId == \"K\" :\n # print \" - Q: %.3f\" % atom.Q\n\n\n nearAts = self.AtsWithin ( [atom], 6.0, allAtTree )\n for nat in nearAts :\n\n if nat == atom :\n continue\n\n d = (nat.coord() - atom.coord()).length\n if d < 0.2 :\n # duplicates from applying symmetry - keep just one\n # keep atom with lowest chainId\n at1, at2 = nat, atom\n keepAt = at1 if at1.residue.id.chainId < at2.residue.id.chainId else at2\n delAt = at1 if keepAt == at2 else at2\n deletAts[delAt] = 1\n #if not hasattr ( nat, 'keep' ) :\n # deletAts[nat] = 1\n #at.keep = True\n numSame += 1\n continue\n\n #if d < 2.0 and nat.residue.isProt :\n # print \" - Hoh res %d.%s may overlap %s.%s.%d.%s - d: %.2f\" % (at.residue.id.position, at.residue.id.chainId, nat.name, nat.residue.type, nat.residue.id.position, nat.residue.id.chainId, d)\n # deletAts[at] = 1\n\n #if d < 2.0 and nat.residue.id.chainId != at.residue.id.chainId :\n # print \" - hoh res %d may overlap at %s.%s.%d.%s\" % (at.residue.id, nat.name, nat.residue.type, nat.residue.id.position, nat.residue.id.chainId)\n\n #if d < 2.0 :\n # print \" - Hoh res %d may overlap at %s.%s.%d.%s - d: %.2f\" % (at.residue.id.position, nat.name, nat.residue.type, nat.residue.id.position, nat.residue.id.chainId, d)\n\n #if d < 2.0 and nat.residue.type == \"HOH\" and nat.residue.id.chainId != at.residue.id.chainId :\n # print \" - Hoh res %d.%s may overlap %s.%s.%d.%s - d: %.2f - \" % (at.residue.id.position, at.residue.id.chainId, nat.name, nat.residue.type, nat.residue.id.position, nat.residue.id.chainId, d)\n # deletAts[at] = 1\n\n if nat.element.name == \"O\" :\n if 1 and nat.residue.type == \"HOH\" :\n addD ( \"%s-H2O\" % rtype, d )\n addD ( \"H2O-%s\" % rtype, d )\n #if d < 3.5 :\n # Hoh_Hoh.append ( d )\n else :\n addD ( \"%s-O\" % rtype, d )\n #nr = nat.residue\n #if d > 2.0 and d < 3.5 :\n # Hoh_O.append ( d )\n # #print \" Hoh-O res %d.%s %s - %d.%s %s - d %.2f\" % (r.id.position, r.id.chainId, r.type, nr.id.position, nr.id.chainId, nr.type, d)\n\n elif nat in doAts :\n nrtype = \"H2O\" if nat.residue.type.upper() == \"HOH\" else nat.residue.type.upper()\n addD ( \"%s-%s\" % (rtype, nrtype), d )\n addD ( \"%s-%s\" % (nrtype, rtype), d )\n\n else :\n #if nat.element.name == \"N\" :\n addD ( \"%s-%s\" % (rtype, nat.element.name), d )\n addD ( \"%s-%s\" % (nat.element.name, rtype), d )\n\n #if nat.element.name == \"C\" :\n # addD ( \"%s-C\" % rtype, d )\n\n\n atI += 1\n if atI % 10 == 0 :\n status ( \"Making statistics on ions and waters... 
at %d/%d\" % (atI, len(doRes)) )\n print \".\",\n\n\n status ( \"Statistics done - open Log (IDLE) for results\" )\n\n print \"\"\n print \" - # same: %d\" % numSame\n\n\n if 1 :\n print \"\"\n print \" - deleting %d ats\" % len(deletAts.keys())\n for at in deletAts.keys() :\n #print \" - %s in res %s %d chain %s\" % (at.name, at.residue.type, at.residue.id.position, at.residue.id.chainId)\n if len(at.residue.atoms) == 1 :\n mol.deleteResidue ( at.residue )\n else :\n mol.deleteAtom ( at )\n\n #print \"\"\n #print \"Type\\tAvg\\tStd\"\n\n #for l, ds in [ [\"HOH-HOH\", Hoh_Hoh], [\"HOH-O\", Hoh_O] ] :\n # print \"%s\\t%f\\t%f\" % (l, numpy.average(ds), numpy.std(ds))\n\n #for l, ds in [ [\"Mg-HOH\", Mg_Hoh], [\"Mg-O\", Mg_O], [\"Mg-N\", Mg_N] ] :\n # print \"%s\\t%f\\t%f\" % (l, numpy.average(ds), numpy.std(ds))\n\n #print \" - tot HOH/MG: %d\" % (totAt)\n\n print \"\"\n print \"\"\n print \"Distances:\"\n\n s = \"\"\n for i in range ( 21 ) :\n s = s + \"\\t%.2f\" % (i/5.0)\n print s\n\n done = {}\n types = Ds.keys()\n types.sort()\n for typ in types :\n t1, t2 = typ.split(\"-\")\n if \"%s-%s\" % (t2, t1) in done :\n continue\n done[typ] = 1\n f = 2 if t1 == t2 else 1 # counted twice if t1 == t2\n s = typ\n for n in Ds[typ] :\n s = s + \"\\t%d\" % (n/f)\n print s\n\n print \"\"\n print \"Q-scores:\"\n\n s = \"\"\n for i in range ( 11 ) :\n s = s + \"\\t%.1f\" % ( i/10.0 )\n print s\n\n for t, qs in Qs.iteritems () :\n s = t\n for n in qs :\n s = s + \"\\t%d\" % n\n print s\n\n print \"\"\n print \"Type\\tAvg.Q\\tNum\"\n print mol.name.replace ( \".maxit.pdb\", \"\" )\n for tp, qn in avgQs.iteritems() :\n print \"%s\\t%.2f\\t%.0f\" % ( tp, qn[0]/qn[1], qn[1] )\n\n print \"\"\n\n if 1 :\n if not os.path.isfile (\"/Users/greg/Desktop/txt.txt\") :\n fp = open ( \"/Users/greg/Desktop/txt.txt\", \"a\" )\n fp.write ( \"Mol Name\\t_AvgQ(H2O)_\\t_#(H2O)_\\t_AvgQ(+2)_\\t_#(+2)_\\t_AvgQ(+1)_\\t_#(+1)_\\t_AvgQ(-1)_\\t_#(-1)_\\t_AvgQ(+3)_\\t_#(+3)_\\n\" )\n fp.close()\n fp = open ( \"/Users/greg/Desktop/txt.txt\", \"a\" )\n fp.write ( \"%s\" % mol.name.replace(\".maxit.pdb\", \"\") )\n for tp in [\"H2O\", \"2\", \"1\", \"-1\", \"3\"] :\n if tp in avgQs :\n qn = avgQs[tp]\n fp.write ( \"\\t%.2f\\t%.0f\" % ( qn[0]/qn[1], qn[1] ) )\n else :\n fp.write ( \"\\t\\t\" )\n fp.write ( \"\\n\" )\n fp.close()\n\n\n\n def S1 ( self ) :\n\n print \"\"\n\n for m in chimera.openModels.list() :\n\n if type(m) != chimera.Molecule :\n continue\n\n print \"\\n\\n-------------- %s -------- \" % m.name\n\n self.cur_mol = m\n self.Stats()\n\n\n\n def SN ( self ) :\n\n print \"\"\n\n mols = []\n for m in chimera.openModels.list() :\n if type(m) == chimera.Molecule :\n mols.append ( m )\n\n tp = \"HOH\"\n\n\n fp = open ( \"/Users/greg/Desktop/%s.txt\" % tp, \"w\" )\n for m in mols :\n ats = self.wiAtoms ( m, tp )\n mname = m.name.replace(\".maxit.pdb\", \"\").replace(\"T020\", \"\").replace(\"EM0\", \"_\")\n fp.write ( \"\\t%s [%s]\" % (mname, len(ats)) )\n fp.write ( \"\\n\" )\n fp.close()\n\n fp = open ( \"/Users/greg/Desktop/%s.txt\" % tp, \"a\" )\n\n for m1 in mols :\n\n ats1 = self.wiAtoms ( m1, tp )\n mname = m1.name.replace(\".maxit.pdb\", \"\").replace(\"T020\", \"\").replace(\"EM0\", \"_\")\n fp.write ( \"%s [%s]\" % (mname, len(ats1)) )\n\n for m2 in mols :\n num, pp, rmsd = self.wiDistNum ( tp, m1, m2)\n #fp.write ( \"\\t%d (%.0f%%) %.2f\" % (num, pp, rmsd) )\n fp.write ( \"\\t%.0f\" % pp )\n #fp.write ( \"\\t%d\" % num )\n #break\n\n fp.write ( \"\\n\" )\n\n fp.close()\n\n\n\n def HohShow ( self ) :\n\n print \"hoh 
- show\"\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n chainId = self.chain.get()\n\n dmap = self.cur_dmap\n print \" - scale map: %s\" % dmap.name\n\n\n totAt, showAt = 0, 0\n\n tot = {}\n\n\n for r in self.cur_mol.residues :\n\n #if r.id.chainId != chainId :\n # continue\n\n rid = \"%d.%s\" % (r.id.position, r.id.chainId)\n\n #if not r.isProt and not r.isNA :\n if not r.isProt and not r.isNA :\n\n for at in r.atoms :\n\n totAt += 1\n\n if at.Q < 0.6 :\n at.display = False\n try :\n tot[at.element.name] += 1\n except :\n tot[at.element.name] = 1\n else :\n at.display = True\n showAt += 1\n\n umsg ( \"Showing %d/%d solvent atoms\" % (showAt, totAt) )\n for tp, n in tot.iteritems() :\n print tp, n\n\n\n\n def GuessRes ( self ) :\n\n print \"\"\n\n dmap = self.cur_dmap\n if self.cur_dmap == None :\n umsg (\"Select a map first\")\n return []\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n #chainId = self.chain.get()\n\n\n nats = int(self.mapResN.get())\n status ( \"Estimating resolution of %s using %d atoms\" % (dmap.name, nats) )\n\n\n ats = [at for at in self.cur_mol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(self.cur_mol.atoms) )\n allAtTree = AdaptiveTree ( points.tolist(), ats, 1.0)\n\n minD, maxD = qscores.MinMaxD ( self.cur_dmap )\n\n qscores.SetBBAts (self.cur_mol)\n bbAts = [at for at in self.cur_mol.atoms if at.isBB == True]\n scAts = [at for at in self.cur_mol.atoms if at.isBB == False]\n\n nats = nats/2\n\n import random\n atoms = []\n atoms = atoms + random.sample ( bbAts, min(nats,len(bbAts)) )\n atoms = atoms + random.sample ( scAts, min(nats,len(scAts)) )\n\n avgQ, N = 0.0, 0.0\n for ati, at in enumerate(atoms) :\n\n\n if 0 and hasattr ( at, 'Q' ) :\n avgQ += at.Q\n continue\n\n rr = qscores.RadCC ( [at], dmap, sigma=0.5, allAtTree=allAtTree, show=0, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n #CC, CCm, yds, err = rr\n CC, CCm = rr\n\n rr = qscores.PtCC ( at.coord().data(), mol.openState.xform, dmap, sigma=0.5, allAtTree=allAtTree, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n #CC, CCm, yds, err = rr\n CC2, CCm2 = rr\n\n print \" - %d - %.4f,%.4f - %.4f,%.4f\" % (ati, CC, CCm, CC2, CCm2)\n\n at.Q = CCm\n avgQ += at.Q\n N += 1.0\n\n\n status ( \"Estimating resolution of %s using %d atoms - at %d\" % (dmap.name, nats, ati) )\n\n avgQ = avgQ / N\n avgR = (avgQ-1.1244)/-0.1794\n\n umsg ( \"Average Q=%0.2f -> res %.2f\" % (avgQ, avgR) )\n\n self.mapRes.set ( \"%.2f\" % avgR )\n\n\n\n def Go ( self ) :\n\n from chimera import tasks, CancelOperation\n import traceback\n task = tasks.Task(\"Placing water/ions\", modal = True)\n\n try :\n self.Go_ ( task=task )\n\n except Exception, err:\n umsg ( \"Canceled\" )\n print Exception, err\n traceback.print_exc()\n return\n\n finally :\n task.finished()\n\n\n def Go_ ( self, task = None ) :\n\n segMap = segmentation_map()\n if segMap == None :\n umsg ( \"Please select a map in the Segment Map Dialog\" )\n return\n\n smod = current_segmentation ()\n if smod == None :\n umsg ( \"Please select a segmentation file in the Segment Map dialog\" )\n return\n\n mol = self.cur_mol\n if self.cur_mol == None :\n umsg (\"Select a molecule first\")\n return []\n\n #chainId = self.chain.get()\n\n dmap = self.cur_dmap\n if self.cur_dmap == None :\n umsg (\"Select a map 
first\")\n return []\n\n\n #print \" -- chain %s\" % chainId\n toChain = self.addToChain.get()\n if len(toChain) > 1 :\n umsg ( \"Enter a single character in 'Add To Chain' field\" )\n return\n\n nearAtMap = {}\n for at in chimera.selection.currentAtoms() :\n nearAtMap[at] = 1\n\n if len(nearAtMap) == 0 :\n umsg ( \"Select atoms near which ions/waters should be placed\" )\n return []\n\n\n\n\n umsg ( \"Placing water/ions in map: %s, model: %s\" % (segMap.name, mol.name) )\n print \".\",\n if task : task.updateStatus( \"Placing water/ions in map: %s, model: %s\" % (segMap.name, mol.name) )\n\n\n atTree = None\n if 1 :\n points = _multiscale.get_atom_coordinates ( mol.atoms, transformed = False )\n print \" - search tree: %d ats\" % ( len(mol.atoms) )\n atTree = AdaptiveTree ( points.tolist(), mol.atoms, 2.0)\n\n regs = list(smod.regions)\n\n minD, maxD = qscores.MinMaxD ( segMap )\n print \" - mind: %.3f, maxd: %.3f\" % (minD, maxD)\n\n\n useQ = self.useQScore.get()\n try :\n minQ = float(self.placeQ.get())\n sigQ = float(self.qsigma.get())\n except :\n umsg ( \"Check Q-score and sigma, should be numbers...\" )\n return\n\n\n msg = \"\"\n if useQ :\n print \" - using min Q-score: %.2f sigma %.2f\" % (minQ, sigQ)\n #msg = \"minQ %.2f sigma %.2f\" % (minQ, sigQ)\n\n umsg ( \"Placing water in map: %s, model: %s, %d regions %s\" % (segMap.name, mol.name, len(regs), msg) )\n\n import time\n startt = time.time()\n\n n_regs = []\n for reg in regs :\n npts = len(reg.points())\n if reg.surface_piece:\n reg.hide_surface()\n if npts > 3 :\n n_regs.append ( [npts, reg] )\n\n # give larger regions more priority...\n n_regs.sort ( reverse=True, key=lambda x: x[0] )\n\n addPts = []\n addW = []\n addI = []\n skipped, skippedQ = 0, 0\n numW, numI = 0, 0\n xfI = segMap.openState.xform\n\n\n # a temporary molecule to add new waters/ions so they can be considered\n # when adding new waters/ions\n nmol = chimera.Molecule()\n\n regi = 0\n for numRegPts, reg in n_regs :\n\n if regi % 10 == 0 :\n ts = qscores.TimeLeftStr (regi, len(n_regs), time.time() - startt )\n s1 = '{:,}'.format(regi) + \"/\" + '{:,}'.format(len(n_regs))\n s2 = '{:,}'.format(skipped) + \"/\" + '{:,}'.format(skippedQ)\n s3 = '{:,}'.format(numW)\n s4 = '{:,}'.format(numI)\n status ( \"At region %s (%s) - %s waters, %s ions so far, eta: %s\" % (s1, s2, s3, s4, ts) )\n if task : task.updateStatus( \"At region %s (%s) - %s waters, %s ions so far, eta: %s\" % (s1, s2, s3, s4, ts) )\n #print \".\",\n\n regi += 1\n\n\n P, ctr = None, None\n # what point in the region to use...\n if 0 :\n\n # use the center of all points in the region\n # - may not be close to highest value or peak\n ctr = reg.center_of_points()\n P = chimera.Point(ctr[0],ctr[1],ctr[2])\n\n elif 1 :\n # use the highest value in the region\n # - may not be close to the center, but it is the peak\n\n ctr, maxD = None, -1e9\n rpts = reg.map_points()\n map_values = segMap.interpolated_values ( rpts, segMap.openState.xform )\n #print map_values\n #break\n maxValPt = None\n for pt, val in zip(rpts, map_values) :\n if val > maxD :\n maxValPt, maxD = pt, val\n\n if 0 :\n P = chimera.Point(maxValPt[0], maxValPt[1], maxValPt[2])\n ctr = [maxValPt[0], maxValPt[1], maxValPt[2]]\n\n else :\n #print pt\n # go to interpolated maximum...\n # - interestingly this can be a bit different than the voxel with\n # - the highest value\n pts, avgMapV = PtsToMax ( [maxValPt], segMap )\n maxPt = pts[0]\n\n #maxPt_ = [maxPt[0], maxPt[1], maxPt[2]]\n #map_values = segMap.interpolated_values ( [maxPt_], 
segMap.openState.xform )\n #print map_values\n #print \"|%.5f -> %.5f|\" % (maxD, avgMapV)\n #break\n\n # if movement is too large to maximum, likely this\n # is not a well-separated blob, so ignore it\n V = maxPt - chimera.Point(maxValPt[0], maxValPt[1], maxValPt[2])\n #if maxD > avgMapV :\n # print \"|%.5f -> %.5f|\" % (maxD, avgMapV),\n # skipped += 1\n # continue\n if V.length > segMap.data.step[0] :\n #print \"|%.1f|\" % V.length,\n skipped += 1\n continue\n\n P = maxPt\n ctr = [P[0], P[1], P[2]]\n\n\n if useQ :\n # check Q-score of pt\n qs = qscores.QscorePt ( ctr, xfI, segMap, sigQ, allAtTree=None, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n if qs < minQ :\n skippedQ += 1\n continue\n\n\n #print pt\n #break\n\n\n else :\n # use the center of mass\n rpts = reg.map_points()\n #rpts = reg.points()\n map_values = segMap.interpolated_values ( rpts, segMap.openState.xform )\n #print map_values\n #break\n ctr, sum = numpy.array ( [0,0,0] ), 0.0\n for pt, val in zip(rpts, map_values) :\n ctr += pt * val\n sum += val\n ctr = ctr / sum\n P = chimera.Point(ctr[0],ctr[1],ctr[2])\n\n\n\n # check already added waters (which are not in atTree)\n # - now atoms are now added on the fly, so this is not needed\n if 0 :\n clash = False\n for atName, resName, clr, reg, P0 in addPts :\n d = P - P0\n if d.length < 2.2 :\n clash = True\n break\n\n if clash :\n continue\n\n msg, msgFull, atName, resName, closestChainId, clr = self.GuessAtom ( mol, P, atTree=atTree, nearAtMap=nearAtMap, doMsg=False, checkNewAtoms=nmol.atoms )\n\n if atName != None :\n\n if atName == 'CL' : # or atName == \"NA\" :\n continue\n\n if closestChainId == None :\n print \" - new %s atom doesn't have nearest chain\" % atName\n continue\n\n # add atom to new molecule, to be used in checkNewAtoms list\n nres = nmol.newResidue (resName, chimera.MolResId(closestChainId, len(nmol.residues)+1))\n nat = nmol.newAtom (atName, chimera.Element(atName))\n nres.addAtom( nat )\n nat.setCoord ( P )\n\n addPts.append ( [atName, resName, clr, reg, P, closestChainId] )\n if atName == 'O' :\n numW += 1\n addW.append ( [atName, resName, clr, reg, P, closestChainId] )\n else :\n numI += 1\n addI.append ( [atName, resName, clr, reg, P, closestChainId] )\n\n\n\n # add ions first\n\n #toChain = chainId.lower()\n print \" - adding %d ions to chain %s, skipped %d/%d regions (move/Q)\" % (len(addI), toChain, skipped, skippedQ)\n\n largestResIdForChain = {}\n for r in mol.residues :\n if not r.id.chainId in largestResIdForChain :\n largestResIdForChain[r.id.chainId] = r.id.position\n else :\n largestResIdForChain[r.id.chainId] = max(r.id.position, largestResIdForChain[r.id.chainId])\n\n numI, numIneg2, numIneg1, numIpos1 = 0, 0, 0, 0\n for atName, resName, clr, reg, P, closestChainId in addI :\n\n numI += 1\n if atName.upper() == \"NA\" :\n numIneg1 += 1\n elif atName.upper() == \"CL\" :\n numIpos1 += 1\n else :\n numIneg2 += 1\n\n reg.show_surface()\n if reg.surface_piece :\n if atName.upper() == \"NA\" :\n reg.surface_piece.color = (1,0,1,1)\n elif atName.upper() == \"CL\" :\n reg.surface_piece.color = (1,0,1,1)\n else :\n reg.surface_piece.color = (0,1,0,1)\n\n cid = \"_\"\n if toChain == None or len(toChain) == 0 :\n cid = closestChainId\n else :\n cid = toChain\n\n if not cid in largestResIdForChain :\n # new chain...\n largestResIdForChain[cid] = 0\n\n i = largestResIdForChain[cid] + 1\n largestResIdForChain[cid] = i\n\n if cid == None :\n print \" - at %s doesn't have closest chain\" % atName\n continue\n\n nres = 
mol.newResidue (resName, chimera.MolResId(cid, i))\n nat = mol.newAtom (atName, chimera.Element(atName))\n\n nres.addAtom( nat )\n nat.setCoord ( P )\n #nat.drawMode = nat.Ball\n nat.drawMode = 2\n nat.color = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 )\n nat.display = True\n\n nat.radius = 1.46\n nat.drawMode = nat.EndCap if atName.lower() == \"o\" else nat.Ball\n nat.color = atomColors[atName.upper()] if atName.upper() in atomColors else atomColors[' ']\n\n\n print \" - added %d ions, %d 2+, %d 1+ (Cl), %d 1- (NA)\" % (numI, numIneg2, numIneg1, numIpos1)\n # then add waters\n\n #toChain = \"w\"\n print \" - adding %d waters to chain %s, skipped %d regions\" % (len(addW), toChain, skipped)\n\n\n for atName, resName, clr, reg, P, closestChainId in addW :\n\n reg.show_surface()\n if reg.surface_piece :\n reg.surface_piece.color = (1,0,0,1)\n\n cid = \"_\"\n if toChain == None or len(toChain) == 0 :\n cid = closestChainId\n else :\n cid = toChain\n\n if not cid in largestResIdForChain :\n # new chain...\n largestResIdForChain[cid] = 0\n\n i = largestResIdForChain[cid] + 1\n largestResIdForChain[cid] = i\n\n if cid == None :\n print \" - at %s doesn't have closest chain\" % atName\n continue\n\n nres = mol.newResidue (resName, chimera.MolResId(cid, i))\n nat = mol.newAtom (atName, chimera.Element(atName))\n\n nres.addAtom( nat )\n nat.setCoord ( P )\n nat.drawMode = nat.EndCap\n nat.color = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 )\n nat.display = True\n\n nat.radius = 1.46\n nat.drawMode = nat.EndCap if atName.lower() == \"o\" else nat.Ball\n nat.color = atomColors[atName.upper()] if atName.upper() in atomColors else atomColors[' ']\n\n i += 1\n\n\n\n\n status ( \"Added %d waters, %d ions - done\" % (len(addW), len(addI)) )\n\n if 1 :\n\n qs = \"\"\n if useQ :\n qs = \"_Q%.2f_%.2f\" % (minQ, sigQ)\n\n thr = 0.0\n if hasattr ( segMap, \"segmentThreshold\" ) :\n thr = segMap.segmentThreshold\n else :\n thr = segMap.surface_levels[0]\n\n molPath = os.path.splitext(mol.openedAs[0])[0]\n nname = molPath + \"_thr%.4f%s__%d-water__%d-ion.pdb\" % (thr, qs, len(addW), len(addI))\n\n print \"\"\n print \"Saving pdb waters ->\", nname\n chimera.PDBio().writePDBfile ( [mol], nname )\n\n print \"\"\n\n\n self.RefreshTree ()\n\n\n\n def Thr ( self ) :\n\n\n #mol = self.cur_mol\n #if self.cur_mol == None :\n # umsg (\"Select a molecule first\")\n # return []\n\n #chainId = self.chain.get()\n\n dmap = self.cur_dmap\n print \" - scale map: %s\" % dmap.name\n\n if dmap == None :\n umsg ( \"Open & select a map first...\" )\n return\n\n\n M = dmap.data.full_matrix()\n sdev = numpy.std(M)\n avg = numpy.average(M)\n thr = dmap.surface_levels[0]\n\n print \"\"\n umsg ( \"Calculating sigma in %s...\" % dmap.name )\n print \"Avg: %.3f, sdev: %.3f, thr: %.4f [%.4f sdev above mean]\" % (avg, sdev, thr, (thr-avg)/sdev)\n print \" - 0.5 sdev above avg: %.4f\" % (avg + 0.5 * sdev)\n print \" - 1 sdev above avg: %.4f\" % (avg + 1.0 * sdev)\n print \" - 2 sdev above avg: %.4f\" % (avg + 2.0 * sdev)\n print \" - 3 sdev above avg: %.4f\" % (avg + 3.0 * sdev)\n\n sig1 = avg + 1.0 * sdev\n sig2 = avg + 2.0 * sdev\n sig3 = avg + 3.0 * sdev\n\n umsg ( \"1-sigma=[%.4f], 2-sigma:[%.4f], 3-sigma:[%.4f] -- in %s\" % (sig1, sig2, sig3, dmap.name) )\n\n\n #dmap.surface_levels[0] = sig2\n #chimera.runCommand ( \"vol #%d style surface region all step 1\" % dmap.id )\n\n\n\n\n\n\ndef PtsToMax ( pts, dmap ) :\n\n from numpy import array, ones\n\n #import _multiscale\n #fpoints = _multiscale.get_atom_coordinates ( 
mol.atoms, transformed = False )\n apts = array ( pts, dtype=numpy.float32 )\n\n wts = ones ( len(pts), numpy.float32 )\n\n darray = dmap.data.matrix()\n\n from FitMap import locate_maximum, overlap_and_correlation\n\n xyz_to_ijk_tf = dmap.data.xyz_to_ijk_transform\n #map_values, outside = VolumeData.interpolate_volume_data(pts, xyz_to_ijk_tf, darray)\n #olap0, cc0, other = overlap_and_correlation ( wts, map_values )\n\n #print ( \" ToMax -- CC %.3f\" % (cc0) )\n\n move_tf, stats = locate_maximum(apts, wts,\n darray, xyz_to_ijk_tf,\n max_steps = 1000,\n ijk_step_size_min = 0.01,\n ijk_step_size_max = 0.5,\n optimize_translation = True,\n optimize_rotation = True,\n metric = 'sum product',\n request_stop_cb = None)\n\n from Matrix import chimera_xform\n xf = chimera_xform ( move_tf )\n #xT, xR = xf_2_M ( chimera_xform ( move_tf ) )\n #M = M * xT * xR\n\n #corr = stats['correlation']\n #print ( \" -- fit CC %.3f -> %.3f, shift [%.3f], rot [%.3f], %d steps\" % (cc0, corr, stats['shift'], stats['angle'], stats['steps']) )\n #print stats\n\n mpts = [None] * len(pts)\n for i, pt in enumerate(pts) :\n pt = chimera.Point ( apts[i][0], apts[i][1], apts[i][2] )\n xpt = xf.apply (pt)\n #mpts[i] = [xpt[0], xpt[1], xpt[2]]\n mpts[i] = xpt\n\n return mpts, stats['average map value']\n\n\n\n\n\ndef PtsToMap ( points, dmap, atomRad, nname, showMesh = False, clr = (0.7, 0.7, 0.7, 0.2) ) :\n\n #_contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n #mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, atomRad )\n\n import _contour\n points1 = numpy.copy ( points )\n _contour.affine_transform_vertices ( points1, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n points0 = numpy.copy ( points1 )\n _contour.affine_transform_vertices ( points1, dmap.data.xyz_to_ijk_transform )\n\n bound = 5\n li,lj,lk = numpy.min ( points1, axis=0 ) - (bound, bound, bound)\n hi,hj,hk = numpy.max ( points1, axis=0 ) + (bound, bound, bound)\n\n n1 = hi - li + 1\n n2 = hj - lj + 1\n n3 = hk - lk + 1\n\n #print \" - bounds - %d %d %d --> %d %d %d --> %d %d %d\" % ( li,lj,lk, hi,hj,hk, n1,n2,n3 )\n\n #nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )\n #dmat = dmap.full_matrix()\n\n nstep = (dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )\n #nstep = (fmap.data.step[0]/2.0, fmap.data.step[1]/2.0, fmap.data.step[2]/2.0 )\n\n nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )\n nn2 = int ( round (dmap.data.step[1] * float(n2) / nstep[1]) )\n nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )\n\n O = dmap.data.origin\n #print \" - %s origin:\" % dmap.name, O\n nO = ( O[0] + float(li) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + float(lk) * dmap.data.step[2] )\n\n #print \" - new map origin:\", nO\n\n nmat = numpy.zeros ( (nn1,nn2,nn3), numpy.float32 )\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )\n\n #print \" - fmap grid dim: \", numpy.shape ( fmap.full_matrix() )\n #print \" - new map grid dim: \", numpy.shape ( nmat )\n\n npoints = VolumeData.grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices\n _contour.affine_transform_vertices ( npoints, ndata.ijk_to_xyz_transform )\n\n dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )\n #dvals = dmap.interpolated_values ( npoints, chimera.Xform.identity() )\n #dvals = dmap.interpolated_values ( npoints, dmap.openState.xform.inverse() )\n #dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )\n 
#nze = numpy.nonzero ( dvals )\n\n nmat = dvals.reshape( (nn3,nn2,nn1) )\n #f_mat = fmap.data.full_matrix()\n #f_mask = numpy.where ( f_mat > fmap.surface_levels[0], numpy.ones_like(f_mat), numpy.zeros_like(f_mat) )\n #df_mat = df_mat * f_mask\n\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )\n #try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n #except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n #nv.openState.xform = dmap.openState.xform\n\n mdata = VolumeData.zone_masked_grid_data ( ndata, points0, atomRad )\n gdata = VolumeData.Array_Grid_Data ( mdata.full_matrix(), nO, nstep, dmap.data.cell_angles, name = nname )\n nv = VolumeViewer.volume.volume_from_grid_data ( gdata )\n nv.openState.xform = dmap.openState.xform\n\n nv.name = nname\n #dmap.display = False\n nv.region = ( nv.region[0], nv.region[1], [1,1,1] )\n\n nv.surface_levels[0] = dmap.surface_levels[0]\n\n ro = VolumeViewer.volume.Rendering_Options()\n if 1 :\n ro.smoothing_factor = .3\n ro.smoothing_iterations = 2\n ro.surface_smoothing = True\n #ro.square_mesh = True\n #ro.line_thickness = 2\n nv.update_surface ( False, ro )\n #setro (ro)\n for sp in nv.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 :\n sp.display = False\n else :\n if showMesh :\n sp.color = (.5, .5, .5, 1.0)\n sp.displayStyle = sp.Mesh\n else :\n sp.color = clr\n\n return nv\n\n\n\n\n\ndef SetBBAts ( mol ) :\n\n #if hasattr ( mol, \"bbats\" ) :\n # return\n #mol.bbats = True\n\n #print \" - setting bbAts in %s\" % mol.name\n for r in mol.residues :\n\n #r.isProt = \"C\" in r.atomsMap and \"CA\" in r.atomsMap and \"N\" in r.atomsMap\n #r.isProt = \"CA\" in r.atomsMap\n #r.isNA = \"O3'\" in r.atomsMap and \"O5'\" in r.atomsMap\n\n from chimera.resCode import nucleic3to1\n from chimera.resCode import protein3to1\n protein3to1['HSD'] = protein3to1['HIS']\n\n r.isProt = r.type in protein3to1\n r.isNA = r.type in nucleic3to1\n\n if r.isProt :\n r.rtype = \"prot\"\n elif r.isNA :\n r.rtype = \"na\"\n else :\n r.rtype = \"?\"\n\n\n if r.isNA :\n try :\n if nucleic3to1[r.type] == \"G\" :\n r.baseAt = r.atomsMap[\"N9\"][0]\n elif nucleic3to1[r.type] == \"C\" :\n r.baseAt = r.atomsMap[\"N1\"][0]\n elif nucleic3to1[r.type] == \"A\" :\n r.baseAt = r.atomsMap[\"N9\"][0]\n elif nucleic3to1[r.type] == \"U\" :\n r.baseAt = r.atomsMap[\"N1\"][0]\n except :\n #print \" - baseAt not found - \"\n pass\n\n\n r.bbAtoms = []\n r.scAtoms = []\n\n if r.isProt :\n for a in r.atoms :\n if a.element.name == \"H\" :\n a.isBB, a.isSC = False, False\n continue\n n = a.name\n a.isBB = n==\"C\" or n==\"CA\" or n==\"O\" or n==\"N\" or n==\"OT1\" or n==\"OT2\"\n a.isSC = not a.isBB\n if a.isBB :\n r.bbAtoms.append ( a )\n else :\n r.scAtoms.append ( a )\n\n a.isSugar, a.isBase = False, False\n\n elif r.isNA :\n for a in r.atoms :\n if a.element.name == \"H\" :\n a.isBB, a.isSC = False, False\n continue\n n = a.name\n\n a.isBB = n==\"P\" or n==\"O1P\" or n==\"O2P\" or n==\"OP1\" or n==\"OP2\" or n==\"O5'\" or n==\"C5'\" or n==\"O3'\"\n a.isSugar = n==\"C1'\" or n==\"C2'\" or n==\"O4'\" or n==\"O2'\" or n==\"C3'\" or n==\"C4'\"\n a.isBB = a.isBB or a.isSugar\n\n a.isBase = False\n\n if nucleic3to1[r.type] == \"G\" :\n a.isBase = n==\"N9\" or n==\"C8\" or n==\"N7\" or n==\"C5\" or n==\"C4\" or n==\"C6\" or n==\"O6\" or n==\"N1\" or n==\"C2\" or n==\"N2\" or n==\"N3\"\n\n elif nucleic3to1[r.type] == \"C\" :\n a.isBase = n==\"N1\" or n==\"C2\" or n==\"O2\" or n==\"N3\" or n==\"C4\" or n==\"N4\" or n==\"C5\" or 
n==\"C6\"\n\n elif nucleic3to1[r.type] == \"A\" :\n a.isBase = n==\"N9\" or n==\"C8\" or n==\"N7\" or n==\"C5\" or n==\"C4\" or n==\"N3\" or n==\"C2\" or n==\"N1\" or n==\"C6\" or n==\"N6\"\n\n elif nucleic3to1[r.type] == \"U\" :\n a.isBase = n==\"N1\" or n==\"C2\" or n==\"O2\" or n==\"N3\" or n==\"C4\" or n==\"O4\" or n==\"C5\" or n==\"C6\"\n\n else :\n #print \" -x- NA res %d.%s is ?\" % (r.id.position, r.type)\n break\n\n a.isSC = a.isBase\n\n #if nucleic3to1[r.type] == \"G\" :\n # r.isBase = n==\"\" or n==\"\" or n==\"\" or n==\"\" or n==\"\" or n==\"\" or n==\"\" or n==\"\" or n=\"\" or n=\"\" or n=\"\"\n # r.baseAt = r.atomsMap[\"N9\"][0]\n\n if a.isBB :\n r.bbAtoms.append ( a )\n else :\n r.scAtoms.append ( a )\n\n else :\n for a in r.atoms :\n a.isBB, a.isSC, a.isSugar, a.isBase = False, False, False, False\n\n\n\n\n\ndef dialog ( ) :\n\tfrom chimera import dialogs\n\treturn dialogs.find ( SWIM_Dialog.name, create=False )\n\n\ndef show_dialog ( closeOld = True ):\n\n\tfrom chimera import dialogs\n\n\td = dialogs.find ( SWIM_Dialog.name, create=False )\n\tif d :\n\t\tif closeOld :\n\t\t\td.toplevel_widget.update_idletasks ()\n\t\t\td.Close()\n\t\t\td.toplevel_widget.update_idletasks ()\n\t\telse :\n\t\t\t# is there a way to bring it to front?\n\t\t\treturn d\n\n\tdialogs.register (SWIM_Dialog.name, SWIM_Dialog, replace = True)\n\n\td = dialogs.find ( SWIM_Dialog.name, create=True )\n\td.toplevel_widget.update_idletasks ()\n\td.enter()\n\n\treturn d\n", "id": "7799853", "language": "Python", "matching_score": 8.447173118591309, "max_stars_count": 6, "path": "Segger/SWIM.py" }, { "content": "\n# Copyright (c) 2020 <NAME> - <EMAIL>\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport chimera\nimport os\nimport os.path\nimport numpy\n\nfrom VolumeData import grid_indices, zone_masked_grid_data, interpolate_volume_data\nfrom _multiscale import get_atom_coordinates\nfrom _contour import affine_transform_vertices as transform_vertices\nfrom Matrix import xform_matrix, multiply_matrices, chimera_xform, identity_matrix, invert_matrix, shift_and_angle\nfrom VolumeViewer import volume_from_grid_data\nfrom VolumeViewer.volume import Rendering_Options\nfrom time import clock\nfrom random import random as rand\nimport FitMap\nimport VolumeViewer\nimport Segger.quaternion\nimport Matrix\nimport VolumeData\n\nfrom axes import prAxes\nfrom regions import mask_volume, regions_radius\n\nfrom segment_dialog import current_segmentation, segmentation_map\nfrom Segger import dev_menus, timing, seggerVersion\n\nOML = chimera.openModels.list\n\nSAF_DVOL = 0.75\nSAF_DBRAD = 0.3\nSAF_LS_DEPTH = 4\nSAF_LS_NGROUPS = 1000\n\nREG_OPACITY = 0.45\nMAX_NUM_GROUPS = 1000\n\n\n\ndef umsg ( txt ) :\n print txt\n status ( txt )\n\n\ndef status ( txt ) :\n txt = txt.rstrip('\\n')\n msg.configure(text = txt)\n msg.update_idletasks()\n\n\n#from fit_devel import Fit_Devel\n\nclass Fit_Segments_Dialog ( chimera.baseDialog.ModelessDialog ):\n\n title = \"SegFit (Segger v\" + seggerVersion + \")\"\n name = \"fit segments\"\n #buttons = ( 'SMS', 'Scores', 'Fit', 'Options', \"Close\")\n #buttons = ( 'Place', 'Fit', 'Options', \"Close\")\n buttons = ( 'Fit', 'Stop', 'Options', \"Close\")\n help = 'https://github.com/gregdp/segger'\n\n def fillInUI(self, parent):\n\n import Tkinter\n from CGLtk import Hybrid\n\n tw = parent.winfo_toplevel()\n self.toplevel_widget = tw\n tw.withdraw()\n\n parent.columnconfigure(0, weight = 1)\n\n row = 1\n\n menubar = Tkinter.Menu(parent, type = 'menubar', tearoff = False)\n tw.config(menu = menubar)\n\n self.UseAllMods = Tkinter.IntVar()\n self.UseAllMods.set ( 0 )\n\n fit_menu_entries = [\n ('Delete selected fits from list', self.delete_fit_cb),\n ('Delete ALL fits from list', self.delete_all_fit_cb),\n 'separator',\n ('Place molecule copies', self.place_copies_cb),\n ('Place map copies', self.place_map_copies_cb),\n #('Cube map', self.extractCubeMap),\n ('Close placed copies', self.close_copies_cb),\n\n 'separator',\n (\"Save chosen fit molecules\", self.SaveStrucFit),\n\n 'separator',\n ('Place selected map relative to segmented map', self.save_map_resample),\n\n 'separator']\n\n if dev_menus :\n fit_menu_entries = fit_menu_entries + [\n ('Group regions by SS in visible (Molecule) models', self.GroupRegionsBySS),\n ('Mask map with selection', self.MaskWithSel)\n ]\n\n fit_menu_entries = fit_menu_entries + [\n ('Group regions by visible (Molecule) models', self.GroupRegionsByMols),\n ('Group regions by chains in visible (Molecule) models', self.GroupRegionsByChains),\n\n 'separator',\n (\"Show molecule axes\", self.StrucShowAxes),\n (\"Hide molecule axes\", self.StrucHideAxes),\n (\"Show overlapping regions\", self.ShowOverlappingRegions),\n\n 'separator',\n (\"Export fit scores\", self.ExportFitScores),\n (\"Plot fit scores\", self.PlotFitScores),\n ( \"Score Visible\", self.VisiScores )\n ]\n\n if dev_menus :\n fit_menu_entries = fit_menu_entries + [\n 'separator',\n ('Difference map', 
self.DifferenceMap),\n ('Intersection map', self.IntersectionMap),\n ('Shape match', self.ShapeMatch),\n\n 'separator',\n ('Fit all visible maps to selected', self.FitAllVisMaps),\n ('Make average map of visible fitted maps', self.AvgFMaps2),\n ('Make difference map of visible fitted maps', self.DifFMaps2),\n ('Take fitted map densities into segmented map', self.TakeFMap_with_DMap0),\n ('Take fitted map densities into segmented map + noise', self.TakeFMap_with_DMapN),\n ('Take fitted map densities into segmented map (Shrink/grow)', self.TakeFMap_with_DMap),\n ('Save visible fitted maps in segmented map grid', self.TakeFMapsVis),\n ('Take segmented map densities into fitted map', self.TakeDMap_with_FMap),\n 'separator',\n ('Group regions by chains in visible molecules', self.GroupRegionsByChains),\n ('Group regions by visible molecules', self.GroupRegionsByMols),\n ('Group regions by selected fitted molecules', self.GroupRegionsByFittedMols),\n ('Group regions by visible maps', self.GroupRegionsByVisiMaps),\n 'separator',\n #('0 map with selection', self.ZeroMapBySel),\n #('0 map with visible molecules', self.ZeroMapByMols),\n ('0 map with selected fitted molecules', self.ZeroMapFittedMols),\n ('0 map with visible molecules', self.ZeroMapVisMols),\n ('Values in map', self.ValuesInMap),\n ('Mask with selected map/model', self.MaskMapWithSel)\n ]\n\n\n fmenu = Hybrid.cascade_menu(menubar, 'Fit', fit_menu_entries)\n #self.add_devel_menus(fmenu)\n\n from chimera.tkgui import aquaMenuBar\n aquaMenuBar(menubar, parent, row = 0, columnspan=3)\n\n\n f = Tkinter.Frame(parent)\n f.grid(column=0, row=row, sticky='ew')\n row += 1\n\n l = Tkinter.Label(f, text='Structure or Map to fit')\n l.grid(column=0, row=0, sticky='w')\n\n self.struc = Tkinter.StringVar(parent)\n self.strucMB = Tkinter.Menubutton ( f, textvariable=self.struc, relief=Tkinter.RAISED )\n self.strucMB.grid (column=1, row=0, sticky='we', padx=5)\n self.strucMB.menu = Tkinter.Menu ( self.strucMB, tearoff=0, postcommand=self.StrucMenu )\n self.strucMB[\"menu\"] = self.strucMB.menu\n\n\n h = '%10s %10s %10s %10s %10s %10s %10s %10s' % ('Corr.', 'At. Incl.', 'BB Incl.', 'Clashes', 'Dens. 
Occ.', 'Molecule', 'Map', 'Region')\n fl = Hybrid.Scrollable_List(parent, h, 8, self.fit_selection_cb)\n self.fit_listbox = fl.listbox\n self.list_fits = []\n fl.frame.grid(row = row, column = 0, sticky = 'news')\n parent.rowconfigure(row, weight = 1)\n row += 1\n self.fit_listbox.bind('<KeyPress-Delete>', self.delete_fit_cb)\n\n\n op = Hybrid.Popup_Panel(parent)\n opf = op.frame\n opf.grid(row = row, column = 0, sticky = 'news')\n opf.grid_remove()\n opf.columnconfigure(0, weight=1)\n self.optionsPanel = op.panel_shown_variable\n row += 1\n orow = 0\n\n cb = op.make_close_button(opf)\n cb.grid(row = orow, column = 0, sticky = 'e')\n\n l = Tkinter.Label(opf, text='Fitting Options', font = 'TkCaptionFont')\n l.grid(column=0, row=orow, sticky='w', pady=5)\n orow += 1\n\n fopt = Tkinter.Frame(opf)\n fopt.grid(column=0, row=orow, sticky='ew', padx=10)\n orow += 1\n forow = 0\n\n\n if 0 :\n oft = Hybrid.Checkbutton(fopt, 'Treat all sub-models as one structure', False)\n oft.button.grid(row = forow, column = 0, sticky = 'w')\n self.lump_subids = oft.variable\n forow += 1\n\n f = Tkinter.Frame(fopt)\n f.grid(column=0, row=forow, sticky='w')\n\n l = Tkinter.Label(f, text='Density map resolution:')\n l.grid(column=0, row=0, sticky='w')\n\n self.simRes = Tkinter.StringVar(fopt)\n e = Tkinter.Entry(f, width=4, textvariable=self.simRes)\n e.grid(column=1, row=0, sticky='w', padx=5)\n\n l = Tkinter.Label(f, text='grid spacing:')\n l.grid(column=2, row=0, sticky='w')\n\n self.simGridSp = Tkinter.StringVar(fopt)\n e = Tkinter.Entry(f, width=4, textvariable=self.simGridSp)\n e.grid(column=3, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(f, text=\"Calculate Map\", command=self.GenStrucMap)\n b.grid (column=4, row=0, sticky='w', padx=5)\n\n\n forow += 1\n\n #l = Tkinter.Label(fopt, text='Fit to:')\n #l.grid(column=0, row=forow, sticky='w')\n\n forow += 1\n\n if 1 :\n f = Tkinter.Frame(fopt)\n f.grid(column=0, row=forow, sticky='w')\n\n l = Tkinter.Label(f, text='Fit to:')\n l.grid(column=0, row=0, sticky='w')\n\n self.alignTo = Tkinter.StringVar()\n self.alignTo.set ( 'combined_selected_regions' )\n\n #l = Tkinter.Label(f, text=' ', width=5)\n #l.grid(column=0, row=0, sticky='w')\n\n c = Tkinter.Radiobutton(f, text=\"Combined selected regions\", variable=self.alignTo, value = 'combined_selected_regions')\n c.grid (column=1, row=0, sticky='w')\n\n c = Tkinter.Radiobutton(f, text=\"Each selected region\", variable=self.alignTo, value = 'each_selected_region')\n c.grid (column=2, row=0, sticky='w')\n\n #c = Tkinter.Radiobutton(f, text=\"Groups of regions including selected region(s)\", variable=self.alignTo, value = 'around_selected')\n #c.grid (column=1, row=2, sticky='w')\n\n #c = Tkinter.Radiobutton(f, text=\"Groups of regions including all regions\", variable=self.alignTo, value = 'all_groups')\n #c.grid (column=1, row=3, sticky='w')\n\n forow += 1\n\n if 1 :\n f = Tkinter.Frame(fopt)\n f.grid(column=0, row=forow, sticky='w')\n\n l = Tkinter.Label(f, text='Fit by:')\n l.grid(column=0, row=0, sticky='w')\n\n self.rotaSearch = Tkinter.IntVar()\n self.rotaSearch.set ( 0 )\n\n #l = Tkinter.Label(f, text=' ', width=5)\n #l.grid(column=0, row=0, sticky='w')\n\n c = Tkinter.Radiobutton(f, text=\"PCA (faster)\", variable=self.rotaSearch, value = 0)\n c.grid (column=1, row = 0, sticky='w')\n\n #l = Tkinter.Label(f, text=' ', width=5)\n #l.grid(column=0, row=0, sticky='w')\n\n c = Tkinter.Radiobutton(f, text=\"Centers +\", variable=self.rotaSearch, value = 1)\n c.grid (column=2, row = 0, sticky='w')\n\n 
self.rotaSearchNum = Tkinter.StringVar(f, \"100\")\n e = Tkinter.Entry(f, width=5, textvariable=self.rotaSearchNum)\n e.grid(column=3, row=0, sticky='w', padx=5)\n\n l = Tkinter.Label(f, text='rotations (more accurate)')\n l.grid(column=4, row=0, sticky='w')\n\n\n forow += 1\n\n oft = Hybrid.Checkbutton(fopt, 'Mask map with region(s) to prevent large drifts', False)\n oft.button.grid(row = forow, column = 0, sticky = 'w')\n self.mask_map_when_fitting = oft.variable\n\n\n forow += 1\n\n if 1 :\n oft = Hybrid.Checkbutton(fopt, 'Use Laplacian filter', False)\n #oft.button.grid(row = forow, column = 0, sticky = 'w')\n self.useLaplace = oft.variable\n\n\n forow += 1\n\n oft = Hybrid.Checkbutton(fopt, 'Optimize fits', True)\n oft.button.grid(row = forow, column = 0, sticky = 'w')\n self.optimize_fits = oft.variable\n\n\n forow += 1\n\n f = Tkinter.Frame(fopt)\n f.grid(column=0, row=forow, sticky='w')\n\n oft = Hybrid.Checkbutton(f, 'Cluster fits that are <', True)\n oft.button.grid(row = 0, column = 0, sticky = 'w')\n self.doClusterFits = oft.variable\n\n self.positionTolString = Tkinter.StringVar(f, \"5.0\")\n e = Tkinter.Entry(f, width=3, textvariable=self.positionTolString)\n e.grid(column=1, row=0, sticky='w', padx=5)\n\n l = Tkinter.Label(f, text='Angstroms and <')\n l.grid(column=2, row=0, sticky='w')\n\n self.angleTolString = Tkinter.StringVar(f, \"3.0\")\n e = Tkinter.Entry(f, width=3, textvariable=self.angleTolString)\n e.grid(column=3, row=0, sticky='w', padx=5)\n\n l = Tkinter.Label(f, text='degrees apart' )\n l.grid(column=4, row=0, sticky='w')\n\n\n forow += 1\n\n f = Tkinter.Frame(fopt)\n f.grid(column=0, row=forow, sticky='w')\n\n l = Tkinter.Label(f, text='Add top')\n l.grid(column=0, row=0, sticky='w')\n\n self.numFitsToAdd = Tkinter.StringVar(f, \"1\")\n e = Tkinter.Entry(f, width=5, textvariable=self.numFitsToAdd)\n e.grid(column=1, row=0, sticky='w', padx=5)\n\n l = Tkinter.Label(f, text='fit(s) to list (empty to add all fits to list)')\n l.grid(column=2, row=0, sticky='w')\n\n\n forow += 1\n\n f = Tkinter.Frame(fopt)\n f.grid(column=0, row=forow, sticky='w')\n\n oft = Hybrid.Checkbutton(f, 'Clashes using symmetry:', False)\n oft.button.grid(row = 0, column = 0, sticky = 'w')\n self.calcSymmetryClashes = oft.variable\n\n self.symmetryString = Tkinter.StringVar(f)\n e = Tkinter.Entry(f, width=10, textvariable=self.symmetryString)\n e.grid(column=1, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(f, text=\"Detect\", command=self.DetectSym)\n b.grid (column=2, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(f, text=\"Show\", command=self.PlaceSym)\n b.grid (column=3, row=0, sticky='w', padx=5)\n\n b = Tkinter.Button(f, text=\"Place\", command=self.PlaceSym2)\n b.grid (column=4, row=0, sticky='w', padx=5)\n\n\n dummyFrame = Tkinter.Frame(parent, relief='groove', borderwidth=1)\n Tkinter.Frame(dummyFrame).pack()\n dummyFrame.grid(row=row,column=0,columnspan=7, pady=7, sticky='we')\n\n row = row + 1\n\n global msg\n msg = Tkinter.Label(parent, width = 60, anchor = 'w', justify = 'left', fg=\"red\")\n msg.grid(column=0, row=row, sticky='ew')\n self.msg = msg\n row += 1\n\n umsg ( 'To cite Segger in your paper, please press the Help button for more information.')\n\n self.SetResolution()\n\n chimera.openModels.addRemoveHandler(self.ModelClosed, None)\n\n mlist = OML(modelTypes = [chimera.Molecule])\n if mlist:\n #self.struc.set(self.menu_name(mlist[0]))\n self.cur_mol = mlist[0]\n print \" - set fit mol/map: %s\" % self.cur_mol.name\n self.struc.set ( self.cur_mol.name + \" 
(%d)\" % self.cur_mol.id)\n\n if dev_menus :\n self.optionsPanel.set(True)\n\n\n self.saveFrames = False\n self.frameAt = 0\n\n\n\n\n def PlotFitScores ( self ) :\n\n\n # self.list_fits.append((fmap, dmap, fmap.M, corr, atomI, bbI, bbC, hdo, regions))\n\n print \"Plotting %d fits:\" % len ( self.list_fits )\n\n if len ( self.list_fits ) == 0 :\n umsg ( \"No fits in list to plot\" )\n return\n\n\n minCorr = 1e9\n maxCorr = 0\n for fmap, dmap, M, corr, atomI, bbI, bbC, hdo, regions in self.list_fits :\n if maxCorr < corr : maxCorr = corr\n if minCorr > corr : minCorr = corr\n\n print \" - maxCorr: %.3f minCorr: %.3f\" % (maxCorr, minCorr)\n\n minCorr, maxCorr = 0, 1\n\n w = 600\n h = 400\n import PIL\n from PIL import Image, ImageDraw\n\n im = PIL.Image.new ( 'RGB', (w,h), (255,255,255) )\n\n #im.putpixel ( (i,j), (fclr[0], fclr[1], fclr[2]) )\n\n chartW = w - 40\n chartH = h - 40\n chartX = 20\n chartY = 20\n\n draw = ImageDraw.Draw(im) # Create a draw object\n\n lineClr = (120,120,120)\n draw.rectangle((10, h-10, w-10, h-10), fill=lineClr, outline=lineClr)\n draw.rectangle((10, 10, 10, h-10), fill=lineClr, outline=lineClr)\n\n xAt = chartX\n for fmap, dmap, M, corr, atomI, bbI, bbC, hdo, regions in self.list_fits :\n\n barWidth = int ( float(chartW) / float(len(self.list_fits)) )\n xPos = int ( xAt + barWidth/2 )\n\n x1 = xAt\n x2 = xAt + barWidth\n if ( barWidth > 3 ) :\n x1 = x1 + 1\n x2 = x2 - 2\n\n yTop = int ( h - ( chartY + (corr - minCorr) * chartH / (maxCorr - minCorr) ) )\n yBot = int ( h - chartY )\n\n lineClr = ( int(rand()*255.0), int(rand()*255.0), int(rand()*255.0) )\n draw.rectangle((x1, yTop, x2, yBot), fill=lineClr, outline=lineClr)\n\n print \"x:%d barW %.2f cor %.2f height %.3f yTop %d yBot %d\" % ( xAt, barWidth, corr, chartH, yTop, yBot )\n\n xAt = xAt + barWidth\n\n\n #im.save ( \"plot.png\", 'PNG' )\n\n def save ( okay, dialog ):\n if okay:\n paths = dialog.getPaths ( )\n if paths:\n path = paths[0]\n umsg ( \"Saved to \" + path )\n im.save ( path, 'PNG' )\n\n idir = None\n ifile = None\n\n from OpenSave import SaveModeless\n SaveModeless ( title = 'Save Plot',\n filters = [('PNG', '*.png', '.png')],\n initialdir = idir, initialfile = ifile, command = save )\n\n\n\n\n\n\n def PlotFitScores_Experimental ( self ) :\n\n print \"Plotting fits:\"\n\n N = int ( self.numFitsToAdd.get() )\n\n totAngles = 0\n minCorr = 1e9\n maxCorr = 0\n for corr, M, regions, stats in self.cfits [0 : N-1] :\n print \" - #fits:%d maxAngle:%.1f maxShift:%.2f maxHeight:%.2f\" % ( stats['numFits'], stats['maxAngle'], stats['maxShift'], stats['maxHeight'] )\n totAngles = totAngles + stats['maxAngle']\n if maxCorr < corr : maxCorr = corr\n if minCorr > corr-stats['maxHeight'] : minCorr = corr-stats['maxHeight']\n\n minCorr, maxCorr = 0, 1\n\n print \" - totAngles: %.2f, maxCorr: %.3f minCorr: %.3f\" % (totAngles, maxCorr, minCorr)\n\n w = 600\n h = 400\n import PIL\n from PIL import Image, ImageDraw\n\n im = PIL.Image.new ( 'RGB', (w,h), (255,255,255) )\n\n #im.putpixel ( (i,j), (fclr[0], fclr[1], fclr[2]) )\n\n chartW = w - 40\n chartH = h - 40\n chartX = 20\n chartY = 20\n\n draw = ImageDraw.Draw(im) # Create a draw object\n\n lineClr = (120,120,120)\n draw.rectangle((10, h-10, w-10, h-10), fill=lineClr, outline=lineClr)\n draw.rectangle((10, 10, 10, h-10), fill=lineClr, outline=lineClr)\n\n xAt = chartX\n for corr, M, regions, stats in self.cfits [0 : N-1] :\n\n barWidth = int ( max ( 1, numpy.floor ( stats['maxAngle'] * float(chartW) / totAngles ) ) )\n xPos = int ( xAt + barWidth/2 )\n\n x1 = 
xAt\n x2 = xAt + barWidth\n if ( barWidth > 3 ) :\n x1 = x1 + 1\n x2 = x2 - 2\n\n yTop = int ( h - ( chartY + (corr - minCorr) * chartH / (maxCorr - minCorr) ) )\n yBot = int ( h - ( chartY + (corr - stats['maxHeight'] - minCorr) * chartH / (maxCorr - minCorr) ) )\n\n lineClr = ( int(rand()*255.0), int(rand()*255.0), int(rand()*255.0) )\n draw.rectangle((x1, yTop, x2, yBot), fill=lineClr, outline=lineClr)\n\n print \"maxA %.2f x:%d barW %.2f cor %.2f height %.3f yTop %d yBot %d\" % ( stats['maxAngle'], xAt, barWidth, corr, stats['maxHeight'], yTop, yBot )\n\n xAt = xAt + barWidth\n\n\n #im.save ( \"plot.png\", 'PNG' )\n\n def save ( okay, dialog, lfits = lfits ):\n if okay:\n paths = dialog.getPaths ( )\n if paths:\n path = paths[0]\n umsg ( \"Saved to \" + path )\n im.save ( path, 'PNG' )\n\n idir = None\n ifile = None\n\n from OpenSave import SaveModeless\n SaveModeless ( title = 'Save Plot',\n filters = [('PNG', '*.png', '.png')],\n initialdir = idir, initialfile = ifile, command = save )\n\n\n\n\n def ExportFitScores ( self ) :\n\n num = self.fit_listbox.size()\n if num == 0 :\n umsg ( \"No fits to export\" )\n return\n\n scores = []\n\n # (fmap, dmap, fmap.M, corr, atomI, bbI, regions)\n\n for lf in self.list_fits :\n scores.append ( [ lf[3], lf[4], lf[5], lf[6], lf[7] ] )\n\n def ZZ ( scs ) :\n if len(scs) < 3 :\n return 0.0\n\n best_score = scs[0]\n other_scores = scs[1:14]\n avg = numpy.average ( other_scores )\n stdev = numpy.std ( other_scores )\n return [(best_score - avg) / stdev, best_score, avg, stdev]\n\n def save ( okay, dialog, scores = scores ):\n if okay:\n paths = dialog.getPaths ( )\n if paths:\n path = paths[0]\n f = open ( path, \"a\" )\n\n f.write ( \"%s\\t%s\\t%s\\t%s\\t%s\\n\" % (\n \"Cross-correlation\",\n \"Atom Inclusion\",\n \"Backbone-Atom Inclusion\",\n \"Clash score\",\n \"Density occupancy\" ) )\n\n s1, s2, s3, s4, s5 = [], [], [], [], []\n for s in scores :\n f.write ( \"%f\\t%f\\t%f\\t%f\\t%f\\n\" % (s[0], s[1], s[2], s[3], s[4]) )\n s1.append ( s[0] )\n s2.append ( s[1] )\n s3.append ( s[2] )\n s4.append ( s[3] )\n s5.append ( s[4] )\n\n Z1, Z2, Z3, Z4, Z5 = ZZ(s1), ZZ(s2), ZZ(s3), ZZ(s4), ZZ(s5)\n #f.write ( \"Zscores: %f\\t%f\\t%f\\t%f\\t%f\\n\" % (Z1[0], Z2[0], Z3[0], Z4[0], Z5[0]) )\n f.write ( \"Score\\tZ-score\\tTop score\\tMean\\tSTDev\\n\" )\n f.write ( \"Cross-correlation:\\t%f\\t%f\\t%f\\t%f\\n\" % (Z1[0], Z1[1], Z1[2], Z1[3]) )\n f.write ( \"Atom Inclusion:\\t%f\\t%f\\t%f\\t%f\\n\" % (Z2[0], Z2[1], Z2[2], Z2[3]) )\n f.write ( \"Density occupancy:\\t%f\\t%f\\t%f\\t%f\\n\" % (Z5[0], Z5[1], Z5[2], Z5[3]) )\n f.write ( \"Clash score:\\t%f\\t%f\\t%f\\t%f\\n\" % (Z4[0], Z4[1], Z4[2], Z4[3]) )\n\n\n f.close ()\n umsg ( \"Wrote %d fits to %s\" % ( len(scores), path ) )\n\n\n idir = None\n ifile = None\n\n first_fit = self.list_fits[0]\n fit_map = first_fit[0]\n ref_map = first_fit[1]\n regions = first_fit[8]\n\n fit_path = \"\"\n try : fit_path = fit_map.mols[0].openedAs[0]\n except : fit_path = fit_map.data.path\n\n import os.path\n idir, ifile = os.path.split(fit_path)\n base, suf = os.path.splitext(ifile)\n map_base, map_suf = os.path.splitext( ref_map.name )\n ifile = base + \"_fits_in_%s_regs\" % map_base\n\n for r in regions :\n ifile = ifile + (\"_%d\" % r.rid)\n\n from OpenSave import SaveModeless\n SaveModeless ( title = 'Save Fit Scores',\n filters = [('TXT', '*.txt', '.txt')],\n initialdir = idir, initialfile = ifile, command = save )\n\n\n def add_fit (self, fmap, dmap):\n\n corr = fmap.fit_score\n regions = fmap.fit_regions\n atomI = 
fmap.atomInclusion\n bbI = fmap.bbAtomInclusion\n bbC = fmap.bbClashes\n hdo = fmap.hdoScore\n\n ids = ','.join(['%d' % r.rid for r in regions])\n line = '%8.4f %8.4f %8.4f %8.4f %8.4f %10s %10s %10s' % (corr, atomI, bbI, bbC, hdo, fmap.struc_name, dmap.name, ids)\n self.list_fits.append((fmap, dmap, fmap.M, corr, atomI, bbI, bbC, hdo, regions))\n self.fit_listbox.insert('end', line)\n\n def fit_selection_cb (self, event) :\n\n lfits = self.selected_listbox_fits()\n if len(lfits) == 0:\n return\n\n fmap, dmap, mat, corr, aI, bI, bC, hdo, regions = lfits[0]\n for mol in fmap.mols :\n if mol.__destroyed__:\n umsg('Fit molecule was closed')\n else:\n self.place_molecule(fmap, mat, dmap)\n self.make_regions_transparent(regions)\n\n\n def place_molecule(self, fmap, mat, dmap):\n\n import numpy\n tf = numpy.array(mat[:3,:])\n try :\n xf = dmap.openState.xform\n except :\n print \"Reference map no longer open\"\n return\n\n com = chimera_xform(tf).getTranslation()\n q = Segger.quaternion.Quaternion()\n q.fromXform ( chimera_xform(tf) )\n\n print \"COM: \", com, \"Q: %.6f\" % q.s, q.v\n\n xf.multiply(chimera_xform(tf))\n\n try :\n fmap.openState.xform = xf\n except :\n print \"Fitted map no longer open\"\n return\n\n fmap.M = mat\n\n for mol in fmap.mols :\n mol.openState.xform = xf\n\n def make_regions_transparent(self, regions):\n\n for r in regions:\n if r.has_surface():\n c = r.color\n r.surface().color = ( c[0], c[1], c[2], REG_OPACITY )\n\n def selected_listbox_fits(self):\n\n return [self.list_fits[int(i)] for i in self.fit_listbox.curselection()]\n\n\n def delete_all_fit_cb ( self ) :\n\n num = self.fit_listbox.size()\n indices = range ( num )\n indices.reverse()\n\n for i in indices:\n self.fit_listbox.delete(i)\n del self.list_fits[i]\n\n\n def delete_fit_cb(self):\n\n indices = [int(i) for i in self.fit_listbox.curselection()]\n if len(indices) == 0:\n status('No fits chosen from list')\n return\n indices.sort()\n indices.reverse()\n for i in indices:\n self.fit_listbox.delete(i)\n del self.list_fits[i]\n\n status('Deleted %d fits' % len(indices))\n\n\n def place_copies_cb(self):\n\n lfits = self.selected_listbox_fits()\n if len(lfits) == 0:\n status('No fits chosen from list')\n return\n\n\n fmap, dmap = lfits[0][0], lfits[0][1]\n dmapM = xf_2_MM ( dmap.openState.xform )\n\n nPlaced = 0\n for fmap, dmap, mat, corr, aI, bI, bC, bO, regions in lfits:\n if len ( fmap.mols ) > 0 :\n self.PlaceCopy(fmap.mols, mat, dmap, (rand(),rand(),rand(),1) )\n nPlaced += 1\n\n status('Placed %d models' % nPlaced)\n\n\n\n\n def place_map_copies_cb ( self ) :\n\n lfits = self.selected_listbox_fits()\n if len(lfits) == 0 :\n status('No fits chosen from list')\n return\n\n fmap, dmap = lfits[0][0], lfits[0][1]\n dmapM = xf_2_MM ( dmap.openState.xform )\n\n for fmap, dmap, mat, corr, aI, bI, bC, bO, regions in lfits:\n self.place_molecule(fmap, mat, dmap)\n sf = \"_F2Rid%d.mrc\" % regions[0].rid\n pmap = place_map_resample ( fmap, dmap, sf )\n\n try :\n self.fitted_mols = self.fitted_mols + [pmap]\n except :\n self.fitted_mols = [pmap]\n\n\n status('Placed %d models' % len(lfits))\n\n\n def save_map_resample ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n fmap = self.cur_mol\n if fmap == None :\n print \"Select the map to save\"\n return\n\n if type(fmap) != VolumeViewer.volume.Volume :\n umsg ( \"Please select a map to save (molecule is selected)\" )\n\n else :\n place_map_resample ( fmap, dmap, \"_F2Rid.mrc\" )\n\n\n def extractCubeMap 
( self ) :\n\n dmap = segmentation_map()\n if dmap == None : umsg ( \"%s is not open\" % self.dmap.get() ); return\n\n fmap = self.cur_mol\n if fmap == None :\n print \"Select the map to save\"\n return\n\n print \"Saving \", fmap.name\n\n\n npoints = grid_indices ( dmap.data.size, numpy.single) # i,j,k indices\n transform_vertices ( npoints, dmap.data.ijk_to_xyz_transform )\n\n dvals = fmap.interpolated_values ( npoints, dmap.openState.xform )\n #dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )\n #nze = numpy.nonzero ( dvals )\n\n nmat = dvals.reshape( dmap.data.size )\n #f_mat = fmap.data.full_matrix()\n #f_mask = numpy.where ( f_mat > fmap.surface_levels[0], numpy.ones_like(f_mat), numpy.zeros_like(f_mat) )\n #df_mat = df_mat * f_mask\n\n ndata = VolumeData.Array_Grid_Data ( nmat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n fmap_base = os.path.splitext (fmap.name)[0]\n dmap_base = os.path.splitext (dmap.name)[0]\n fmap_path = os.path.splitext (fmap.data.path)[0]\n dmap_path = os.path.splitext (dmap.data.path)[0]\n\n nv.name = \"emd_1093_62.mrc\"\n nv.openState.xform = dmap.openState.xform\n\n\n\n\n def close_copies_cb ( self ) :\n\n try :\n len ( self.fitted_mols )\n except :\n umsg ( \"No fitted molecules found\" )\n return\n\n chimera.openModels.close ( self.fitted_mols )\n\n\n\n def Options(self):\n\n self.optionsPanel.set(not self.optionsPanel.get())\n\n\n def ModelClosed(self, trigger, n, mlist):\n\n # Clear molecule menu if selected molecule is closed.\n\n mvar = self.struc\n #if len( mvar.get() ) > 0 and len( self.StructuresToFit() ) == 0:\n # mvar.set('')\n\n found = False\n for m in chimera.openModels.list() :\n if m.name + \" (%d)\" % m.id == mvar.get() :\n found = True\n\n if not found :\n #print \" - closed model - selected model not found\", mvar.get()\n mvar.set('')\n self.cur_mol = None\n\n\n def SetResolution ( self ):\n\n dmap = segmentation_map()\n if dmap == None : return\n\n if len ( self.simRes.get() ) == 0:\n res = min(dmap.data.step) * 3\n self.simRes.set ( '%.3g' % res )\n self.simGridSp.set ( '%.3g' % (res/3.0) )\n\n\n def CurrentSegmentation ( self, warn = True ):\n\n return current_segmentation(warn)\n\n\n def StrucMenu0 ( self ) :\n\n self.strucMB.menu.delete ( 0, 'end' ) # Clear menu\n\n self.strucMB.menu.add_radiobutton ( label=\"Open structures:\" )\n self.strucMB.menu.add_separator()\n\n id_struc = {}\n open_mols = {}\n for m in OML() :\n if type(m) == chimera.Molecule or type(m) == VolumeViewer.volume.Volume:\n try : id_struc [ m.id ].append ( m )\n except : id_struc [ m.id ] = [m]\n open_mols[m.name] = True\n else :\n #print type(m)\n pass\n\n if 1 :\n cur_sel_found = False\n for mid, mols in id_struc.iteritems() :\n\n if len(mols) == 1 or self.lump_subids.get () :\n mol = mols[0]\n label = self.menu_name(mol)\n self.strucMB.menu.add_radiobutton ( label= label,\n variable=self.struc,\n command = lambda mol=mol: self.StrucSelected(mol) )\n if label == self.struc.get() :\n cur_sel_found = True\n\n else :\n for mol in mols :\n label = self.menu_name(mol)\n self.strucMB.menu.add_radiobutton ( label=label,\n variable=self.struc,\n command = lambda mol=mol: self.StrucSelected(mol) )\n if label == self.struc.get() :\n cur_sel_found = True\n\n\n if not cur_sel_found :\n self.struc.set ( \"\" )\n self.cur_mol = None\n print \" - set fit mol/map: ?\"\n\n if 0 :\n dmap = segmentation_map()\n if 
dmap == None : return\n\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n files = os.listdir ( path );\n mols_in_path = []\n for f in files :\n if f.find ( \".pdb\" ) >= 0 and open_mols.has_key(f) == False :\n mols_in_path.append ( f )\n\n if len ( mols_in_path ) == 0 : return\n\n self.strucMB.menu.add_separator()\n self.strucMB.menu.add_radiobutton ( label=\"In %s:\" % path )\n self.strucMB.menu.add_separator()\n\n for fm in mols_in_path :\n self.strucMB.menu.add_radiobutton ( label=fm, variable=self.struc,\n command = self.StrucSelected )\n\n\n\n def StrucMenu ( self ) :\n\n self.strucMB.menu.delete ( 0, 'end' ) # Clear menu\n\n self.strucMB.menu.add_radiobutton ( label=\"Open structures:\" )\n self.strucMB.menu.add_separator()\n\n id_struc = {}\n open_mols = {}\n for m in OML() :\n if type(m) == chimera.Molecule or type(m) == VolumeViewer.volume.Volume:\n\n self.strucMB.menu.add_radiobutton ( label= m.name + \" (%d)\" % m.id,\n variable = self.struc,\n command = lambda m=m: self.StrucSelected(m, None) )\n\n open_mols[m.name] = True\n\n if 0 :\n dmap = segmentation_map()\n if dmap == None : return\n\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n files = os.listdir ( path );\n mols_in_path = []\n for f in files :\n if f.find ( \".pdb\" ) >= 0 and open_mols.has_key(f) == False :\n mols_in_path.append ( f )\n\n if len ( mols_in_path ) == 0 : return\n\n self.strucMB.menu.add_separator()\n self.strucMB.menu.add_radiobutton ( label=\"In %s:\" % path )\n self.strucMB.menu.add_separator()\n\n for fm in mols_in_path :\n self.strucMB.menu.add_radiobutton ( label=fm, variable=self.struc,\n command = lambda fm=fm: self.StrucSelected(None, fm) )\n\n\n\n def StrucSelected ( self, mol, molNameToLoad ) :\n\n # Check if selected entry is an existing open molecule.\n\n if mol != None :\n self.cur_mol = mol\n\n if molNameToLoad != None :\n\n dmap = segmentation_map()\n if dmap == None : print \" - no map selected\"; return\n\n path = os.path.dirname(dmap.data.path) + os.path.sep\n\n print \" - opening: %s\" % molNameToLoad\n fmol = chimera.openModels.open ( path + molNameToLoad )[0]\n\n self.struc.set(fmol.name + \" (%d)\" % fmol.id)\n self.cur_mol = fmol\n\n print \" - set fit mol/map: %s\" % self.cur_mol.name\n\n\n def StructuresToFit ( self ) :\n #t = self.struc.get()\n #return [m for m in OML() if self.menu_name(m) == t]\n return [self.cur_mol]\n\n\n #def menu_name(self, mol):\n # show_subid = not self.lump_subids.get() and mol.subid != 0\n # id = '%d.%d' % (mol.id, mol.subid) if show_subid else '%d' % mol.id\n # mname = \"%s (%s)\" % (mol.name, id)\n # return mname\n\n\n def Place ( self ) :\n\n\t self.save_map_resample ();\n\n\n def Fit ( self ) :\n\n\n dmap = segmentation_map()\n if dmap == None :\n umsg ( \"Density map not found or not selected\" )\n return\n\n if self.cur_mol == None :\n umsg ( \"Please select a structure or map to fit\" )\n return\n\n\n\n from chimera import tasks, CancelOperation\n task = tasks.Task('Fitting %s to %s' % (self.cur_mol.name, dmap.name), modal = True)\n print \"Started task...\"\n\n try:\n smod = self.Fit_(dmap, self.cur_mol, task)\n except CancelOperation:\n umsg('Cancelled fitting')\n return None\n finally:\n task.finished()\n\n\n\n def Fit_ ( self, dmap, fmol, task=None ) :\n\n self.doStop = False\n\n #if type(fmol) == VolumeViewer.volume.Volume :\n if type(fmol) == chimera.Molecule :\n\n # this property added to self will indicate to functions called\n # below whether we are fitting a map instead of a molecule\n self.map_to_fit = None\n\n 
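# Note (editor's comment, summarizing the surrounding code): a Molecule was selected here,\n # so self.map_to_fit stays None and the alignment routines below (FitToEachRegion,\n # FitMapToSelRGroup, ...) fetch a simulated density for it via MoleculeMap()/GenStrucMap(),\n # i.e. a molmap run at the requested resolution and grid spacing. In the else-branch a\n # Volume is fitted directly and fmap.preM is built from prAxes(): conceptually\n # preM = R(V) * T(-COM), centering the thresholded points at their center of mass and\n # rotating the principal axes onto x/y/z so every search starts from a canonical pose.\n 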
if fmol.openState is dmap.openState :\n umsg('Molecule cannot be moved relative to map\\nbecause they have the same model id number.')\n return\n\n else :\n # looks like we are fitting a map, not a molecule\n fmap = fmol\n\n self.map_to_fit = fmap\n fmap.mols = []\n fmap.struc_name = fmap.name\n\n points, weights = fit_points ( fmap )\n # print \"Points : \", points\n\n fmap.COM, fmap.U, fmap.S, fmap.V = prAxes ( points )\n print \"COM : \", fmap.COM\n print \"U : \", fmap.U\n\n toCOM = numpy.matrix ( [\n [ 1, 0, 0, -fmap.COM[0] ],\n [ 0, 1, 0, -fmap.COM[1] ],\n [ 0, 0, 1, -fmap.COM[2] ],\n [ 0, 0, 0, 1 ] ] )\n\n mR = numpy.matrix ( [\n [ fmap.V[0,0], fmap.V[0,1], fmap.V[0,2], 0 ],\n [ fmap.V[1,0], fmap.V[1,1], fmap.V[1,2], 0 ],\n [ fmap.V[2,0], fmap.V[2,1], fmap.V[2,2], 0 ],\n [ 0, 0, 0, 1 ] ] )\n\n # this matrix centers the map and aligns its principal axes\n # to the x-y-z axes\n fmap.preM = mR * toCOM\n\n\n alignTo = self.alignTo.get()\n\n if alignTo == \"combined_selected_regions\" :\n self.FitMapToSelRGroup ( dmap, task )\n\n elif alignTo == \"each_selected_region\" :\n self.FitToEachRegion ( dmap, task )\n\n elif alignTo == \"around_selected\" :\n self.FitMapToRegionsAroundSel ( dmap, task )\n\n elif alignTo == \"all_groups\" :\n self.FitMapToRGroups ( dmap, task )\n\n dmap = segmentation_map()\n #if dmap:\n # dmap.display = False\n\n\n def Stop (self) :\n print \"will stop...\"\n self.doStop = True\n\n\n\n def DetectSym ( self ) :\n\n dmap = segmentation_map()\n\n if dmap == None:\n umsg ( \"Please select a map in the Segment Map dialog\" )\n return []\n\n print \"Symmetry for\", dmap.name\n\n from Measure.symmetry import find_point_symmetry\n\n syms, msg = find_point_symmetry ( dmap, nMax=8 )\n\n if syms is None :\n umsg ( \"No symmetry detected for %s\" % dmap.name )\n self.symmetryString.set ( \"No symmetry detected\" )\n return []\n\n umsg ( msg )\n start = msg.find(': ')+2\n end = msg.find (', center')\n self.symmetryString.set ( msg [start : end] )\n\n for i, sym in enumerate ( syms ) :\n #print i, \" -> \", sym\n pass\n\n return syms\n\n\n def PlaceSym ( self ) :\n\n fmap = self.MoleculeMap()\n dmap = segmentation_map()\n\n if fmap == None or dmap == None:\n umsg ( \"Please select an open structure to fit\" )\n return\n\n from Measure.symmetry import centers_and_points\n\n syms = []\n esym = self.symmetryString.get()\n if 1 or len (esym) == 0 :\n syms = self.DetectSym ()\n else :\n print \"Custom sym:\", esym\n if ( esym == \"C3\" ) :\n\n print \" - dmap: \", dmap.name\n mpoints, mpoint_weights = fit_points(dmap)\n COM, U, S, V = prAxes ( mpoints )\n print \"COM: \", COM\n print \"U: \", U\n print \"S: \", S\n\n ax = chimera.Vector ( 0, 1, 0 )\n #ax = dmap.openState.xform.inverse().apply ( ax )\n\n syms.append ( Matrix.identity_matrix () )\n rm1 = Matrix.rotation_transform ( (ax.x,ax.y,ax.z), 360.0/3.0, COM )\n print rm1\n syms.append ( rm1 )\n #syms.append ( Matrix.rotation_transform ( (1.0,0.0,0.0), 2.0*360.0/3.0 ) )\n\n #centers, xyz, w = centers_and_points(dmap)\n #print \" - center:\", centers\n #ctf = Matrix.translation_matrix([-x for x in COM[0]])\n #syms = Matrix.coordinate_transform_list(syms, ctf)\n\n\n smols = []\n\n for si, sym in enumerate ( syms [1 : ] ) :\n\n T = numpy.array ( sym )\n #print \"\\nSym %d\\n\" % si, T\n\n xf = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n M = xf_2_MM ( xf )\n\n #mols = self.PlaceCopy (fmap.mols, M*fmap.M, dmap, (0,0,0,1) )\n mols = self.PlaceCopy 
(fmap.mols, M*fmap.M, dmap, (.4, .8, .4, 1) )\n for m in mols : m.openState.xform = dmap.openState.xform\n smols = smols + mols\n\n return smols\n\n\n\n def PlaceSym2 ( self ) :\n\n #fmap = self.MoleculeMap()\n dmap = segmentation_map()\n\n\n label = self.struc.get()\n sel_str = \"#\" + label [ label.rfind(\"(\")+1 : label.rfind(\")\") ]\n fmol = None\n try :\n fmol = chimera.selection.OSLSelection(sel_str).molecules()[0]\n except :\n umsg ( \"%s not open - \" % self.struc.get() ); return\n\n\n if fmol == None or dmap == None:\n umsg ( \"Please select an open structure and/or map\" )\n return\n\n from Measure.symmetry import centers_and_points\n\n syms = []\n esym = self.symmetryString.get()\n if 1 or len (esym) == 0 :\n syms = self.DetectSym ()\n else :\n print \"Custom sym:\", esym\n if ( esym == \"C3\" ) :\n\n print \" - dmap: \", dmap.name\n mpoints, mpoint_weights = fit_points(dmap)\n COM, U, S, V = prAxes ( mpoints )\n print \"COM: \", COM\n print \"U: \", U\n print \"S: \", S\n\n ax = chimera.Vector ( 0, 1, 0 )\n #ax = dmap.openState.xform.inverse().apply ( ax )\n\n syms.append ( Matrix.identity_matrix () )\n rm1 = Matrix.rotation_transform ( (ax.x,ax.y,ax.z), 360.0/3.0, COM )\n print rm1\n syms.append ( rm1 )\n #syms.append ( Matrix.rotation_transform ( (1.0,0.0,0.0), 2.0*360.0/3.0 ) )\n\n #centers, xyz, w = centers_and_points(dmap)\n #print \" - center:\", centers\n #ctf = Matrix.translation_matrix([-x for x in COM[0]])\n #syms = Matrix.coordinate_transform_list(syms, ctf)\n\n\n from SWIM import SetBBAts\n SetBBAts ( fmol )\n\n if 0 :\n smols = []\n\n #mol = fmap.mols[0]\n cid = fmol.residues[0].id.chainId\n print \"Symming %s, chain %s\" % (fmol.name, cid)\n nmol = CopyChain ( fmol, None, cid, cid, dmap.openState.xform.inverse() )\n chimera.openModels.add ( [nmol] )\n\n chains = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890abcdefghijklmnopqrstuvwxyz\"\n atCi = 0\n\n for si, sym in enumerate ( syms [1 : ] ) :\n\n T = numpy.array ( sym )\n #print \"\\nSym %d\\n\" % si, T\n\n xf = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n #M = xf_2_MM ( xf )\n\n if chains[atCi] == cid :\n atCi += 1\n ncid = chains[atCi]\n\n xf1 = dmap.openState.xform.inverse()\n xf1.premultiply (xf)\n\n print \" - %d - %s\" % (atCi, ncid)\n\n CopyChain ( fmol, nmol, cid, ncid, xf1 )\n\n atCi += 1\n #mols = self.PlaceCopy (fmap.mols, M*fmap.M, dmap, (0,0,0,1) )\n #mols = self.PlaceCopy (fmap.mols, M*fmap.M, dmap, (.4, .8, .4, 1) )\n\n #for m in mols :\n # m.openState.xform = dmap.openState.xform\n #smols = smols + mols\n\n #break\n\n return smols\n\n else :\n\n cmap = {}\n for r in fmol.residues :\n cmap[r.id.chainId] = 1\n\n chains = cmap.keys()\n\n for si, sym in enumerate ( syms [1 : ] ) :\n\n T = numpy.array ( sym )\n xf = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n xf1 = dmap.openState.xform.inverse()\n xf1.premultiply (xf)\n\n nmol = CopyMolX ( fmol, xf1 )\n nmol.name = fmol.name + \"__sym%d\" % si\n chimera.openModels.add ( [nmol] )\n print \".\",\n\n print \"\"\n\n\n\n\n def PlaceSymOld ( self ) :\n\n fmap = self.MoleculeMap()\n dmap = segmentation_map()\n\n if fmap == None or dmap == None:\n print \"Fit or segmentation map not found\"\n return\n\n fpoints = grid_indices(dmap.data.size, numpy.single) # i,j,k indices\n transform_vertices( fpoints, dmap.data.ijk_to_xyz_transform )\n mat = dmap.data.full_matrix()\n fpoint_weights = 
numpy.ravel(mat).astype(numpy.single)\n\n threshold = dmap.surface_levels[0]\n ge = numpy.greater_equal(fpoint_weights, threshold)\n fpoints = numpy.compress(ge, fpoints, 0)\n fpoint_weights = numpy.compress(ge, fpoint_weights)\n nz = numpy.nonzero( fpoint_weights )[0]\n\n print \"%d above %f in %s\\n\" % (len(nz), threshold, dmap.name)\n\n COM, U, S, V = prAxes ( fpoints )\n\n print \"COM: \", COM\n print \"U: \", U\n print \"S: \", S\n\n T0 = numpy.matrix ( [\n [ 1, 0, 0, -COM[0] ],\n [ 0, 1, 0, -COM[1] ],\n [ 0, 0, 1, -COM[2] ],\n [ 0, 0, 0, 1 ] ] )\n\n T = numpy.matrix ( [\n [ 1, 0, 0, COM[0] ],\n [ 0, 1, 0, COM[1] ],\n [ 0, 0, 1, COM[2] ],\n [ 0, 0, 0, 1 ] ] )\n\n\n fmapM = xf_2_MM ( fmap.openState.xform )\n dmapM = xf_2_MM ( dmap.openState.xform )\n\n\n smols = []\n\n if 0 :\n M = xf_2_MM ( chimera.Xform.rotation( 0, 0, 1, 360.0/7.0 ) )\n mols = self.PlaceCopy (fmap.mols, M*fmap.M, dmap, (0,0,0,1) )\n for m in mols : m.openState.xform = dmap.openState.xform\n smols = smols + mols\n\n M = xf_2_MM ( chimera.Xform.rotation( 0, 0, 1, -360.0/7.0 ) )\n mols = self.PlaceCopy (fmap.mols, M*fmap.M, dmap, (0,0,0,1))\n for m in mols : m.openState.xform = dmap.openState.xform\n smols = smols + mols\n\n M1 = xf_2_MM ( chimera.Xform.rotation( 1, 0, 0, 180.0 ) )\n M = xf_2_MM ( chimera.Xform.rotation( 0, 0, 1, 2.0*360.0/7.0 ) )\n mols = self.PlaceCopy (fmap.mols, M*M1*fmap.M, dmap, (0,0,0,1))\n for m in mols : m.openState.xform = dmap.openState.xform\n smols = smols + mols\n\n M1 = xf_2_MM ( chimera.Xform.rotation( 0, 0, 1, 180.0 ) )\n M = xf_2_MM ( chimera.Xform.rotation( 0, 0, 1, 3.0*360.0/7.0 ) )\n mols = self.PlaceCopy (fmap.mols, M*M1*fmap.M, dmap, (0,0,0,1) )\n for m in mols : m.openState.xform = dmap.openState.xform\n smols = smols + mols\n\n\n M = xf_2_MM ( chimera.Xform.rotation( U[0,2], U[1,2], U[2,2], 360.0/7.0 ) )\n mols = self.PlaceCopy (fmap.mols, T*M*T0*fmap.M, dmap, (0,0,0,1) )\n for m in mols : m.openState.xform = dmap.openState.xform\n smols = smols + mols\n\n M = xf_2_MM ( chimera.Xform.rotation( U[0,2], U[1,2], U[2,2], -360.0/7.0 ) )\n mols = self.PlaceCopy (fmap.mols, T*M*T0*fmap.M, dmap, (0,0,0,1) )\n for m in mols : m.openState.xform = dmap.openState.xform\n smols = smols + mols\n\n return smols\n\n\n def StrucCenter ( self ) :\n\n label = self.struc.get()\n sel_str = \"#\" + label [ label.rfind(\"(\")+1 : label.rfind(\")\") ]\n mols = centerMol ( sel_str )\n\n mm = segmentation_map()\n if mm :\n for mol in mols :\n mol.openState.xform = mm.openState.xform\n\n\n\n def StrucShowAxes ( self ) :\n\n if len ( self.struc.get() ) == 0 :\n print \"Please select a structure\"\n return\n\n label = self.struc.get()\n sel_str = \"#\" + label [ label.rfind(\"(\")+1 : label.rfind(\")\") ]\n mols = centerMol ( sel_str )\n\n if len(mols) == 0:\n print self.struc.get(), \"not open\";\n return\n\n fmol = mols[0]\n print \"Showing axes for\", fmol.name\n print \" - COM:\", fmol.COM\n print \" - extents:\", fmol.Extents\n\n try :\n chimera.openModels.close ( fmol.axes )\n fmol.axes = None\n except :\n pass\n\n import axes\n fmol.axes = axes.AxesMod ( Extents = fmol.Extents, rad = 1.0,\n alignTo = fmol )\n fmol.axes.name = os.path.splitext (fmol.name)[0] + \"_axes\"\n\n\n\n def StrucHideAxes ( self ) :\n\n label = self.struc.get()\n sel_str = \"#\" + label [ label.rfind(\"(\")+1 : label.rfind(\")\") ]\n mols = centerMol ( sel_str )\n\n if len(mols) == 0 :\n print self.struc.get(), \"not open\"; return\n\n fmol = mols[0]\n\n try :\n chimera.openModels.close ( fmol.axes )\n fmol.axes = None\n 
except :\n pass\n\n\n def GenStrucMap ( self, show = True ) :\n\n self.SetResolution()\n\n try : res = float ( self.simRes.get() )\n except :\n umsg ( \"Invalid resolution entered, please enter a number\" )\n return\n\n try : grid = float ( self.simGridSp.get() )\n except :\n umsg ( \"Invalid grid spacing entered, using resolution/3.0\" )\n grid = res/3.0\n\n umsg ( \"Simulating map res %.3f, grid %.3f\" % (res, grid) )\n\n if len(self.struc.get()) == 0 :\n umsg ( \"Please select a Structure to fit in the field above\" )\n return\n\n label = self.struc.get()\n sel_str = label [ label.rfind(\"(\")+1 : label.rfind(\")\") ]\n mols = centerMol ( \"#\" + sel_str )\n if len(mols) == 0 :\n umsg ( \"%s not open\" % self.struc.get() )\n return\n\n mol = mols[0]\n\n base = os.path.splitext(mol.name)[0]\n mname = base + \"_\" + sel_str + \"_r%.1f_sp%.1f\" % (res, grid)\n #if self.useLaplace.get() :\n # mname = mname + \"_L\"\n mname = mname + \".mrc\"\n\n mv = getMod ( mname )\n if mv != None :\n print \"Found\", mname\n return mv\n\n print \"Generating\", mname\n\n #cmd = \"molmap #%s:.C-D@CA %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( sel_str, res, grid )\n cmd = \"molmap #%s %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( sel_str, res, grid )\n print \" -\", cmd\n chimera.runCommand ( cmd )\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n print \" - found\", mod.name\n mv = mod\n mv.name = mname\n break\n\n if mv == None :\n umsg (\"Map not generated - molmap command did not produce expected result.\")\n return\n\n if 0 or self.useLaplace.get() :\n umsg (\"Generating Laplacian...\")\n from VolumeFilter import laplacian\n mvl = laplacian ( mv )\n chimera.openModels.close ( [mv] )\n mv = mvl\n mv.name = mname\n\n mv.display = show\n clr = mol.color.rgba()\n mv.surfacePieces[0].color = ( clr[0], clr[1], clr[2], 1.0 )\n mv.mols = mols\n mv.struc_name = label\n\n # for consistency when fitting maps, which need this pre-transform\n # since they don't get transformed like the molecules\n mv.preM = numpy.matrix ( [\n [ 1, 0, 0, 0 ],\n [ 0, 1, 0, 0 ],\n [ 0, 0, 1, 0 ],\n [ 0, 0, 0, 1 ] ] )\n\n return mv\n\n\n\n\n def GenChMaps ( self, m ) :\n\n try : res = float ( self.simRes.get() )\n except :\n umsg ( \"Invalid resolution entered, please enter a number\" )\n return\n\n try : grid = float ( self.simGridSp.get() )\n except :\n umsg ( \"Invalid grid spacing entered, using resolution/3.0\" )\n grid = res/3.0\n\n\n\n\n if len(self.struc.get()) == 0 :\n umsg ( \"Please select a Structure to fit in the field above\" )\n return\n\n label = self.struc.get()\n sel_str = label [ label.rfind(\"(\")+1 : label.rfind(\")\") ]\n mols = centerMol ( \"#\" + sel_str )\n if len(mols) == 0 :\n umsg ( \"%s not open\" % self.struc.get() )\n return\n\n mol = mols[0]\n\n base = os.path.splitext(mol.name)[0]\n mname = base + \"_\" + sel_str + \"_r%.1f_sp%.1f\" % (res, grid)\n if 0 and self.useLaplace.get() :\n mname = mname + \"_L\"\n mname = mname + \".mrc\"\n\n mv = getMod ( mname )\n if mv != None :\n print \"Found\", mname\n return mv\n\n print \"Generating\", mname\n\n #cmd = \"molmap #%s:.C-D@CA %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( sel_str, res, grid )\n cmd = \"molmap #%s %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( sel_str, res, grid )\n print \" -\", cmd\n chimera.runCommand ( cmd )\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = 
mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n print \" - found\", mod.name\n mv = mod\n mv.name = mname\n break\n\n if mv == None :\n umsg (\"Map not generated - molmap command did not produce expected result.\")\n return\n\n if 0 and self.useLaplace.get() :\n umsg (\"Generating Laplacian...\")\n from VolumeFilter import laplacian\n mvl = laplacian ( mv )\n chimera.openModels.close ( [mv] )\n mv = mvl\n mv.name = mname\n\n mv.display = show\n clr = mol.color.rgba()\n mv.surfacePieces[0].color = ( clr[0], clr[1], clr[2], 1.0 )\n mv.mols = mols\n mv.struc_name = label\n\n # for consistency when fitting maps, which need this pre-transform\n # since they don't get transformed like the molecules\n mv.preM = numpy.matrix ( [\n [ 1, 0, 0, 0 ],\n [ 0, 1, 0, 0 ],\n [ 0, 0, 1, 0 ],\n [ 0, 0, 0, 1 ] ] )\n\n return mv\n\n\n\n\n\n def SaveStrucFit ( self ) :\n\n lfits = self.selected_listbox_fits()\n if len(lfits) == 0:\n status('No fits chosen from list')\n return\n\n def save ( okay, dialog, lfits = lfits ):\n if okay:\n paths = dialog.getPaths ( )\n if paths:\n path = paths[0]\n import Midas\n if len(lfits) > 1 and path.find('%d') == -1:\n base, suf = os.path.splitext(path)\n path = base + '_fit%d' + suf\n for i, (fmap, dmap, mat, corr, aI, bI, bC, hdo, regions) in enumerate(lfits):\n p = path if len(lfits) == 1 else path % (i+1)\n self.place_molecule(fmap, mat, dmap)\n Midas.write(fmap.mols, relModel = dmap, filename = p)\n\n mol = lfits[0][0].mols[0]\n if hasattr(mol, 'openedAs'):\n import os.path\n idir, ifile = os.path.split(mol.openedAs[0])\n base, suf = os.path.splitext(ifile)\n if len(lfits) > 1:\n ifile = base + '_fit%d' + suf\n else:\n ifile = base + '_fit' + suf\n else:\n idir = None\n ifile = None\n\n from OpenSave import SaveModeless\n SaveModeless ( title = 'Save Fit Molecules',\n filters = [('PDB', '*.pdb', '.pdb')],\n initialdir = idir, initialfile = ifile, command = save )\n\n def SaveFit ( self, fmap, clr=None ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n mols = self.PlaceCopy(fmap.mols, fmap.M, dmap, clr)\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n print \"Saving:\"\n for mol in mols :\n print \" - %s %d.%d\" % ( path + mol.name, mol.id, mol.subid )\n chimera.PDBio().writePDBfile ( mols, path + mols[0].name )\n umsg ( \"Saved fit (%d structures)\" % len(mols) )\n\n return mols\n\n\n\n def PlaceCopy(self, molecules, mat, dmap, clr=None):\n\n try : fit_m_at = len ( self.fitted_mols ) + 1\n except : fit_m_at = 1; self.fitted_mols = []\n\n new_mols = []\n\n for molecule in molecules :\n\n mol = CopyMol ( molecule )\n mol.name = os.path.splitext ( mol.name )[0] + \"_f%d.pdb\" % fit_m_at\n\n if clr :\n r, g, b, a = clr\n mclr = chimera.MaterialColor ( r, g, b, a )\n else : mclr = molecule.residues[0].ribbonColor\n\n if mat != None : molApplyT ( mol, mat )\n new_mols.append ( mol )\n\n for res in mol.residues :\n res.ribbonDisplay = True\n res.ribbonDrawMode = 2\n res.ribbonColor = mclr\n for at in res.atoms : at.display = False\n\n mol.display = True\n\n if dmap :\n self.fitted_mols = self.fitted_mols + new_mols\n\n chimera.openModels.add ( new_mols, noprefs = True )\n\n return new_mols\n\n\n\n def FitToEachRegion ( self, dmap, task=None ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n fmap = None\n if self.map_to_fit :\n fmap = self.map_to_fit\n else :\n fmap 
= self.MoleculeMap()\n #if not hasattr(fmap.mols[0], 'centered'):\n self.StrucCenter()\n\n regs = smod.selected_regions()\n if len(regs) == 0 :\n umsg ( \"Please select one or more regions to align the structure to\" )\n return\n\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n\n for reg in regs :\n\n self.fits = []\n reportFitRegions(fmap.name, regs)\n scores = []\n corrs = []\n\n sp = reg.surface_piece\n sp.display = True\n clr = sp.region.color\n sp.color = ( clr[0], clr[1], clr[2], REG_OPACITY )\n\n #fmap.display = False\n for mol in fmap.mols : mol.display = True\n\n fmap.fit_regions = [reg]\n\n to_map = dmap\n reg_map = None\n if 1 :\n reg_map = mask_volume( [reg], dmap )\n\n bCloseMap = False\n\n if 0 and self.useLaplace.get () :\n umsg (\"Generating Laplace version of \" + dmap.name)\n from VolumeFilter import laplacian\n to_map = laplacian ( dmap )\n bCloseMap = True\n\n elif self.mask_map_when_fitting.get() :\n to_map = reg_map\n bCloseMap = True\n if to_map is None:\n umsg ('Could not create masked map')\n return\n\n tpoints = reg.map_points()\n if self.rotaSearch.get () :\n self.saFitMapToPoints_byRot ( fmap, tpoints, to_map )\n else :\n self.saFitMapToPoints ( fmap, tpoints, to_map )\n\n scores.append ( 0 )\n corrs.append ( fmap.fit_score )\n\n umsg ( \"Cross-correlation of fit: %f\" % fmap.fit_score )\n\n self.cfits = self.ClusterFits ( self.fits )\n self.cfits.sort ( reverse=True, key=lambda x: x[0] )\n #self.cfits.sort()\n #self.cfits.reverse()\n\n #frame_at = 0\n\n try : nToAdd = int ( self.numFitsToAdd.get() )\n except : nToAdd = len (self.cfits)\n for corr, M, regions, stats in self.cfits [ 0 : nToAdd ] :\n print \" -- clustered Fit -- #fits:%d maxAngle:%.1f maxShift:%.2f maxHeight:%.2f\" % ( stats['numFits'], stats['maxAngle'], stats['maxShift'], stats['maxHeight'] )\n fmap.fit_score, fmap.M, fmap.fit_regions = corr, M, regions\n fmap.atomInclusion, fmap.bbAtomInclusion, fmap.bbClashes, fmap.hdoScore = self.FitScores ( fmap, reg_map )\n #frame_at = frame_at + 1\n self.add_fit (fmap, dmap)\n\n # FitScores fn moves the model, so move it back to top fit\n move_fit_models(fmap, self.cfits[0][1], dmap.openState.xform)\n\n #umsg ( \"Cross-correlation: %.4f\\n\" % (fmap.fit_score) )\n self.ReportZScore ( self.cfits )\n\n # close masked map if it was created\n #if bCloseMap : chimera.openModels.close ( [to_map] )\n if reg_map :\n chimera.openModels.close ( [reg_map] )\n\n\n def saFitMapToPoints ( self, fmap, points, dmap, task=None ) :\n\n print \"fitting %s in map %s, to %d points\" % (fmap.name, dmap.name, len(points))\n\n fpoints, fpoint_weights = fit_points(fmap)\n\n # the 4 alignments to try...\n flips = [ (1,1,1), (-1,-1,1), (-1,1,-1), (1,-1,-1) ]\n #flips = [ (1,-1,-1) ]\n mlist = principle_axes_alignments ( points, flips, fmap.preM )\n\n best = (-2, None, None)\n names = ['%.1f*X %.1f*Y %.1f*Z' % f for f in flips]\n\n optimize = self.optimize_fits.get()\n\n fits = optimize_fits(fpoints, fpoint_weights, mlist, dmap, names, None, optimize)\n corr, Mfit, i = self.make_best_fit(fits, fmap, dmap)\n\n f = flips[i]\n print \" - best fit: %f for %.1f*X %.1f*Y %.1f*Z\" % (\n corr, f[0], f[1], f[2] )\n\n\n def ReportZScore ( self, fits ) :\n\n fit_scores = [c for c,M,regs,stats in fits]\n if ( len(fit_scores) > 3 ) :\n fit_scores.sort ()\n fit_scores.reverse ()\n best_score = fit_scores[0]\n other_scores = fit_scores[1:14]\n print \"Best score: \", best_score\n print \"Next scores: \", other_scores\n avg = numpy.average ( other_scores )\n stdev = numpy.std ( 
other_scores )\n self.zscore = (best_score - avg) / stdev\n umsg ( \"Top score: %.5f, z-score: %.5f (avg: %.4f, stdev: %.4f)\" % (\n best_score, self.zscore, avg, stdev) )\n\n\n\n def ClusterFits ( self, fits ) :\n\n class ClusterEntry :\n def __init__ (self, T, corr, regs, stats) :\n self.M = T\n xf = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n self.COM = xf.getTranslation()\n self.Q = Segger.quaternion.Quaternion()\n self.Q.fromXform ( xf )\n self.corr = corr\n self.regs = regs\n self.stats = stats\n\n class Cluster :\n def __init__ ( self, e ) :\n self.entries = [ e ]\n self.COM = e.COM\n self.Q = e.Q\n self.corr = e.corr\n self.M = e.M\n self.regs = e.regs\n self.stats = {}\n self.stats['maxAngle'] = self.sumAngles = e.stats['totAngle']\n self.stats['maxShift'] = self.sumShifts = e.stats['totShift']\n self.stats['maxHeight'] = self.sumHeights = e.stats['difCC']\n self.stats['numFits'] = 1\n\n def AddEntry ( self, new_e ) :\n self.entries.append ( new_e )\n if new_e.corr > self.corr :\n self.corr = new_e.corr\n self.M = new_e.M\n self.regs = new_e.regs\n\n totAngle, totShift, difCC = new_e.stats['totAngle'], new_e.stats['totShift'], new_e.stats['difCC']\n if totAngle > self.stats['maxAngle'] : self.stats['maxAngle'] = totAngle\n if totShift > self.stats['maxShift'] : self.stats['maxShift'] = totShift\n if difCC > self.stats['maxHeight'] : self.stats['maxHeight'] = difCC\n\n self.stats['numFits'] = self.stats['numFits'] + 1\n\n # compute the new averages\n self.COM = chimera.Vector (0,0,0)\n self.Q = Segger.quaternion.Quaternion(0, chimera.Vector(0,0,0))\n\n for e in self.entries :\n self.COM = self.COM + e.COM\n self.Q = self.Q + e.Q\n self.sumAngles = self.sumAngles + totAngle\n self.sumShifts = self.sumShifts + totShift\n self.sumHeights = self.sumHeights + difCC\n\n self.COM = self.COM / float ( len(self.entries) )\n self.Q.normalize()\n\n self.avgAngle = self.sumAngles / float ( len(self.entries) )\n self.avgShift = self.sumShifts / float ( len(self.entries) )\n self.avgHeight = self.sumHeights / float ( len(self.entries) )\n\n\n def SimilarTo ( self, e, posTol=5.0, angleTol=5.0 ) :\n if ( (self.COM - e.COM).length < posTol and\n self.Q.angleTo ( e.Q ) * 180.0 / numpy.pi < angleTol ) :\n return True\n return False\n\n posTol = float ( self.positionTolString.get() )\n angleTol = float ( self.angleTolString.get() )\n if self.doClusterFits.get () :\n print \"Clustering %d fits...\" % len(fits)\n print \" - distance < \", posTol\n print \" - angle < \", angleTol\n\n clusters = []\n for corr, M, regs, stats in fits :\n\n e = ClusterEntry ( M, corr, regs, stats )\n\n bAdded = False\n\n if self.doClusterFits.get () :\n for c in clusters :\n if c.SimilarTo ( e, posTol, angleTol ) :\n c.AddEntry ( e )\n bAdded = True\n break\n\n if bAdded == False :\n clusters.append ( Cluster (e) )\n\n print \"%d clusters\" % len(clusters)\n\n cfits = []\n for c in clusters :\n cfits.append ( [c.corr, c.M, c.regs, c.stats] )\n\n return cfits\n\n\n\n\n def saFitMapToPoints_byRot ( self, fmap, points, dmap, task=None, N=10, M=10 ) :\n\n print \"fitting %s in map %s, to %d points, by rotation\" % (fmap.name, dmap.name, len(points))\n\n num = float ( self.rotaSearchNum.get() )\n N = int ( numpy.floor ( numpy.sqrt ( num ) ) )\n M = int ( numpy.floor ( num / N ) )\n\n fpoints, fpoint_weights = fit_points ( fmap, (self.useLaplace.get()==False) )\n\n print \"%d fits - rotations %d axes, %d angles\" % (num, N, M)\n alist = 
uniform_rotation_angles(N, M)\n\n COM, U, S, V = prAxes ( points )\n comT = numpy.matrix ( [\n [ 1, 0, 0, COM[0] ],\n [ 0, 1, 0, COM[1] ],\n [ 0, 0, 1, COM[2] ],\n [ 0, 0, 0, 1 ] ] )\n\n mlist = [comT*rotation_from_angles(*angles)*fmap.preM for angles in alist]\n\n from math import pi\n names = ['theta %.0f, phi %.0f, rot %.0f'\n % tuple([a*180/pi for a in a3]) for a3 in alist]\n status_text = 'Rotational fit'\n\n optimize = self.optimize_fits.get()\n\n fits = optimize_fits(fpoints, fpoint_weights, mlist, dmap, names, status_text, optimize, False, task)\n corr, Mfit, i = self.make_best_fit(fits, fmap, dmap)\n\n print \" - best fit: %f\\n\" % ( corr, )\n\n\n def make_best_fit(self, fits, fmap, dmap):\n\n i = numpy.argmax([c for Mf,c,stats in fits])\n Mfit, corr, stats = fits[i] # highest correlation fit\n fmap.fit_score = corr\n fmap.M = Mfit\n\n list_fits = [(c, Mf, fmap.fit_regions, stats) for Mf,c,stats in fits]\n self.fits.extend(list_fits)\n\n move_fit_models(fmap, Mfit, dmap.openState.xform)\n\n return corr, Mfit, i\n\n\n def MoleculeMap ( self, create = True, warn = True ) :\n\n label = self.struc.get()\n sel_str = \"#\" + label [ label.rfind(\"(\")+1 : label.rfind(\")\") ]\n try :\n fmol = chimera.selection.OSLSelection(sel_str).molecules()[0]\n except :\n umsg ( \"%s not open - \" % self.struc.get() ); return\n\n try :\n fmol.fitting_map.name\n return fmol.fitting_map\n except : pass\n if create:\n fmol.fitting_map = self.GenStrucMap(show = False)\n return fmol.fitting_map\n return None\n\n\n\n\n def GroupAroundReg ( self, smod, regs, target_volume, bRad=-1.0 ) :\n\n dv_rgroups = []\n maxDepthReached = 0\n\n stack = [([reg], reg.enclosed_volume(), 0) for reg in regs]\n\n status ( \"Making groups around %d regions\" % len(regs) )\n\n while len(stack) > 0 :\n\n regs, vol_at, depth_at = stack.pop()\n\n if depth_at > maxDepthReached : maxDepthReached = depth_at\n\n if depth_at >= SAF_LS_DEPTH : continue\n if vol_at >= target_volume * (1.0 + SAF_DVOL) : continue\n\n dv = abs ( vol_at - target_volume ) / target_volume\n dv_rgroups.append ( [dv, regs] )\n\n if len(dv_rgroups) % 100 == 0 :\n status ( \"Making groups around %d - %d groups\" % (reg.rid, len(dv_rgroups)) ),\n\n reg_at = regs[0]\n for cr in reg_at.contacting_regions():\n if regs.count ( cr ) != 0 : continue\n if cr.placed : continue\n vol = vol_at + cr.enclosed_volume()\n stack.insert ( 0, [ [cr]+regs, vol, depth_at+1 ] )\n\n dv_rgroups_f = self.FilterGroups ( dv_rgroups, bRad )\n\n print \" - %d groups --> %d filtered groups\" % (len(dv_rgroups), len(dv_rgroups_f))\n return [dv_rgroups_f, maxDepthReached]\n\n\n\n\n\n def FilterGroups ( self, dv_rgroups, bRad = -1.0 ) :\n\n dv_rgroups_f = []\n len_groups = {}\n inc_regs_map = {}\n\n gi, ngroups = 0, len(dv_rgroups)\n\n for dv, regs in dv_rgroups :\n\n gi = gi + 1\n if gi % 100 == 0 :\n status ( \"Filtering group %d/%d\" % (gi,ngroups) )\n\n if dv > SAF_DVOL : continue\n\n included = False\n\n regs.sort()\n inc_regs_at = inc_regs_map\n for reg in regs :\n try : included, inc_regs_at = inc_regs_at[reg]\n except : included = False; break\n\n if included : continue\n\n if bRad > 0.0 :\n regs_bRad = regions_radius(regs)\n\n brad_d = abs ( regs_bRad - bRad ) / bRad\n\n if brad_d > SAF_DBRAD : continue\n\n dv_rgroups_f.append ( [dv, regs] )\n\n inc_regs_at = inc_regs_map\n last_arr = None\n for reg in regs :\n try :\n last_arr = inc_regs_at[reg]\n inc_regs_at = last_arr[1]\n except :\n last_arr = [ False, {} ]\n inc_regs_at[reg] = last_arr\n inc_regs_at = last_arr[1]\n\n last_arr[0] 
= True\n\n\n return dv_rgroups_f\n\n\n\n\n def GroupAllRegions ( self, smod, target_volume, bRad=-1.0) :\n\n dv_rgroups = []\n maxDepthReached = 0\n\n print \"Grouping %d regions in %s, target volume %.2f, bounding radius %.2f\" % (\n len(smod.surfacePieces), smod.name, target_volume, bRad )\n\n ri, nregs = 0, len(smod.regions)\n\n for reg in smod.regions :\n\n ri = ri + 1\n\n dv_rgroupsR, maxDepthReachedR = self.GroupAroundReg ( smod, [reg], target_volume, bRad )\n\n dv_rgroups = dv_rgroups + dv_rgroupsR\n\n if maxDepthReachedR > maxDepthReached : maxDepthReached = maxDepthReachedR\n\n\n print \"\\n - max depth reached: %d\" % maxDepthReached\n\n print \" - filtering %d groups...\" % len( dv_rgroups )\n return self.FilterGroups ( dv_rgroups, bRad )\n\n\n\n def FitMapToSelRGroup ( self, dmap = None, task = None ) :\n\n if dmap is None:\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = None\n if self.map_to_fit : fmap = self.map_to_fit\n else : fmap = self.MoleculeMap()\n\n thr = fmap.surface_levels[0]\n mm = fmap.data.matrix()\n mmab = numpy.where ( mm > thr, numpy.ones_like(mm), numpy.zeros_like(mm) )\n nz = numpy.shape ( numpy.nonzero ( mmab ) )[1]\n tvol = float(nz) * fmap.data.step[0] * fmap.data.step[1] * fmap.data.step[2]\n print \"%s - %d above %f, volume %.3f\" % (fmap.name, nz, thr, tvol)\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n regs = smod.selected_regions()\n if len(regs)==0 : umsg ( \"Please select a region to fit to\" ); return\n\n reportFitRegions(fmap.name, regs)\n\n for reg in regs:\n clr = reg.color\n reg.surface_piece.color = ( clr[0], clr[1], clr[2], REG_OPACITY )\n print \"\"\n\n points = numpy.concatenate ( [r.map_points()\n for r in regs], axis=0 )\n regions_vol = sum([r.enclosed_volume() for r in regs])\n\n dv = abs(regions_vol - tvol) / tvol\n print \" - regions volume: %.2f - dv %.5f\" % (regions_vol, dv)\n\n self.fits = []\n fmap.fit_regions = regs\n\n to_map = dmap\n reg_map = None\n if 1 :\n reg_map = mask_volume( regs, dmap )\n\n\n bCloseMap = False\n\n if self.useLaplace.get () :\n umsg (\"Generating Laplace version of \" + dmap.name)\n from VolumeFilter import laplacian\n to_map = laplacian ( dmap )\n bCloseMap = True\n\n elif self.mask_map_when_fitting.get() :\n to_map = reg_map\n bCloseMap = True\n if to_map is None:\n umsg ('Could not create masked map')\n return\n\n if self.rotaSearch.get () :\n self.saFitMapToPoints_byRot ( fmap, points, to_map, task )\n else :\n self.saFitMapToPoints ( fmap, points, to_map, task )\n\n self.cfits = self.ClusterFits ( self.fits )\n self.cfits.sort ( reverse=True, key=lambda x: x[0] )\n #cfits.sort()\n #cfits.reverse()\n\n try : nToAdd = int ( self.numFitsToAdd.get () )\n except : nToAdd = len (self.cfits)\n for corr, M, regions, stats in self.cfits [ 0 : nToAdd ] :\n print \" -- clustered Fit -- #fits:%d maxAngle:%.1f maxShift:%.2f maxHeight:%.2f\" % ( stats['numFits'], stats['maxAngle'], stats['maxShift'], stats['maxHeight'] )\n fmap.fit_score, fmap.M, fmap.fit_regions = corr, M, regions\n fmap.atomInclusion, fmap.bbAtomInclusion, fmap.bbClashes, fmap.hdoScore = self.FitScores ( fmap, reg_map )\n self.add_fit (fmap, dmap)\n\n move_fit_models(fmap, self.cfits[0][1], dmap.openState.xform)\n\n #umsg ( \"Cross-correlation: %.4f\\n\" % (fmap.fit_score) )\n self.ReportZScore ( self.cfits )\n\n # close masked map if it was created\n #if bCloseMap : chimera.openModels.close ( [to_map] )\n if reg_map :\n chimera.openModels.close ( [reg_map] )\n\n for m in 
fmap.mols :\n m.display = True\n\n\n\n\n\n def MapIndexesInMap ( self, ref_map, mask_map ) :\n\n thr = mask_map.surface_levels[0]\n mm = mask_map.data.matrix()\n mm = numpy.where ( mm > thr, mm, numpy.zeros_like(mm) )\n\n nze = numpy.nonzero ( mm )\n\n # copy is needed! transform_vertices requires contiguous array\n points = numpy.empty ( (len(nze[0]), 3), numpy.float32)\n points[:,0] = nze[2]\n points[:,1] = nze[1]\n points[:,2] = nze[0]\n\n print \"Making map indices for %s in %s\" % ( mask_map.name, ref_map.name )\n print \" - %d points above %.3f\" % ( len(points), thr )\n\n # transform to index reference frame of ref_map\n f1 = mask_map.data.ijk_to_xyz_transform\n f2 = xform_matrix ( mask_map.openState.xform )\n f3 = xform_matrix ( ref_map.openState.xform.inverse() )\n f4 = ref_map.data.xyz_to_ijk_transform\n\n tf = multiply_matrices( f2, f1 )\n tf = multiply_matrices( f3, tf )\n tf = multiply_matrices( f4, tf )\n transform_vertices ( points, tf )\n\n imap = set()\n for fi, fj, fk in points :\n for i in [ int(numpy.floor(fi)), int(numpy.ceil(fi)) ] :\n for j in [ int(numpy.floor(fj)), int(numpy.ceil(fj)) ] :\n for k in [ int(numpy.floor(fk)), int(numpy.ceil(fk)) ] :\n imap.add((i,j,k))\n\n return imap\n\n\n def ZeroMatWitMap ( self, ref_mat, ref_map, mask_map ) :\n\n thr = mask_map.surface_levels[0]\n mm = mask_map.data.matrix()\n mm = numpy.where ( mm > thr, mm, numpy.zeros_like(mm) )\n\n nze = numpy.nonzero ( mm )\n\n # copy is needed! transform_vertices requires contiguous array\n points = numpy.empty ( (len(nze[0]), 3), numpy.float32)\n points[:,0] = nze[2]\n points[:,1] = nze[1]\n points[:,2] = nze[0]\n\n print \"Making map indices for %s in %s\" % ( mask_map.name, ref_map.name )\n print \" - %d points above %.3f\" % ( len(points), thr )\n\n # transform to index reference frame of ref_map\n f1 = mask_map.data.ijk_to_xyz_transform\n f2 = xform_matrix ( mask_map.openState.xform )\n f3 = xform_matrix ( ref_map.openState.xform.inverse() )\n f4 = ref_map.data.xyz_to_ijk_transform\n\n tf = multiply_matrices( f2, f1 )\n tf = multiply_matrices( f3, tf )\n tf = multiply_matrices( f4, tf )\n transform_vertices ( points, tf )\n\n imap = set()\n for fi, fj, fk in points :\n for i in [ int(numpy.floor(fi)), int(numpy.ceil(fi)) ] :\n for j in [ int(numpy.floor(fj)), int(numpy.ceil(fj)) ] :\n for k in [ int(numpy.floor(fk)), int(numpy.ceil(fk)) ] :\n #imap.add((i,j,k))\n try :\n ref_mat[k,j,i] = 0\n except :\n pass\n\n\n\n def ZeroMatWitMol ( self, M, dmap, mol ) :\n\n import _multiscale\n points = _multiscale.get_atom_coordinates ( mol.atoms, transformed = False )\n\n # transform to index reference frame of ref_map\n f1 = xform_matrix ( mol.openState.xform )\n f2 = xform_matrix ( dmap.openState.xform.inverse() )\n f3 = dmap.data.xyz_to_ijk_transform\n\n tf = multiply_matrices( f2, f1 )\n tf = multiply_matrices( f3, tf )\n\n transform_vertices ( points, tf )\n\n print \" - \", len(points), \"points\"\n\n l = 3.0 / dmap.data.step[0]\n l2 = l*l\n print l\n\n nz = 0\n\n if 0 :\n for fi, fj, fk in points :\n for i in [ int(numpy.floor(fi-l)), int(numpy.ceil(fi+l))+1 ] :\n for j in [ int(numpy.floor(fj-l)), int(numpy.ceil(fj+l))+1 ] :\n for k in [ int(numpy.floor(fk-l)), int(numpy.ceil(fk+l))+1 ] :\n di, dj, dk = i-fi, j-fj, k-fk\n d = di*di + dj*dj + dk*dk\n if d < l2 :\n try :\n print k, j, i\n M[k,j,i] = 0\n nz += 1\n if nz > 20 :\n return M\n except :\n pass\n for fi, fj, fk in points :\n for i in range ( int(numpy.floor(fi-l)), int(numpy.ceil(fi+l))+1 ) :\n for j in range ( 
int(numpy.floor(fj-l)), int(numpy.ceil(fj+l))+1 ) :\n for k in range ( int(numpy.floor(fk-l)), int(numpy.ceil(fk+l))+1 ) :\n di, dj, dk = i-fi, j-fj, k-fk\n d = di*di + dj*dj + dk*dk\n if d < l2 :\n try :\n M[k,j,i] = 0\n nz += 1\n except :\n pass\n\n print nz\n return M\n\n\n\n\n\n\n def OverlappingRegions ( self, dmap, fmap, smod, hide_others = True ) :\n\n imap = self.MapIndexesInMap ( dmap, fmap )\n print 'imap', len(imap)\n\n try : fmap.COM\n except : fmap.COM = fmap.mols[0].COM; fmap.bRad = fmap.mols[0].BoundRad\n\n p = numpy.array ( [ fmap.COM ], numpy.float32 )\n transform_vertices ( p, xform_matrix( fmap.openState.xform ) )\n transform_vertices ( p, xform_matrix( dmap.openState.xform.inverse() ) )\n f_COM = chimera.Vector ( *p[0] )\n f_bRad = fmap.bRad\n print \" center\", f_COM, \"brad\", f_bRad\n\n jregs = []\n\n for r in smod.regions :\n\n if r.placed:\n continue\n\n ipoints = r.points()\n noverlap = 0\n for i,j,k in ipoints :\n if (i,j,k) in imap:\n noverlap += 1\n\n ov = float(noverlap) / len(ipoints)\n\n if ov > .8 :\n jregs.append ( r )\n\n try :\n if ov > r.max_ov :\n r.max_ov = ov\n r.max_ov_cid = fmap.chain_id\n r.max_ov_bioM = smod.bio_mt_at\n except :\n pass\n\n\n oregs = jregs\n\n umsg ( \"Found %d regions overlapping\" % len(oregs) )\n sel_sps = []\n\n for sp in smod.surfacePieces :\n try : clr = sp.region.color\n except : continue\n if oregs.count ( sp.region ) > 0 :\n sp.color = ( clr[0], clr[1], clr[2], REG_OPACITY )\n sp.display = True\n sel_sps.append ( sp )\n else :\n sp.color = ( clr[0], clr[1], clr[2], 1.0 )\n sp.display = not hide_others\n\n return jregs\n\n\n\n\n\n def ShowOverlappingRegions ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n fmap = self.MoleculeMap()\n if fmap == None : return\n\n\n oregs = self.OverlappingRegions ( dmap, fmap, smod )\n\n return oregs\n\n\n\n\n def FitMapToGroupsAround ( self, fmap, smod, regs, dmap, bFirst = True ) :\n\n bestFitScore = -1e99\n bestFitM = None\n bestFitGroup = None\n bestFitRegions = None\n fmap.fit_score = None\n\n tvol = self.MapVolume ( fmap )\n bRad = -1.0 # fmap.mol.BoundRad / float(dmap.data.step[0]); # self.MapBoundingRad ( fmap )\n bRad = fmap.mols[0].BoundRad\n\n print \"\\nMaking groups around %d regions - target vol %.3f, b-Rad %.3f\" % ( len(regs), tvol, bRad )\n smod.rgroups, maxDepthReached = self.GroupAroundReg ( smod, regs, tvol, bRad )\n smod.rgroups.sort()\n print \" - depth reached: %d\" % maxDepthReached\n\n if len(smod.rgroups) == 0 : umsg ( \"No groups found\" ); return -1e99\n\n nsearchgrps = min ( len(smod.rgroups), SAF_LS_NGROUPS )\n print \"________________________________________________________________________\"\n umsg ( \"Fitting %s to %d/%d groups...\" % ( fmap.name, nsearchgrps, len(smod.rgroups) ) )\n print \"________________________________________________________________________\"\n\n\n self.fits = []\n\n for i, dv_regs in enumerate ( smod.rgroups[0:nsearchgrps] ) :\n\n dv, regs = dv_regs\n\n umsg ( \"Fitting to group %d/%d, dVolume %.4f, %d regions\" % (i+1, nsearchgrps, dv, len(regs) ) )\n print \" - regions:\",\n for r in regs : print r.rid,\n print \"\"\n\n fmap.fit_regions = regs\n\n points = numpy.concatenate ( [r.map_points() for r in regs], axis=0 )\n\n if self.rotaSearch.get () :\n self.saFitMapToPoints_byRot ( fmap, points, dmap, task )\n else :\n self.saFitMapToPoints ( fmap, points, dmap, task )\n\n print \"\"\n\n if fmap.fit_score > bestFitScore 
:\n bestFitScore = fmap.fit_score\n bestFitM = fmap.M\n bestFitRegions = regs\n\n umsg ( \"Best cross-correlation: %.4f\\n\\n\" % ( bestFitScore ) )\n\n fmap.fit_score = bestFitScore\n fmap.M = bestFitM\n fmap.fit_regions = bestFitRegions\n\n tXO, tXR = xf_2_M ( dmap.openState.xform )\n T = tXO * tXR * fmap.M\n xfA = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n fmap.openState.xform = xfA\n for mol in fmap.mols : mol.openState.xform = xfA\n\n self.cfits = self.ClusterFits ( self.fits )\n #cfits.sort ( reverse=True, key=lambda x: x[0] )\n self.cfits.sort()\n self.cfits.reverse()\n\n try : nToAdd = int ( self.numFitsToAdd.get () )\n except : nToAdd = len (self.cfits)\n\n for corr, M, regions, stats in self.cfits [ 0 : nToAdd ] :\n fmap.fit_score, fmap.M, fmap.fit_regions = corr, M, regions\n self.add_fit (fmap, dmap)\n # TODO - add atom inclusion comp\n\n #umsg ( \"Cross-correlation: %.4f\\n\" % (fmap.fit_score) )\n self.ReportZScore ( self.cfits )\n\n\n def FitMapToRegionsAroundSel ( self, task=None ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n regs = smod.selected_regions()\n\n fmap = self.MoleculeMap()\n if fmap == None : return\n\n if timing: t0 = clock()\n self.FitMapToGroupsAround ( fmap, smod, regs, dmap )\n if timing:\n t1 = clock()\n print \"Time: %.1f sec\" % (t1-t0,)\n\n if fmap.fit_score is None:\n umsg('No groups of regions meet size requirement')\n return\n\n oregs = self.OverlappingRegions ( dmap, fmap, smod, hide_others = False )\n\n if len(oregs) == 1 :\n oregs[0].placed = True\n\n\n def FitOpenMapsToGroupsAround ( self, smod, reg, dmap, bFirst=True ) :\n\n bestFitScore = -1e99\n bestFitMap = None\n bestFitM = None\n\n fmaps = []\n\n # TODO: Don't use \"centered\" to decide what maps to fit.\n for fmap in OML() :\n try : fmap.mols[0].centered\n except : continue\n #fmap.display = False\n for mol in fmap.mols : mol.display = False\n fmaps.append ( fmap )\n\n\n for fmap in fmaps :\n\n print \"\\n****************************************************\"\n print \"Fitting: \", fmap.name\n print \"****************************************************\\n\"\n\n for mol in fmap.mols : mol.display = True\n\n self.FitMapToGroupsAround ( fmap, smod, reg, dmap, bFirst )\n\n if fmap.fit_score == None :\n print \" - no fits for map\", fmap.name\n\n elif fmap.fit_score > bestFitScore :\n bestFitScore = fmap.fit_score\n bestFitMap = fmap\n bestFitM = fmap.M\n\n for mol in fmap.mols : mol.display = False\n\n\n if bestFitM == None :\n print \"No best fit recorded, perhaps there were not groups to start with\"\n return None\n\n fmap = bestFitMap\n for mol in fmap.mols : mol.display = True\n\n print \"\\n**********************************************************\"\n print \"Best fit score was %.4f for %s\" % (bestFitScore, fmap.name)\n print \"**********************************************************\\n\"\n fmap.M = bestFitM\n\n tXO, tXR = xf_2_M ( dmap.openState.xform )\n T = tXO * tXR * fmap.M\n xfA = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n fmap.openState.xform = xfA\n for mol in fmap.mols : mol.openState.xform = xfA\n\n oregs = self.OverlappingRegions ( dmap, fmap, smod, hide_others = False )\n\n if oregs.count ( reg ) == 0 :\n print \"Overlapping regions not inclusive\"\n return False\n\n\n if len(oregs) > 1 :\n jreg = 
smod.join_regions ( oregs )\n jreg.placed = True\n self.ReportRegionCount(smod)\n\n jsp = jreg.surface_piece\n clr = jreg.color\n jsp.color = ( clr[0], clr[1], clr[2], REG_OPACITY )\n jsp.display = False\n\n elif len(oregs) == 1 :\n oregs[0].placed = True\n oregs[0].surface_piece.display = False\n\n else :\n for mol in fmap.mols : mol.display = False\n return False\n\n for mol in fmap.mols : mol.display = False\n\n self.add_fit(fmap, dmap)\n\n return True\n\n\n\n def FitMapsToRGroups ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n for sp in smod.surfacePieces :\n clr = sp.region.color\n sp.color = ( clr[0], clr[1], clr[2], REG_OPACITY )\n #sp.display = False\n\n\n if timing: t0 = clock()\n\n while 1 :\n\n sp = None\n for spi in smod.surfacePieces :\n\n try :\n spi.region.placed.display = False\n\n clr = spi.region.color\n spi.color = ( clr[0], clr[1], clr[2], REG_OPACITY )\n spi.display = True\n spi.region.placed.display = True\n spi.display = False\n\n except :\n pass\n\n if spi.region.placed == False :\n sp = spi\n break\n\n if sp == None :\n print \"\\nAll regions have maps placed\"\n break\n\n clr = sp.region.color\n sp.color = ( clr[0], clr[1], clr[2], REG_OPACITY )\n sp.display = True\n\n if self.FitOpenMapsToGroupsAround ( smod, sp.region, dmap, False ) == False :\n print \"___ No map fit for region! ___\", sp.region.rid, sp.region.enclosed_volume()\n sp.failed_fit = True\n\n sp.region.placed = True\n\n\n if timing:\n t1 = clock()\n print \"Time: %.1f sec\" % (t1-t0)\n\n for sp in smod.surfacePieces :\n try : sp.failed_fit\n except : continue\n print \"Region %d failed fit and still in model\" % sp.region.rid\n\n\n\n def MapVolume ( self, fmap ) :\n\n thr = fmap.surface_levels[0]\n mm = fmap.data.matrix()\n mmab = numpy.where ( mm > thr, numpy.ones_like(mm), numpy.zeros_like(mm) )\n nz = numpy.shape ( numpy.nonzero ( mmab ) )[1]\n vvol = fmap.data.step[0] * fmap.data.step[1] * fmap.data.step[2]\n tvol = vvol * float(nz)\n print \"%s - %d above %f, VOLUME %.3f\" % (fmap.name, nz, thr, tvol)\n return tvol\n\n\n def Scores ( self ) :\n\n fmap = self.MoleculeMap()\n if fmap == None : return\n\n self.FitScores ( fmap )\n\n\n def SMS ( self ) :\n\n dmap = segmentation_map()\n if dmap == None :\n print \"No segmentation map\";\n return\n\n fmap = self.MoleculeMap()\n if fmap == None : return\n\n sel_str = \"#%d@C,N,CA\" % fmap.mols[0].id\n sel = chimera.selection.OSLSelection (sel_str)\n backbone_atoms = sel.atoms()\n\n sms = ShapeMatchScore ( backbone_atoms, dmap )\n\n\n\n def VisiScores ( self ) :\n\n print \"Visi scores...\"\n dmap = segmentation_map()\n if dmap == None :\n return\n\n print \" - in map: \" + dmap.name\n\n molmap = None\n mols = []\n\n for m in chimera.openModels.list() :\n if m.display == False :\n continue\n if type(m) == chimera.Molecule :\n print \" - mol: \", m.name\n mols.append ( m )\n if type(m) == VolumeViewer.Volume :\n print \" - map: \", m.name\n molmap = m\n\n if molmap != None :\n fpoints, fpoint_weights = fit_points(molmap, True)\n map_values = dmap.interpolated_values ( fpoints, molmap.openState.xform )\n olap, corr = overlap_and_correlation ( fpoint_weights, map_values )\n print \" - Overlap: %f, Cross-correlation: %f\" % (olap, corr)\n\n\n\n otherPoints = None\n otherAtoms = []\n\n\n for m in mols :\n otherAtoms = otherAtoms + m.atoms\n mpoints = get_atom_coordinates ( m.atoms, transformed = True )\n if otherPoints == None : 
otherPoints = mpoints\n else : otherPoints = numpy.concatenate ( [otherPoints, mpoints], axis=0 )\n\n # print \"Doing tree with %d %d\" % ( len(otherPoints), len(otherAtoms) )\n\n print \" - making tree, %d atoms\" % len(otherAtoms)\n\n from CGLutil.AdaptiveTree import AdaptiveTree\n searchTreeAll = AdaptiveTree (otherPoints.tolist(), otherAtoms, 4.0)\n\n print \" - checking clashes, %d atoms\" % len(otherAtoms)\n\n numClash = 0.0\n for at in otherAtoms :\n nearby = searchTreeAll.searchTree ( at.xformCoord().data(), 3.0 )\n for nb in nearby :\n if nb.molecule != at.molecule :\n numClash = numClash + 1.0\n break\n\n bbClashes = numClash / float ( len(otherAtoms) )\n\n print \" - clashes: %.0f/%.0f = %0.3f clash-free\" % (numClash, len(otherAtoms), 1.0-bbClashes);\n\n\n\n\n\n\n def FitScores ( self, fmap, regionMap = None ) :\n\n dmap = segmentation_map()\n if dmap == None :\n print \"No segmentation map\";\n return [0.0, 0.0, 0.0, 0.0]\n\n print \"Fit scores for\", fmap.name, \"in\", dmap.name\n\n import numpy\n import _contour\n\n # move fmap (and structures) to fmap.M if it's there\n # (it's not there if we do scores for a selected structure\n # that hasn't been fit yet)\n try : fmap.M; move = True\n except : move = False\n\n if move :\n tf = numpy.array(fmap.M)\n xf = dmap.openState.xform\n xf.multiply(chimera_xform(tf))\n fmap.openState.xform = xf\n for mol in fmap.mols :\n mol.openState.xform = xf\n\n\n # ---------------------------------------------------------------\n # Cross-correlation\n # ---------------------------------------------------------------\n fpoints, fpoint_weights = fit_points(fmap)\n map_values = dmap.interpolated_values ( fpoints, fmap.openState.xform )\n olap, corr = overlap_and_correlation ( fpoint_weights, map_values )\n print \" - Overlap: %f, Cross-correlation: %f\" % (olap, corr)\n\n\n # ---------------------------------------------------------------\n # By-residue cross-correlation\n # ---------------------------------------------------------------\n if 0 :\n cc_by_residue ( fmap, dmap, 16 )\n\n\n # ---------------------------------------------------------------\n # Atom inclusion -- all atoms\n # ---------------------------------------------------------------\n backbone_atoms = []\n allIncl = 0.0\n bbIncl = 0.0\n bbClashes = 0.0\n\n all_atoms = []\n for mol in fmap.mols : all_atoms = all_atoms + mol.atoms\n numAllAtoms = float ( len(all_atoms) )\n\n if len(all_atoms) == 0 :\n return [0.0, 0.0, 0.0, 0.0]\n\n #dmapXfInv = xform_matrix( dmap.openState.xform.inverse() )\n #transform_vertices( points, dmapXfInv )\n points = get_atom_coordinates ( all_atoms, transformed = True )\n dvals = dmap.interpolated_values ( points, chimera.Xform() )\n min_d = dmap.surface_levels[0]\n dvals = numpy.where ( dvals > min_d, dvals, numpy.zeros_like(dvals) )\n nze = numpy.nonzero ( dvals )\n allIn = float(len(nze[0]))\n allIncl = allIn / numAllAtoms\n\n print \" - Atom inclusion: %.0f/%.0f = %.3f\" % ( allIn, numAllAtoms, allIncl )\n\n # ---------------------------------------------------------------\n # Atom inclusion -- backbone atoms\n # ---------------------------------------------------------------\n bbIncl = 0.0\n\n sel_str = \"#%d@C,N,CA\" % fmap.mols[0].id\n sel = chimera.selection.OSLSelection (sel_str)\n backbone_atoms = sel.atoms()\n\n if len(backbone_atoms) > 0 :\n\n points = get_atom_coordinates ( backbone_atoms, transformed = True )\n #dmapXfInv = xform_matrix( dmap.openState.xform.inverse() )\n #transform_vertices( points, dmapXfInv )\n dvals = 
dmap.interpolated_values ( points, chimera.Xform() )\n min_d = dmap.surface_levels[0]\n dvals = numpy.where ( dvals > min_d, dvals, numpy.zeros_like(dvals) )\n nze = numpy.nonzero ( dvals )\n bbIn = float(len(nze[0]))\n numBBAtoms = float(len(backbone_atoms))\n bbIncl = bbIn / numBBAtoms\n print \" - BB Atom inclusion: %.0f/%.0f = %.3f\" % (bbIn, numBBAtoms, bbIncl );\n\n\n if 0 :\n sms = ShapeMatchScore ( backbone_atoms, dmap )\n\n\n # ---------------------------------------------------------------\n # Coverage of high-density areas - Density Occupancy\n # ---------------------------------------------------------------\n\n hdo = 0.0\n if regionMap :\n #fpoints, fpoint_weights = fit_points ( regionMap )\n #nz_fpoints = len(fpoints)\n\n nz_fpoints = len ( numpy.nonzero ( regionMap.data.full_matrix() )[0] )\n\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( regionMap.openState.xform.inverse() ) )\n s = regionMap.data.step[0]\n mdata = VolumeData.zone_masked_grid_data ( regionMap.data, points, numpy.sqrt(3*s*s) )\n #gv = VolumeViewer.volume.volume_from_grid_data ( mdata )\n #gv.openState.xform = dmap.openState.xform\n #gv.name = \"Masked\"\n\n mat = mdata.full_matrix()\n nz_mdata = len ( numpy.nonzero ( mat )[0] )\n if nz_fpoints > 0 :\n hdo = float (nz_mdata) / float(nz_fpoints)\n print \" - Density Occupancy: %d / %d grid points above %.3f occupied (%.4f)\" % (\n nz_mdata, nz_fpoints, dmap.surface_levels[0], hdo )\n else :\n print \" - not computing density occupancy\"\n\n\n # ---------------------------------------------------------------\n # Clashes with symmetric copies\n # ---------------------------------------------------------------\n if self.calcSymmetryClashes.get() :\n\n symMols = self.PlaceSym ()\n if symMols:\n\n otherPoints = None\n otherAtoms = []\n\n for m in symMols :\n otherAtoms = otherAtoms + m.atoms\n mpoints = get_atom_coordinates ( m.atoms, transformed = True )\n if otherPoints == None : otherPoints = mpoints\n else : otherPoints = numpy.concatenate ( [otherPoints, mpoints], axis=0 )\n\n # print \"Doing tree with %d %d\" % ( len(otherPoints), len(otherAtoms) )\n\n from CGLutil.AdaptiveTree import AdaptiveTree\n searchTreeAll = AdaptiveTree (otherPoints.tolist(), otherAtoms, 4.0)\n\n #if len ( backbone_atoms ) == 0 :\n # sel_str = \"#%d@C,N,CA\" % fmap.mols[0].id\n # sel = chimera.selection.OSLSelection (sel_str)\n # backbone_atoms = sel.atoms()\n\n numClash = 0.0\n for at in all_atoms :\n nearby = searchTreeAll.searchTree ( at.xformCoord().data(), 3.0 )\n if len(nearby) > 0 :\n numClash = numClash + 1.0\n\n bbClashes = numClash / numAllAtoms\n\n chimera.openModels.close ( symMols )\n\n print \" - Clashes with symmetric copies: %.0f/%.0f = %0.3f\" % (numClash, numAllAtoms, bbClashes);\n\n print fmap.name, corr, allIncl, bbClashes, hdo\n\n #for i in range ( 1 ) :\n # print (frame_at+i),\n # chimera.printer.saveImage ( \"./frames/%06d.png\" % (frame_at + i) )\n #print \"\"\n\n return [allIncl, bbIncl, bbClashes, hdo]\n\n\n def FitMapToRGroups ( self, task=None ) :\n\n print \"_______________________________________________________________\"\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = self.MoleculeMap()\n if fmap == None : return\n tvol = self.MapVolume ( fmap )\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n print \"---\"\n\n if timing: t0 = clock()\n\n bRad = fmap.mols[0].BoundRad\n\n smod.rgroups = self.GroupAllRegions ( smod, tvol, bRad )\n smod.rgroups.sort()\n\n 
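# Note (editor's comment, summarizing the surrounding code): each entry of smod.rgroups is\n # [dVolume, regions]; GroupAllRegions() collected groups of contacting watershed regions\n # whose combined volume is close to the simulated map's volume (tvol) and whose bounding\n # radius is close to bRad, so sort() puts the closest volume matches first. Only the top\n # MAX_NUM_GROUPS groups are fitted below, with either the principal-axes flips\n # (saFitMapToPoints) or the rotational search (saFitMapToPoints_byRot); the best\n # cross-correlation fit is kept and the accumulated fits are clustered afterwards.\n 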
bestFitScore = -1e99\n bestFitM = None\n\n print \"Got %d groups...\" % (len(smod.rgroups) )\n\n dmap_name = os.path.splitext ( dmap.name )[0]\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n\n nsearchgrps = min ( MAX_NUM_GROUPS, len(smod.rgroups) )\n if nsearchgrps == 0:\n umsg('No groups of regions meet size requirement')\n return\n\n self.fits = []\n\n for i, dv_regs in enumerate ( smod.rgroups [0:nsearchgrps] ) :\n\n dv, regs = dv_regs\n\n umsg ( \"Fitting to group %d/%d, dVolume %.4f, %d regions\" % (i+1, nsearchgrps, dv, len(regs) ) )\n print \" - regions:\",\n for r in regs : print r.rid,\n print \"\"\n\n for sp in smod.surfacePieces :\n if regs.count ( sp.region ) > 0 :\n sp.display = True\n clr = sp.region.color\n sp.color = ( clr[0], clr[1], clr[2], REG_OPACITY )\n\n else : sp.display = False\n\n fmap.fit_regions = regs\n\n # TODO: points need to be in dmap coordinates.\n points = numpy.concatenate ( [r.map_points()\n for r in regs], axis=0 )\n if self.rotaSearch.get () :\n self.saFitMapToPoints_byRot ( fmap, points, dmap )\n else :\n self.saFitMapToPoints ( fmap, points, dmap )\n\n if fmap.fit_score > bestFitScore :\n bestFitScore = fmap.fit_score\n bestFitM = fmap.M\n bestFitRegs = regs\n\n umsg ( \"Best cross-correlation: %.4f\\n\\n\" % ( bestFitScore ) )\n\n fmap.fit_score = bestFitScore\n fmap.M = bestFitM\n\n tXO, tXR = xf_2_M ( dmap.openState.xform )\n T = tXO * tXR * fmap.M\n xfA = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n fmap.openState.xform = xfA\n for mol in fmap.mols : mol.openState.xform = xfA\n\n if timing:\n t1 = clock()\n print \"Time: %.1f sec\" % (t1-t0)\n\n oregs = self.OverlappingRegions ( dmap, fmap, smod, hide_others = False )\n\n self.cfits = self.ClusterFits ( self.fits )\n self.cfits.sort ( reverse=True, key=lambda x: x[0] )\n #cfits.sort()\n #cfits.reverse()\n\n try : nToAdd = int ( self.numFitsToAdd.get () )\n except : nToAdd = len (self.cfits)\n\n for corr, M, regions, stats in self.cfits [ 0 : nToAdd ] :\n fmap.fit_score, fmap.M, fmap.fit_regions = corr, M, regions\n self.add_fit (fmap, dmap)\n # TODO - add atom inclusion comp\n\n #umsg ( \"Cross-correlation: %.4f\\n\" % (fmap.fit_score) )\n self.ReportZScore ( self.cfits )\n\n\n\n\n def GetMapFromMolRes ( self, mol, cid, rStart, rEnd ) :\n\n sel_str = \"#%d:%d-%d.%s\" % (mol.id, rStart, rEnd, cid)\n print \"[%s]\" % (sel_str),\n\n\n res = float ( self.simRes.get() )\n grid = float ( self.simGridSp.get() )\n\n cmd = \"molmap %s %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( sel_str, res, grid )\n chimera.runCommand ( cmd )\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n #print \" - found\", mod.name\n mv = mod\n mv.name = \"_\" + sel_str\n break\n\n if mv == None :\n umsg (\" - error - could not find chain map\")\n\n return mv\n\n\n\n def GetMapFromMolRanges ( self, mol, cid, ranges ) :\n\n sel_str = \"#%d:%s\" % (mol.id, ranges)\n print \"[%s]\" % (sel_str),\n\n\n res = float ( self.simRes.get() )\n grid = float ( self.simGridSp.get() )\n\n cmd = \"molmap %s %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( sel_str, res, grid )\n chimera.runCommand ( cmd )\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n #print \" - found\", mod.name\n mv = mod\n mv.name = \"_\" + sel_str\n 
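# molmap output located: rename it with the selection string and stop scanning open models\n                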
break\n\n if mv == None :\n umsg (\" - error - could not find chain map\")\n\n return mv\n\n\n\n def GroupRegionsBySS ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n #fmap = self.MoleculeMap()\n #if fmap == None : return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n print \"---\"\n\n #mol = fmap.mols[0]\n #print mol.name\n\n #chain_colors = RandColorChains ( mol )\n\n chain_maps = []\n\n res = float ( self.simRes.get() )\n grid = float ( self.simGridSp.get() )\n\n print \"_____________ res %2f _______ grid %.2f _________________________________\" % (res, grid)\n\n\n for mol in chimera.openModels.list() :\n\n if type(mol) != chimera.Molecule or mol.display == False : continue\n\n basename = os.path.splitext ( mol.name )[0]\n #chain_colors = RandColorChains ( mol )\n\n\n chainsRes = {}\n for res in mol.residues :\n try :\n chainsRes[res.id.chainId].append ( res )\n except :\n chainsRes[res.id.chainId] = [res]\n\n chainsList = chainsRes.keys()\n chainsList.sort()\n\n\n for chainId in chainsList :\n\n residues = chainsRes[chainId]\n\n print \" - chain \" + chainId + \", %d \" % len(residues) + \" residues\"\n\n ss, rStart = \"\", 0\n rI = 0\n\n oRanges = \"\"\n\n while 1 :\n res = residues[rI]\n\n if rStart == 0 :\n print \" - at first res %d \" % rI + \", pos: %d \" % res.id.position,\n\n rStart = res.id.position\n if res.isHelix :\n print \" - H\"\n ss = \"H\"\n else :\n print \"\"\n ss = \"\"\n\n else :\n #print \" - at res %d \" % rI + \", pos: %d \" % res.id.position,\n\n if res.isHelix :\n #print \" - H \"\n if ss != \"H\" :\n print \" - _->H - at res %d \" % rI + \", pos: %d \" % res.id.position\n #mv = self.GetMapFromMolRes ( mol, chainId, rStart, res.id.position-1 )\n #chain_maps.append ( [mv, self.MapIndexesInMap ( dmap, mv )] )\n #mv.chain_id = basename + \"_\" + chainId + \"_H%d\" % rStart\n if len(oRanges) > 0 : oRanges = oRanges + \",\"\n oRanges = oRanges + \"%d-%d.%s\" % (rStart, res.id.position-1,chainId)\n rStart = res.id.position\n ss = \"H\"\n else :\n #print \"\"\n if ss == \"H\" :\n print \" - H->_ - at res %d \" % rI + \", pos: %d \" % res.id.position\n mv = self.GetMapFromMolRes ( mol, chainId, rStart, res.id.position-1 )\n chain_maps.append ( [mv, self.MapIndexesInMap ( dmap, mv )] )\n mv.chain_id = basename + \"_\" + chainId + \"_%d\" % rStart\n rStart = res.id.position\n ss = \"\"\n\n rI += 1\n if rI >= len(residues) :\n print \" - done chain \" + chainId + \" - at res %d \" % rI + \", pos: %d \" % res.id.position,\n\n if res.isHelix :\n print \" - H \"\n mv = self.GetMapFromMolRes ( mol, chainId, rStart, res.id.position )\n chain_maps.append ( [mv, self.MapIndexesInMap ( dmap, mv )] )\n mv.chain_id = basename + \"_\" + chainId + \"_\" + ss + \"%d\" % rStart\n else :\n print \"\"\n if len(oRanges) > 0 : oRanges = oRanges + \",\"\n oRanges = oRanges + \"%d-%d.%s\" % (rStart, res.id.position, chainId)\n\n\n mv = self.GetMapFromMolRanges ( mol, chainId, oRanges )\n chain_maps.append ( [mv, self.MapIndexesInMap ( dmap, mv )] )\n mv.chain_id = basename + \"_\" + chainId\n\n break;\n\n\n #break\n\n # print chain_maps\n\n rgroups = {}\n\n print \" - %d regions\" % len(smod.regions)\n\n for ri, reg in enumerate ( smod.regions ) :\n\n if ri % 100 == 0 :\n print \" %d/%d \" % (ri+1, len(smod.regions) )\n\n max_ov = 0.0\n max_ov_chm = None\n for chmImap in chain_maps :\n chm, imap = chmImap\n ipoints = reg.points()\n noverlap = 0\n for i,j,k in ipoints :\n if (i,j,k) in imap:\n noverlap += 1\n\n 
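# noverlap counts how many of this region's grid points fall inside the chain map's index set\n                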
#print \" - \", chm.name, noverlap\n\n ov = float(noverlap) / reg.point_count()\n if ov > max_ov :\n max_ov = ov\n max_ov_chm = chm\n\n if max_ov_chm :\n try : rgroups[max_ov_chm.chain_id]\n except : rgroups[max_ov_chm.chain_id] = []\n rgroups[max_ov_chm.chain_id].append ( reg )\n\n\n import regions\n\n for chid, regs in rgroups.iteritems () :\n print \"Chain %s - %d regions\" % (chid, len(regs))\n\n jregs = regions.TopParentRegions(regs)\n jreg = smod.join_regions ( jregs )\n jreg.make_surface(None, None, smod.regions_scale)\n\n\n for chmImap in chain_maps :\n chimera.openModels.close ( chmImap )\n\n\n\n def GroupRegionsByChains ( self ) :\n\n dmap = segmentation_map()\n if dmap == None :\n umsg ( \"Please choose map in Segment Dialog\" )\n return\n\n #fmap = self.MoleculeMap()\n #if fmap == None : return\n\n smod = self.CurrentSegmentation()\n if smod is None :\n umsg ( \"Please select a segmentation in Segment Dialog\" )\n return\n\n print \"---\"\n\n #mol = fmap.mols[0]\n #print mol.name\n\n #chain_colors = RandColorChains ( mol )\n\n\n umsg ( \"Grouping with chains... making chain maps...\" )\n\n\n chain_maps = []\n\n res = float ( self.simRes.get() )\n grid = float ( self.simGridSp.get() )\n\n print \"_____________ res %2f _______ grid %.2f _________________________________\" % (res, grid)\n\n\n mols = []\n\n for mol in chimera.openModels.list() :\n\n if type(mol) != chimera.Molecule or mol.display == False :\n continue\n\n mols.append ( mol )\n\n nchains = 0\n from random import random as rand\n\n mol_ch_colors = {}\n\n for i, mol in enumerate (mols) :\n\n chain_colors = {} # RandColorChains ( mol )\n for r in mol.residues:\n if hasattr ( r, 'ribbonColor' ) and r.ribbonColor != None :\n chain_colors[r.id.chainId] = r.ribbonColor.rgba()\n mol_ch_colors[mol.name + \"_\" + r.id.chainId] = r.ribbonColor.rgba()\n else :\n if not r.id.chainId in chain_colors :\n clr = ( rand()*.7, rand()*.7, rand()*.7, 1.0 )\n chain_colors[r.id.chainId] = clr\n mol_ch_colors[mol.name + \"_\" + r.id.chainId] = clr\n\n\n ci = 1\n\n for cid, clr in chain_colors.iteritems() :\n\n umsg ( \"Grouping with chains... making map for chain %d/%d of mol %d/%d\" % (ci,len(chain_colors),i+1,len(mols)) )\n ci += 1\n nchains += 1\n\n basename = os.path.splitext ( mol.name )[0]\n #cname = basename + \"_\" + cid\n cname = basename + \"_\" + cid\n #cname = basename.split (\"__\")[-1]\n\n sel_str = \"#%d:.%s\" % (mol.id, cid)\n print \"%s [%s]\" % (cname, sel_str),\n\n cmd = \"molmap %s %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( sel_str, res, grid )\n chimera.runCommand ( cmd )\n\n if cid.lower() == cid :\n cid = \"_\" + cid\n\n cname = basename + \"_\" + cid\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n #print \" - found\", mod.name\n mv = mod\n mv.name = cname\n break\n\n if mv == None :\n umsg (\" - error - could not find chain map\")\n return\n\n imap = self.MapIndexesInMap ( dmap, mv )\n\n chain_maps.append ( [mv, imap] )\n mv.mol_name = mol.name\n mv.chain_id = cid # cname\n\n #break\n\n # print chain_maps\n\n rgroups = {}\n\n print \" - %d regions\" % len(smod.regions)\n\n for ri, reg in enumerate ( smod.regions ) :\n\n if ri % 100 == 0 :\n umsg ( \"Grouping regions... 
%d/%d \" % (ri+1, len(smod.regions) ) )\n\n max_ov = 0.0\n max_ov_chm = None\n for chmImap in chain_maps :\n chm, imap = chmImap\n ipoints = reg.points()\n noverlap = 0\n for i,j,k in ipoints :\n if (i,j,k) in imap:\n noverlap += 1\n\n #print \" - \", chm.name, noverlap\n\n ov = float(noverlap) / reg.point_count()\n if ov > max_ov :\n max_ov = ov\n max_ov_chm = chm\n\n if max_ov_chm :\n if not max_ov_chm in rgroups : # max_ov_chm.chain_id?\n rgroups[max_ov_chm] = []\n rgroups[max_ov_chm].append ( reg )\n\n\n import regions\n from Segger.extract_region_dialog import dialog as exdialog\n\n base = \"\"\n if exdialog() != None :\n base = exdialog().saveMapsBaseName.get()\n\n for mv, regs in rgroups.iteritems () :\n\n #cid = chid.split(\"_\")[-1]\n\n print \"%s.%s - %d regions\" % (mv.mol_name, mv.chain_id, len(regs))\n\n jregs = regions.TopParentRegions(regs)\n jreg = smod.join_regions ( jregs )\n jreg.color = (.7,.7,.7,1)\n mvId = mv.mol_name + \"_\" + mv.chain_id\n if mvId in mol_ch_colors :\n jreg.color = mol_ch_colors[mvId]\n jreg.make_surface(None, None, smod.regions_scale)\n jreg.chain_id = mv.chain_id\n\n #jreg.chain_id = chid\n\n if 0 and exdialog() != None :\n exdialog().saveMapsBaseName.set( base % cid )\n exdialog().Extract2 ( dmap, dmap, smod, [jreg] )\n\n\n for chmImap in chain_maps :\n chimera.openModels.close ( chmImap )\n\n\n umsg ( \"Done - total %d chains in %d visible Molecules\" % (nchains,len(mols)) )\n\n\n def MaskWithSel ( self ) :\n\n selats = chimera.selection.currentAtoms()\n print \"%d selected atoms\" % len(selats)\n\n dmap = None\n for m in chimera.openModels.list() :\n if m.display == True and type(m) == VolumeViewer.volume.Volume :\n dmap = m\n break\n\n if dmap == None :\n return\n\n print \"map: %s\" % dmap.name\n\n\n import _multiscale\n points = _multiscale.get_atom_coordinates ( selats, transformed = True )\n\n import _contour\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n\n s = dmap.data.step[0]\n s2 = numpy.sqrt ( s*s + s*s + s*s )\n mdata = VolumeData.zone_masked_grid_data ( dmap.data, points, numpy.sqrt(s2) )\n\n from VolumeFilter import gaussian\n gvm = gaussian.gaussian_convolution ( mdata.full_matrix(), (.1,.1,.1) )\n #gvm = gvol.full_matrix()\n\n gdata = VolumeData.Array_Grid_Data ( gvm, dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name = dmap.name + \"_m\" )\n nvg = VolumeViewer.volume.volume_from_grid_data ( gdata )\n nvg.name = dmap.name + \"___\"\n\n\n\n\n\n def GroupRegionsByMols ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n #fmap = self.MoleculeMap()\n #if fmap == None : return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n print \"---\"\n\n #mol = fmap.mols[0]\n #print mol.name\n\n #chain_colors = RandColorChains ( mol )\n\n chain_maps = []\n\n res = float ( self.simRes.get() )\n grid = float ( self.simGridSp.get() )\n\n print \"_____________ res %2f _______ grid %.2f ______________\" % (res, grid)\n\n\n for mol in chimera.openModels.list() :\n\n if type(mol) != chimera.Molecule or mol.display == False : continue\n\n chain_colors = RandColorChains ( mol )\n\n basename = os.path.splitext ( mol.name )[0]\n cname = basename\n sel_str = \"#%d\" % (mol.id)\n print \"%s [%s]\" % (mol.name, sel_str),\n\n cmd = \"molmap %s %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( sel_str, res, grid )\n chimera.runCommand ( cmd )\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = 
mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n #print \" - found\", mod.name\n mv = mod\n mv.name = cname\n break\n\n if mv == None :\n umsg (\" - error - could not find chain map\")\n return\n\n imap = self.MapIndexesInMap ( dmap, mv )\n\n chain_maps.append ( [mv, imap] )\n mv.chain_id = cname\n\n #break\n\n # print chain_maps\n\n rgroups = {}\n\n print \" - %d regions\" % len(smod.regions)\n\n for ri, reg in enumerate ( smod.regions ) :\n\n if ri % 100 == 0 :\n status ( \" %d/%d \" % (ri+1, len(smod.regions) ) )\n\n max_ov = 0.0\n max_ov_chm = None\n for chmImap in chain_maps :\n chm, imap = chmImap\n ipoints = reg.points()\n noverlap = 0\n for i,j,k in ipoints :\n if (i,j,k) in imap:\n noverlap += 1\n\n #print \" - \", chm.name, noverlap\n\n ov = float(noverlap) / reg.point_count()\n if ov > 0.8 and ov > max_ov :\n max_ov = ov\n max_ov_chm = chm\n\n if max_ov_chm :\n try : rgroups[max_ov_chm.chain_id]\n except : rgroups[max_ov_chm.chain_id] = []\n rgroups[max_ov_chm.chain_id].append ( reg )\n\n\n import regions\n\n for chid, regs in rgroups.iteritems () :\n print \"Chain %s - %d regions\" % (chid, len(regs))\n\n jregs = regions.TopParentRegions(regs)\n jreg = smod.join_regions ( jregs )\n jreg.make_surface(None, None, smod.regions_scale)\n\n\n for chmImap in chain_maps :\n chimera.openModels.close ( chmImap )\n\n\n\n def GroupRegionsByFittedMols ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n #fmap = self.MoleculeMap()\n #if fmap == None : return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n print \"---\"\n\n #mol = fmap.mols[0]\n #print mol.name\n\n #chain_colors = RandColorChains ( mol )\n\n chain_maps = []\n\n res = float ( self.simRes.get() )\n grid = float ( self.simGridSp.get() )\n\n print \"_____________ res %2f _______ grid %.2f _________________________________\" % (res, grid)\n\n\n #lfits = self.selected_listbox_fits()\n\n lfits = self.list_fits\n\n if len(lfits) == 0:\n umsg ( \"No selected fitted molecules\" )\n return\n\n umsg('Looking at %d fitted molecules' % len(lfits))\n\n fit_i = 1\n for fmap, dmap, mat, corr, aI, bI, bC, bO, regions in lfits:\n\n for mol in fmap.mols :\n if mol.__destroyed__:\n umsg('Fit molecule was closed - ')\n return\n\n self.place_molecule(fmap, mat, dmap)\n\n mol = fmap.mols[0]\n\n basename = os.path.splitext ( mol.name )[0]\n cname = basename\n sel_str = \"#%d\" % (mol.id)\n print \"%s [%s]\" % (mol.name, sel_str),\n\n cmd = \"molmap %s %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( sel_str, res, grid )\n chimera.runCommand ( cmd )\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n #print \" - found\", mod.name\n mv = mod\n mv.name = cname\n break\n\n if mv == None :\n umsg (\" - error - could not find chain map\")\n return\n\n imap = self.MapIndexesInMap ( dmap, mv )\n\n chain_maps.append ( [fit_i, imap] )\n #mv.chain_id = cname\n fit_i += 1\n chimera.openModels.close ( mv )\n\n #break\n\n # print chain_maps\n\n rgroups = {}\n\n print \" - %d regions\" % len(smod.regions)\n\n for ri, reg in enumerate ( smod.regions ) :\n\n if ri % 1000 == 0 :\n #print \" %d/%d \" % (ri, len(smod.regions) )\n status ( \" %d/%d \" % (ri, len(smod.regions) ) )\n\n max_ov = 0.1\n max_ov_chm = 0\n for chmImap in chain_maps :\n fit_i, imap = chmImap\n ipoints = reg.points()\n noverlap = 0\n for i,j,k in ipoints :\n if 
(i,j,k) in imap:\n noverlap += 1\n\n\n ov = float(noverlap) / reg.point_count()\n\n #print \" - fit %d to reg %d, num ov %d, ov %.2f \" % (fit_i, ri, noverlap, ov)\n\n if ov > max_ov :\n max_ov = ov\n max_ov_chm = fit_i\n\n if max_ov_chm > 0 :\n try : rgroups[max_ov_chm]\n except : rgroups[max_ov_chm] = []\n rgroups[max_ov_chm].append ( reg )\n\n\n import regions\n\n print len( rgroups.keys() ), \"groups\"\n\n sregs = []\n\n for chid, regs in rgroups.iteritems () :\n print \"Fit %d - %d regions\" % (chid, len(regs))\n\n jregs = regions.TopParentRegions(regs)\n jreg = smod.join_regions ( jregs )\n sregs.append ( jreg )\n jreg.make_surface(None, None, smod.regions_scale)\n\n\n return\n\n #sel_regs = set ( smod.selected_regions() )\n surfs = [r.surface_piece for r in sregs\n if not r in sel_regs and r.surface_piece]\n\n chimera.selection.clearCurrent ()\n chimera.selection.addCurrent ( surfs )\n\n smod.remove_regions ( regs, remove_children = True )\n\n\n\n\n def GroupRegionsByVisiMaps ( self ) :\n\n umsg ( \"Grouping by visible maps...\" )\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n #fmap = self.MoleculeMap()\n #if fmap == None : return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n print \"---\"\n\n #mol = fmap.mols[0]\n #print mol.name\n\n #chain_colors = RandColorChains ( mol )\n\n chain_maps = []\n\n res = float ( self.simRes.get() )\n grid = float ( self.simGridSp.get() )\n\n print \"_____________ res %2f _______ grid %.2f _________________________________\" % (res, grid)\n\n\n for mmap in chimera.openModels.list() :\n\n if type(mmap) != VolumeViewer.volume.Volume or mmap.display == False : continue\n\n print \" -- map: \", mmap.name\n\n imap = self.MapIndexesInMap ( dmap, mmap )\n chain_maps.append ( [mmap, imap] )\n mmap.chain_id = mmap.name\n\n #break\n\n # print chain_maps\n\n rgroups = {}\n\n print \" - %d regions\" % len(smod.regions)\n\n for ri, reg in enumerate ( smod.regions ) :\n\n if ri % 100 == 0 :\n status ( \" %d/%d \" % (ri+1, len(smod.regions) ) )\n print \",\",\n\n max_ov = 0.0\n max_ov_chm = None\n for chmImap in chain_maps :\n chm, imap = chmImap\n ipoints = reg.points()\n noverlap = 0\n for i,j,k in ipoints :\n if (i,j,k) in imap:\n noverlap += 1\n\n #print \" - \", chm.name, noverlap\n\n ov = float(noverlap) / reg.point_count()\n if ov > max_ov :\n max_ov = ov\n max_ov_chm = chm\n\n if max_ov_chm :\n try : rgroups[max_ov_chm.chain_id]\n except : rgroups[max_ov_chm.chain_id] = []\n rgroups[max_ov_chm.chain_id].append ( reg )\n\n\n import regions\n print \".\"\n\n for chid, regs in rgroups.iteritems () :\n print \"Chain %s - %d regions\" % (chid, len(regs))\n\n jregs = regions.TopParentRegions(regs)\n jreg = smod.join_regions ( jregs )\n jreg.make_surface(None, None, smod.regions_scale)\n\n umsg ( \"Done grouping by visible maps\" )\n\n\n #for chmImap in chain_maps :\n # chimera.openModels.close ( chmImap )\n\n\n\n\t# -----------------------------------------------------------------------------------------------------\n\n\n def ZeroMapBySel ( self ) :\n\n print \"0\"\n\n\n def ZeroMapByMols ( self ) :\n\n print \"0\"\n\n\n\n def ZeroMapFittedMols ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n vmat = dmap.full_matrix().copy()\n\n print \"---\"\n\n res = float ( self.simRes.get() )\n grid = float ( self.simGridSp.get() )\n\n print \"_____________ res %2f _______ grid %.2f _________________________________\" % (res, grid)\n\n\n #lfits = 
self.selected_listbox_fits()\n\n lfits = self.list_fits\n\n if len(lfits) == 0:\n umsg ( \"No selected fitted molecules\" )\n return\n\n umsg('Looking at %d fitted molecules' % len(lfits))\n\n\n for fmap, dmap, mat, corr, aI, bI, bC, bO, regions in lfits:\n\n for mol in fmap.mols :\n if mol.__destroyed__:\n umsg('Fit molecule was closed - ')\n return\n\n self.place_molecule(fmap, mat, dmap)\n\n mol = fmap.mols[0]\n\n basename = os.path.splitext ( mol.name )[0]\n cname = basename\n sel_str = \"#%d\" % (mol.id)\n print \"%s [%s]\" % (mol.name, sel_str),\n\n cmd = \"molmap %s %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( sel_str, res, grid )\n chimera.runCommand ( cmd )\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n #print \" - found\", mod.name\n mv = mod\n mv.name = cname\n break\n\n if mv == None :\n\t\t\t\tumsg (\" - error - could not find chain map\")\n\t\t\t\treturn\n\n self.ZeroMatWitMap ( vmat, dmap, mv )\n chimera.openModels.close ( mv )\n\n #break\n\n # print chain_maps\n\n nname = os.path.splitext(dmap.name)[0] + \"_zeroed\"\n\n from VolumeData import Array_Grid_Data\n mgrid = Array_Grid_Data ( vmat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name=nname)\n import VolumeViewer\n nv = VolumeViewer.volume_from_grid_data ( mgrid, show_data = False, show_dialog = False )\n nv.name = nname\n #nv.copy_settings_from(volume)\n nv.show()\n\n\n\n def ZeroMapVisMols ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n vmat = dmap.full_matrix().copy()\n\n for m in chimera.openModels.list() :\n if type(m) == chimera.Molecule and m.display == True :\n print m.name\n vmat = self.ZeroMatWitMol ( vmat, dmap, m )\n\n\n nname = os.path.splitext(dmap.name)[0] + \"_zeroed\"\n\n from VolumeData import Array_Grid_Data\n mgrid = Array_Grid_Data ( vmat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name=nname)\n\n import VolumeViewer\n #nv = VolumeViewer.volume_from_grid_data ( mgrid, show_data = False, show_dialog = False )\n\n try : df_v = VolumeViewer.volume.add_data_set ( mgrid, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( mgrid )\n\n df_v.name = nname\n #nv.copy_settings_from(volume)\n df_v.show()\n\n\n\n\n def ValuesInMap ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n dmat = dmap.full_matrix().copy()\n\n chain_maps = []\n\n res = float ( self.simRes.get() )\n grid = float ( self.simGridSp.get() )\n\n print \"_____________ res %2f _______ grid %.2f _________________________________\" % (res, grid)\n\n\n for mmap in chimera.openModels.list() :\n\n if type(mmap) != VolumeViewer.volume.Volume or mmap.display == False : continue\n\n print \" -- map: \", mmap.name\n\n imap = self.MapIndexesInMap ( dmap, mmap )\n chain_maps.append ( [mmap, imap] )\n mmap.chain_id = mmap.name\n\n #break\n\n sumd = 0\n n = 0\n imap = set()\n values = []\n\n for cm, points in chain_maps :\n print \" --- at map --- : \" + cm.name\n #n += len(points)\n for fi, fj, fk in points :\n val = dmat[fk,fj,fi]\n if val < 17 :\n sumd += dmat[fk,fj,fi]\n values.append ( dmat[fk,fj,fi] )\n #print dmat[fk,fj,fi]\n n += 1.0\n\n avg = sumd / len(points)\n\n print \" Average value: %.3f\"%avg + \" at %d\"%len(points) + \" points\"\n\n return;\n\n import numpy\n min = numpy.min(values)\n amax = numpy.max(values)\n max = numpy.min ( [amax, 16] )\n d = 1\n print \"Min: 
%.2f, max: %.2f (%.2f), step %.2f, avg %.2f\" % (min, amax, max, d, numpy.average(values))\n buckets = numpy.zeros ( (max - min) / d )\n for v in values :\n if v > max :\n continue\n r = (v - min) / (max - min)\n bi = int ( numpy.floor ( r * (len(buckets)-1) ) )\n buckets[bi] += 1\n\n for bi, bnum in enumerate ( buckets ) :\n bv = float (bi) / float(len(buckets)) * (max - min) + min\n print \"%.1f,%d\" % (bv,bnum)\n\n\n\n\n\n def MaskMapWithSel ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = None\n [fmol, fmap2] = self.MolOrMapSelected ();\n if fmol != None :\n print \" - got molecule map\"\n fmap = self.MoleculeMap()\n elif fmap2 != None :\n print \" - got map map\"\n fmap = fmap2\n else :\n umsg ('Please select an open molecule or map in the field above')\n return\n\n df_mat = self.Map2Map ( fmap, dmap )\n\n print \" - using surf level %.5f for mask\" % fmap.surface_levels[0]\n\n if 1 :\n\n try :\n res = float ( self.simRes.get() )\n except :\n umsg ( \"Invalid resolution entered, please enter a number\" )\n return\n\n\n s = dmap.data.step # A/pixel\n diag_l = numpy.sqrt ( s[0]*s[0] + s[1]*s[1] + s[2]*s[2] ) # A/pixel\n num_it = res / diag_l # how many iterations will reach the desired width\n numit = int ( numpy.ceil ( num_it ) )\n\n print \" - using res %.4f for dropoff, diag is %.3f, #it: %d\" % (res, diag_l, numit)\n\n in_mask = numpy.where ( df_mat > fmap.surface_levels[0], numpy.ones_like(df_mat), numpy.zeros_like(df_mat) )\n out_mask = numpy.where ( in_mask > 0, numpy.zeros_like(in_mask), numpy.ones_like(in_mask) )\n gvm = in_mask.copy();\n for i in range (numit) :\n nv_1 = numpy.roll(gvm, 1, axis=0)\n nv_2 = numpy.roll(gvm, -1, axis=0)\n nv_3 = numpy.roll(gvm, 1, axis=1)\n nv_4 = numpy.roll(gvm, -1, axis=1)\n nv_5 = numpy.roll(gvm, 1, axis=2)\n nv_6 = numpy.roll(gvm, -1, axis=2)\n gvm = 1.0/6.0 * ( nv_1 + nv_2 + nv_3 + nv_4 + nv_5 + nv_6 )\n gvm = out_mask * gvm + in_mask\n umsg (\"Adding drop-off - iteration %d\" % i)\n\n df_mat = gvm\n\n\n mmat = dmap.data.full_matrix() * df_mat\n\n df_data = VolumeData.Array_Grid_Data ( df_mat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name=(dmap.name + \"__MaskedWith__\" + fmap.name) )\n\n try : df_v = VolumeViewer.volume.add_data_set ( df_data, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n\n\n if 0 :\n mapMean, mapStDev = MapStats ( df_v )\n df_v = AddNoiseToMap ( df_v, mapMean, mapStDev / 3.0 )\n\n\n df_v.name = dmap.name + \"__MaskedWith__\" + fmap.name\n df_v.openState.xform = dmap.openState.xform\n\n\n\n\n\n\t# -----------------------------------------------------------------------------------------------------\n\n\n\n\n def MolOrMapSelected ( self ) :\n\n label = self.struc.get()\n\n if len(label) == 0 :\n umsg ( \"No structure selected\" )\n return [None, None]\n\n mod_num = label [ label.rfind(\"(\")+1 : label.rfind(\")\") ]\n\n if len(mod_num) == 0 :\n # this isn't possible given a (#) is added to each name...\n umsg ( \"An internal error that shouldn't happen did.\" )\n return [None, None]\n\n sel_str = \"#\" + mod_num\n\n fmol = None\n try :\n fmol = chimera.selection.OSLSelection ( sel_str ).molecules()[0]\n except :\n print ( \"Selected model is not a molecule...\" )\n\n fmap = None\n try :\n fmap = chimera.selection.OSLSelection ( sel_str ).models()[0]\n except :\n print ( \"Selected model is not a map...\" )\n\n return [fmol, fmap]\n\n\n\n def Map2Map ( self, densitiesFromMap, toGridOfMap, mask = False ) :\n\n #print 
\"Taking densities from %s with grid of %s\" % ( densitiesFromMap.name, toGridOfMap.name )\n\n #mmc = fmap.writable_copy ( require_copy = True )\n #mmc.name = rname\n #print \" - cloned\", fmap.name\n\n fmap = toGridOfMap\n dmap = densitiesFromMap\n\n import _contour\n n1, n2, n3 = fmap.data.size[0], fmap.data.size[1], fmap.data.size[2]\n f_points = VolumeData.grid_indices( (n1,n2,n3), numpy.single ) # i,j,k indices\n _contour.affine_transform_vertices( f_points, fmap.data.ijk_to_xyz_transform )\n\n d_vals = dmap.interpolated_values ( f_points, fmap.openState.xform )\n df_mat = d_vals.reshape( (n3,n2,n1) )\n\n if mask :\n f_mat = fmap.data.full_matrix()\n f_mask = numpy.where ( f_mat > fmap.surface_levels[0], numpy.ones_like(f_mat), numpy.zeros_like(f_mat) )\n df_mat = df_mat * f_mask\n\n return df_mat\n\n\n\n\n def Map2MapResize (self, fmap, dmap) :\n\n import axes\n fpoints, weights = axes.map_points ( fmap )\n print \"Fit map - got %d points in contour\" % len (fpoints)\n\n from _contour import affine_transform_vertices as transform_vertices\n #print \"Fit map - xf: \", fmap.openState.xform\n transform_vertices( fpoints, Matrix.xform_matrix( fmap.openState.xform ) )\n #print \"Seg map - xf: \", dmap.openState.xform\n transform_vertices( fpoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n transform_vertices ( fpoints, dmap.data.xyz_to_ijk_transform )\n #print \"points in %s ref:\" % dmap.name, fpoints\n\n bound = 5\n li,lj,lk = numpy.min ( fpoints, axis=0 ) - (bound, bound, bound)\n hi,hj,hk = numpy.max ( fpoints, axis=0 ) + (bound, bound, bound)\n\n n1 = hi - li + 1\n n2 = hj - lj + 1\n n3 = hk - lk + 1\n\n #print \" - bounds - %d %d %d --> %d %d %d --> %d %d %d\" % ( li, lj, lk, hi, hj, hk, n1,n2,n3 )\n\n #nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )\n #dmat = dmap.full_matrix()\n\n nstep = (fmap.data.step[0], fmap.data.step[1], fmap.data.step[2] )\n #nstep = (fmap.data.step[0]/2.0, fmap.data.step[1]/2.0, fmap.data.step[2]/2.0 )\n\n nn1 = int ( round (dmap.data.step[0] * float(n1) / nstep[0]) )\n nn2 = int ( round (dmap.data.step[1] * float(n2) / nstep[1]) )\n nn3 = int ( round (dmap.data.step[2] * float(n3) / nstep[2]) )\n\n O = dmap.data.origin\n print \" - %s origin:\" % dmap.name, O\n nO = ( O[0] + float(li) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + float(lk) * dmap.data.step[2] )\n\n print \" - new map origin:\", nO\n\n ox = round ( nO[0]/dmap.data.step[0] ) * dmap.data.step[0]\n oy = round ( nO[1]/dmap.data.step[1] ) * dmap.data.step[1]\n oz = round ( nO[2]/dmap.data.step[2] ) * dmap.data.step[2]\n\n nO = ( ox, oy, oz )\n\n print \" - new map origin:\", nO\n\n\n nmat = numpy.zeros ( (nn1,nn2,nn3), numpy.float32 )\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )\n\n #print \" - fmap grid dim: \", numpy.shape ( fmap.full_matrix() )\n #print \" - new map grid dim: \", numpy.shape ( nmat )\n\n npoints = grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices\n transform_vertices ( npoints, ndata.ijk_to_xyz_transform )\n\n dvals = fmap.interpolated_values ( npoints, dmap.openState.xform )\n #dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )\n #nze = numpy.nonzero ( dvals )\n\n nmat = dvals.reshape( (nn3,nn2,nn1) )\n #f_mat = fmap.data.full_matrix()\n #f_mask = numpy.where ( f_mat > fmap.surface_levels[0], numpy.ones_like(f_mat), numpy.zeros_like(f_mat) )\n #df_mat = df_mat * f_mask\n\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, nstep, dmap.data.cell_angles )\n try : nv = 
VolumeViewer.volume.add_data_set ( ndata, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n\n fmap_base = os.path.splitext(fmap.name)[0]\n dmap_base = os.path.splitext(dmap.name)[0]\n fmap_path = os.path.splitext (fmap.data.path)[0]\n dmap_path = os.path.splitext (dmap.data.path)[0]\n\n nv.name = fmap_base + \"__in__\" + dmap_base\n nv.openState.xform = dmap.openState.xform\n\n #npath = dmap_path + fnamesuf\n #nv.write_file ( npath, \"mrc\" )\n #print \"Wrote \", npath\n\n return nv\n\n\n\n def TakeDMap_with_FMap ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = None\n [fmol, fmap2] = self.MolOrMapSelected ();\n if fmol != None :\n print \" - got molecule map\"\n fmap = self.MoleculeMap()\n elif fmap2 != None :\n print \" - got map map\"\n fmap = fmap2\n else :\n umsg ('Please select an open molecule or map in the field above')\n return\n\n\n df_mat = self.Map2Map ( dmap, fmap )\n df_data = VolumeData.Array_Grid_Data ( df_mat, fmap.data.origin, fmap.data.step, fmap.data.cell_angles )\n\n try : df_v = VolumeViewer.volume.add_data_set ( df_data, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n df_v.name = dmap.name + \"_in_\" + fmap.name\n df_v.openState.xform = fmap.openState.xform\n\n\n\n def TakeFMap_with_DMap0 ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = None\n [fmol, fmap2] = self.MolOrMapSelected ();\n if fmol != None :\n print \" - got molecule map\"\n fmap = self.MoleculeMap()\n elif fmap2 != None :\n print \" - got map map\"\n fmap = fmap2\n else :\n umsg ('Please select an open molecule or map in the field above')\n return\n\n df_mat = self.Map2Map ( fmap, dmap )\n df_data = VolumeData.Array_Grid_Data ( df_mat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n\n try : df_v = VolumeViewer.volume.add_data_set ( df_data, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n\n\n if 0 :\n mapMean, mapStDev = MapStats ( df_v )\n df_v = AddNoiseToMap ( df_v, mapMean, mapStDev / 3.0 )\n\n\n df_v.name = fmap.name + \"_in_\" + dmap.name\n df_v.openState.xform = dmap.openState.xform\n\n\n\n def TakeFMap_with_DMapN ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = None\n [fmol, fmap2] = self.MolOrMapSelected ();\n if fmol != None :\n print \" - got molecule map\"\n fmap = self.MoleculeMap()\n elif fmap2 != None :\n print \" - got map map\"\n fmap = fmap2\n else :\n umsg ('Please select an open molecule or map in the field above')\n return\n\n df_mat = self.Map2Map ( fmap, dmap )\n df_data = VolumeData.Array_Grid_Data ( df_mat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n\n try : df_v = VolumeViewer.volume.add_data_set ( df_data, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n\n\n if 1 :\n mapMean, mapStDev = MapStats ( df_v )\n df_v = AddNoiseToMap ( df_v, mapMean, mapStDev / 3.0 )\n\n\n df_v.name = fmap.name + \"_in_\" + dmap.name\n df_v.openState.xform = dmap.openState.xform\n\n\n\n\n def TakeFMap_with_DMap ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = None\n [fmol, fmap2] = self.MolOrMapSelected ();\n if fmol != None :\n print \" - got molecule map\"\n fmap = self.MoleculeMap()\n elif fmap2 != None :\n print \" - got map map\"\n fmap = fmap2\n else :\n umsg ('Please select an open molecule or map in the 
field above')\n return\n\n nv = self.Map2MapResize ( fmap, dmap )\n\n\n\n\n def FitAllVisMaps ( self ) :\n\n from VolumeViewer import Volume\n from chimera import Molecule\n mlist = OML(modelTypes = [Volume,Molecule])\n for m in mlist :\n label = m.name + \" (%d)\" % m.id\n print \"---------------------\", label, \"---------------------\"\n\n if m.display == False :\n continue\n\n self.struc.set(label)\n self.cur_mol = m\n self.Fit()\n\n\n\n def AvgFMaps ( self ) :\n\n from VolumeViewer import Volume\n mlist = OML(modelTypes = [Volume])\n\n fmap = None\n avgMat = None\n N = 0.0\n\n for m in mlist :\n if m.display == True :\n print m.name\n\n if avgMat == None :\n avgMat = m.data.full_matrix()\n fmap = m\n N = 1.0\n else :\n avgMat = avgMat + m.data.full_matrix()\n N = N + 1.0\n\n avgMat = avgMat / N\n\n df_data = VolumeData.Array_Grid_Data ( avgMat, fmap.data.origin, fmap.data.step, fmap.data.cell_angles )\n\n try : df_v = VolumeViewer.volume.add_data_set ( df_data, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n df_v.name = \"Avg\"\n df_v.openState.xform = fmap.openState.xform\n\n return\n\n\n def DifFMaps2 ( self ) :\n\n from VolumeViewer import Volume\n mlist = OML(modelTypes = [Volume])\n\n smaps = []\n for m in mlist :\n if m.display == True :\n print \" - \", m.name\n smaps.append ( m )\n\n\n if len(smaps) != 2 :\n umsg ( \"Need only 2 maps visible\" )\n return\n\n\n m1 = smaps[0]\n m2 = smaps[1]\n\n\n m2_mat = self.Map2Map ( m2, m1 )\n\n difMat = NormalizeMat ( m1.data.full_matrix() ) - NormalizeMat ( m2_mat )\n df_data = VolumeData.Array_Grid_Data ( difMat, m1.data.origin, m1.data.step, m1.data.cell_angles )\n try : df_v = VolumeViewer.volume.add_data_set ( df_data, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n df_v.name = m1.name + \"__-__\" + m2.name\n df_v.openState.xform = m1.openState.xform\n\n difMat = NormalizeMat ( m2_mat ) - NormalizeMat ( m1.data.full_matrix() )\n df_data = VolumeData.Array_Grid_Data ( difMat, m1.data.origin, m1.data.step, m1.data.cell_angles )\n try : df_v = VolumeViewer.volume.add_data_set ( df_data, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n df_v.name = m2.name + \"__-__\" + m1.name\n df_v.openState.xform = m1.openState.xform\n\n\n return\n\n\n\n def AvgFMaps2 (self) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n import extract_region_dialog\n reload ( extract_region_dialog )\n\n superSampleBy = 1\n\n if superSampleBy > 1 :\n ndata = extract_region_dialog.MapSS ( dmap, superSampleBy )\n try : nmap = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nmap = VolumeViewer.volume.volume_from_grid_data ( ndata )\n nmap.name = dmap.name + \"_M%d\" % n\n nmap.openState.xform = dmap.openState.xform\n dmap = nmap\n\n\n from VolumeViewer import Volume\n mlist = OML(modelTypes = [Volume])\n\n if 0 :\n print \" -- making base map -- \"\n\n bmap = None\n m0 = None\n for m in mlist :\n if m.display == True :\n print m.name\n if bmap == None :\n bmap = m\n m0 = m\n else :\n bmap0 = bmap\n bmap = self.Map2MapResize (m, bmap)\n if bmap0 != m0 :\n chimera.openModels.close ( [bmap0] )\n\n\n bmap.name = \"base\"\n dmap = bmap\n\n if 0 :\n print \" -- finding base map --- \"\n largestMap = None\n maxD = 0\n for m in mlist :\n if m.display == True :\n d = numpy.sum ( m.data.size )\n if d > maxD :\n maxD = d\n largestMap = m\n\n print \" - largest map: \", largestMap.name\n\n\n\n fmap = None\n [fmol, fmap2] = 
self.MolOrMapSelected ();\n if fmol != None :\n umsg ('Please select the base map in the field at the top - molecule found')\n return\n elif fmap2 != None :\n print \" - got map map\"\n fmap = fmap2\n else :\n umsg ('Please select the base map in the field at the top')\n return\n\n dmap = fmap2\n #dmap.display = False\n umsg ( \"Using as base map: %s\" % dmap.name )\n\n avgMat = dmap.full_matrix()\n fmap = dmap\n\n weights = avgMat.ravel()\n smin = numpy.min (weights)\n sdev = numpy.std (weights)\n savg = numpy.average(weights)\n smax = numpy.max (weights)\n\n print \" - (%.4f,%.4f) |%.4f| +/- %.4f\" % (smin, smax, savg, sdev)\n avgMat = avgMat * (1.0 / smax)\n\n N = 1.0\n\n\n #fmap = None\n #avgMat = None\n #N = 0.0\n\n print \" ----------- Averaging... ---------------------\"\n\n for m in mlist :\n if m.display == True and m != dmap :\n #if m.display == True :\n print m.name\n\n df_mat = self.Map2Map ( m, dmap )\n m.display = False\n\n weights = df_mat.ravel()\n smin = numpy.min (weights)\n sdev = numpy.std (weights)\n savg = numpy.average(weights)\n smax = numpy.max (weights)\n\n thr = 0 # m.surface_levels[0]\n\n print \"%s - (%.4f,%.4f) |%.4f| +/- %.4f -- %.4f\" % (m.name, smin, smax, savg, sdev, thr)\n\n N = N + 1.0\n #df_mat = df_mat - ( numpy.ones_like(df_mat)*thr )\n\n #df_mat = numpy.where ( df_mat > thr, df_mat, numpy.zeros_like(df_mat) )\n\n df_mat = df_mat * (1.0 / smax)\n #df_mat = df_mat + ( numpy.ones_like(df_mat) * 10.0 )\n\n if 0 :\n imhist,bins = numpy.histogram ( df_mat.flatten(), 20, normed=True )\n print \" ------- Histogram:\"\n print imhist\n print \" ------- Bins:\"\n print bins\n\n cdf = imhist.cumsum() #cumulative distribution function\n cdf = 10.0 * cdf / cdf[-1] #normalize\n\n print cdf\n\n #use linear interpolation of cdf to find new pixel values\n #df_mat = numpy.interp ( df_mat.flatten(), bins[:-1], cdf )\n #df_mat = df_mat.reshape(dmap.data.full_matrix().shape)\n\n\n\n\n if avgMat == None :\n avgMat = df_mat\n fmap = m\n else :\n avgMat = avgMat + df_mat\n\n print \" ----------- n=%f ---------------------\" % N\n\n avgMat = avgMat / N\n df_data = VolumeData.Array_Grid_Data ( avgMat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name=\"avg\" )\n try : df_v = VolumeViewer.volume.add_data_set ( df_data, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n df_v.name = \"Avg\"\n df_v.openState.xform = dmap.openState.xform\n\n nv = self.ShrinkMap ( df_v, 1e-3 )\n chimera.openModels.close ( [df_v] )\n\n if 0 :\n stdMat = None\n N = 0.0\n\n for m in mlist :\n if m.display == True :\n print m.name\n\n if m.name == \"Avg\" :\n print \"skipping avg vol\"\n continue\n\n df_mat = self.Map2Map ( m, dmap )\n N = N + 1.0\n\n print \" - sub from avg...\"\n d = numpy.power ( df_mat - avgMat, 2 )\n if stdMat == None :\n stdMat = d\n else :\n stdMat = stdMat + d\n\n stdMat = numpy.power ( stdMat / N, 0.5 )\n df_data = VolumeData.Array_Grid_Data ( stdMat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n try : df_v = VolumeViewer.volume.add_data_set ( df_data, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n df_v.name = \"Stdev\"\n df_v.openState.xform = dmap.openState.xform\n\n\n\n # chimera.openModels.close ( dmap )\n\n def ShrinkMap ( self, dmap, thr ) :\n\n import axes\n dmap.surface_levels[0] = thr\n fpoints, weights = axes.map_points ( dmap )\n #print \"%s / %f - %d points in contour\" % (dmap.name, thr, len (fpoints))\n\n from _contour import affine_transform_vertices as transform_vertices\n 
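# called from AvgFMaps2 above: crop the map to a padded bounding box of the points above 'thr' and resample on the same grid step\n        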
#print \"Fit map - xf: \", fmap.openState.xform\n #transform_vertices( fpoints, Matrix.xform_matrix( fmap.openState.xform ) )\n\n #print \"Seg map - xf: \", dmap.openState.xform\n #transform_vertices( fpoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n transform_vertices ( fpoints, dmap.data.xyz_to_ijk_transform )\n #print \"points in %s ref:\" % dmap.name, fpoints\n\n bound = 4\n li,lj,lk = numpy.min ( fpoints, axis=0 ) - (bound, bound, bound)\n hi,hj,hk = numpy.max ( fpoints, axis=0 ) + (bound, bound, bound)\n\n n1 = int(hi - li + 1)\n n2 = int(hj - lj + 1)\n n3 = int(hk - lk + 1)\n\n #print \" - bounds - %d %d %d --> %d %d %d --> %d %d %d\" % ( li, lj, lk, hi, hj, hk, n1,n2,n3 )\n\n #nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )\n #dmat = dmap.full_matrix()\n\n O = dmap.data.origin\n #print \" - %s origin:\" % dmap.name, O\n #print \" - %s step:\" % dmap.name, dmap.data.step\n nO = ( O[0] + float(li) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + float(lk) * dmap.data.step[2] )\n\n #print \" - new map origin:\", nO\n\n nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, dmap.data.step, dmap.data.cell_angles )\n\n #print \" - new map grid dim: \", numpy.shape ( nmat )\n\n npoints = grid_indices ( (n1, n2, n3), numpy.single) # i,j,k indices\n transform_vertices ( npoints, ndata.ijk_to_xyz_transform )\n\n dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )\n #dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )\n #nze = numpy.nonzero ( dvals )\n\n nmat = dvals.reshape( (n3,n2,n1) )\n #f_mat = fmap.data.full_matrix()\n #f_mask = numpy.where ( f_mat > fmap.surface_levels[0], numpy.ones_like(f_mat), numpy.zeros_like(f_mat) )\n #df_mat = df_mat * f_mask\n\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, dmap.data.step, dmap.data.cell_angles, name = dmap.name )\n try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n return nv\n\n\n\n\n def TakeFMapsVis ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n from VolumeViewer import Volume\n mlist = OML(modelTypes = [Volume])\n for m in mlist :\n if m.display == True :\n\n df_mat = self.Map2Map ( m, dmap )\n df_data = VolumeData.Array_Grid_Data ( df_mat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n try : df_v = VolumeViewer.volume.add_data_set ( df_data, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n df_v.openState.xform = dmap.openState.xform\n\n mdir, mfile = os.path.split(m.data.path)\n df_v.name = \"f_\" + mfile\n\n print m.name, \"->\", df_v.name\n\n dpath = mdir + \"/\" + df_v.name\n df_v.write_file ( dpath, \"mrc\" )\n\n\n\n def DifferenceMap ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = None\n [fmol, fmap2] = self.MolOrMapSelected ();\n if fmol != None :\n print \" - using molecule map\"\n fmap = self.MoleculeMap()\n elif fmap2 != None :\n print \" - using map map\"\n fmap = fmap2\n else :\n umsg ('Please select an open molecule or map in the field above')\n return\n\n\n print \"\\n\\nDiff map \", dmap.name, \" <=> \", fmap.name\n\n closeDMap = False\n\n smod = self.CurrentSegmentation()\n regs = smod.selected_regions()\n if len(regs) > 0 :\n dmap = mask_volume( regs, dmap )\n closeDMap = True\n\n\n\n df_mat = self.Map2Map ( fmap, dmap )\n df_data = VolumeData.Array_Grid_Data ( 
df_mat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n\n #MapStats ( dmap )\n #MapDataStats ( df_data )\n\n print \"\"\n print \"Normalizing\", dmap.name\n dmap_data_n = NormalizeData ( dmap.data )\n #dmap_data_n = dmap.data\n\n if 0 :\n try : nv = VolumeViewer.volume.add_data_set ( dmap_data_n, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( dmap_data_n )\n nv.name = os.path.splitext(dmap.name)[0] + \"_norm.mrc\"\n nv.openState.xform = dmap.openState.xform\n #fmapn = NormalizeMap ( fmap )\n\n print \"\"\n print \"Normalizing transferred fit map\"\n df_data_n = NormalizeData ( df_data )\n #df_data_n = df_data\n\n if 0 :\n try : nv = VolumeViewer.volume.add_data_set ( df_data_n, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( df_data_n )\n nv.name = os.path.splitext(dmap.name)[0] + \"_fmap_norm.mrc\"\n nv.openState.xform = dmap.openState.xform\n\n\n diff_mat = numpy.fabs ( dmap_data_n.full_matrix () - df_data_n.full_matrix () )\n weights = diff_mat.ravel()\n smin = numpy.min (weights)\n sdev = numpy.std (weights)\n savg = numpy.average(weights)\n smax = numpy.max(weights)\n\n print \"\"\n print \"Difference map:\"\n #print \" -\", len(nz), \" nonzero\"\n print \" - range: %.3f -> %.3f, avg=%.3f, sdev=%.3f\" % (smin, smax, savg, sdev)\n\n diff_data = VolumeData.Array_Grid_Data ( diff_mat, df_data.origin, df_data.step, df_data.cell_angles )\n\n try : nv = VolumeViewer.volume.add_data_set ( diff_data, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( diff_data )\n\n nv.name = os.path.splitext(dmap.name)[0] + \"_--_\" + fmap.name\n nv.openState.xform = dmap.openState.xform\n\n if closeDMap :\n chimera.openModels.close ( [closeDMap] )\n\n\n\n def IntersectionMap ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = None\n [fmol, fmap2] = self.MolOrMapSelected ();\n if fmol != None :\n print \" - using molecule map\"\n fmap = self.MoleculeMap()\n elif fmap2 != None :\n print \" - using map map\"\n fmap = fmap2\n else :\n umsg ('Please select an open molecule or map in the field above')\n return\n\n\n print \"\\n\\nDiff map \", dmap.name, \" <=> \", fmap.name\n\n closeDMap = False\n\n smod = self.CurrentSegmentation()\n regs = smod.selected_regions()\n if len(regs) > 0 :\n dmap = mask_volume( regs, dmap )\n closeDMap = True\n\n\n df_mat = self.Map2Map ( fmap, dmap )\n df_data = VolumeData.Array_Grid_Data ( df_mat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n\n #MapStats ( dmap )\n #MapDataStats ( df_data )\n\n print \"\"\n print \"Normalizing\", dmap.name\n dmap_data_n = NormalizeData ( dmap.data )\n\n if 0 :\n try : nv = VolumeViewer.volume.add_data_set ( dmap_data_n, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( dmap_data_n )\n nv.name = os.path.splitext(dmap.name)[0] + \"_norm.mrc\"\n nv.openState.xform = dmap.openState.xform\n #fmapn = NormalizeMap ( fmap )\n\n print \"\"\n print \"Normalizing transferred fit map\"\n df_data_n = NormalizeData ( df_data )\n\n if 0 :\n try : nv = VolumeViewer.volume.add_data_set ( df_data_n, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( df_data_n )\n nv.name = os.path.splitext(dmap.name)[0] + \"_fmap_norm.mrc\"\n nv.openState.xform = dmap.openState.xform\n\n\n diff_mat = numpy.fabs ( dmap_data_n.full_matrix () - df_data_n.full_matrix () )\n weights = diff_mat.ravel()\n smin = numpy.min (weights)\n sdev = numpy.std (weights)\n savg = numpy.average(weights)\n smax = 
numpy.max(weights)\n\n print \"\"\n print \"Difference map:\"\n #print \" -\", len(nz), \" nonzero\"\n print \" - range: %.3f -> %.3f, avg=%.3f, sdev=%.3f\" % (smin, smax, savg, sdev)\n\n diff_data = VolumeData.Array_Grid_Data ( diff_mat, df_data.origin, df_data.step, df_data.cell_angles )\n\n try : nv = VolumeViewer.volume.add_data_set ( diff_data, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( diff_data )\n\n nv.name = os.path.splitext(dmap.name)[0] + \"_--_\" + fmap.name\n nv.openState.xform = dmap.openState.xform\n\n if closeDMap :\n chimera.openModels.close ( [closeDMap] )\n\n\n def ShapeMatch ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = None\n [fmol, fmap2] = self.MolOrMapSelected ();\n if fmol != None :\n print \" - using molecule map\"\n fmap = self.MoleculeMap()\n elif fmap2 != None :\n print \" - using map map\"\n fmap = fmap2\n else :\n umsg ('Please select an open molecule or map in the field above')\n return\n\n\n print \"\\n\\nDiff map \", dmap.name, \" <=> \", fmap.name\n\n closeDMap = False\n realDMap = dmap\n\n smod = self.CurrentSegmentation()\n if smod != None :\n regs = smod.selected_regions()\n if len(regs) > 0 :\n dmap = mask_volume( regs, dmap )\n closeDMap = True\n\n\n df_mat = self.Map2Map ( fmap, dmap )\n #df_data = VolumeData.Array_Grid_Data ( df_mat, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n\n\n thr = dmap.surface_levels[0]\n\n umsg (\"Generating 1/-1 map for \" + dmap.name + \" thr: %.3f\" % thr)\n\n m0 = dmap.data.full_matrix()\n m1 = numpy.where ( m0 > realDMap.surface_levels[0], numpy.ones_like(m0)*1, numpy.zeros_like(m0) )\n\n m2 = numpy.where ( df_mat > fmap.surface_levels[0], numpy.ones_like(df_mat)*1, numpy.zeros_like(df_mat) )\n\n mi = m1 * m2\n mu = m1 + m2\n\n if 0 :\n mid = VolumeData.Array_Grid_Data ( mi, realDMap.data.origin, realDMap.data.step, realDMap.data.cell_angles, name=\"inter\" )\n mud = VolumeData.Array_Grid_Data ( mu, realDMap.data.origin, realDMap.data.step, realDMap.data.cell_angles, name=\"union\" )\n\n nv = VolumeViewer.volume_from_grid_data ( mid )\n nv = VolumeViewer.volume_from_grid_data ( mud )\n\n\n nz_int = numpy.shape ( (mi).nonzero () )[1]\n nz_uni = numpy.shape ( (mu).nonzero () )[1]\n\n sm_score = float(nz_int) / float (nz_uni)\n\n print \" - intersection %d, union %d - sm: %.3f\" % (nz_int, nz_uni, sm_score)\n\n\n ndata = VolumeData.Array_Grid_Data ( mi, dmap.data.origin, dmap.data.step, dmap.data.cell_angles )\n\n try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n nv.name = os.path.splitext(dmap.name)[0] + \"_--_\" + fmap.name\n nv.openState.xform = dmap.openState.xform\n\n\n if closeDMap :\n chimera.openModels.close ( [dmap] )\n\n\n\n\n\ndef fit_segments_dialog ( create=False ) :\n\n from chimera import dialogs\n return dialogs.find ( Fit_Segments_Dialog.name, create=create )\n\n\ndef close_fit_segments_dialog ():\n\n from chimera import dialogs\n d = fit_segments_dialog ()\n if d :\n d.toplevel_widget.update_idletasks ()\n d.Close()\n d.toplevel_widget.update_idletasks ()\n\ndef show_fit_segments_dialog ():\n\n from chimera import dialogs\n d = fit_segments_dialog ( create = True )\n # Avoid transient dialog resizing when created and mapped for first time.\n d.toplevel_widget.update_idletasks ()\n d.enter()\n return d\n\n\ndef new_fit_segments_dialog ( closeExisting = True ):\n\n if closeExisting : close_fit_segments_dialog ()\n 
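# then create a fresh dialog and bring it to the front\n    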
show_fit_segments_dialog ()\n\n\n# -----------------------------------------------------------------------------\n#\nfrom chimera import dialogs\ndialogs.register (Fit_Segments_Dialog.name, Fit_Segments_Dialog,\n replace = True)\n\n\n\n# -----------------------------------------------------------------------------\n#\ndef optimize_fits(fpoints, fpoint_weights, mlist, dmap,\n names = None, status_text = None,\n optimize = True, use_threads = False, task=None):\n\n from time import time\n c0 = time()\n\n darray = dmap.data.matrix()\n xyz_to_ijk_tf = dmap.data.xyz_to_ijk_transform\n\n if 0 or use_threads:\n # TODO: report status messages.\n print \" - in parallel!\"\n fits = parallel_fitting(fpoints, fpoint_weights,\n mlist, darray, xyz_to_ijk_tf, optimize)\n else:\n fits = []\n for i, Mi in enumerate(mlist):\n #if names:\n # print \"%d/%d : %s\" % ( i+1, len(mlist), names[i] )\n if task :\n task.updateStatus ( \"Fit %d/%d\" % (i+1, len(mlist)) )\n if status_text:\n status ( \"%s %d/%d\" % (status_text, i+1, len(mlist)) )\n Mfit, corr, stats = FitMap_T(fpoints, fpoint_weights, Mi, darray, xyz_to_ijk_tf, optimize = optimize)\n #print \"Fit \", i, \":\", \"Shift: \", stats['totShift'], \"Angle:\", stats['totAngle'], \"height\", stats['difCC'], \"Final\", corr\n fits.append((Mfit, corr, stats))\n\n c1 = time()\n print '%d fits took %.2f seconds' % (len(fits), c1-c0)\n\n return fits\n\n\n# -----------------------------------------------------------------------------\n#\ndef parallel_fitting(fpoints, fpoint_weights, mlist, darray, xyz_to_ijk_tf,\n optimize = True):\n\n #\n # Choose number of threads to match number of cores. Using more threads\n # creates large inefficiency (2x slower) in Python 2.7 due to context\n # switching overhead (<NAME> lecture).\n #\n # System usually reports twice actual number of cores due to hyperthreading.\n # Hyperthreading doesn't help if fitting tests so half that number.\n #\n import multiprocessing\n threads = multiprocessing.cpu_count()\n print 'parallel fitting using %d threads' % threads\n\n # Avoid periodic Python context switching.\n import sys\n original_check_interval = sys.getcheckinterval()\n sys.setcheckinterval(1000000000)\n\n # Define thread class for fitting.\n from threading import Thread\n class Fit_Thread(Thread):\n def __init__(self, mlist):\n Thread.__init__(self)\n self.mlist = mlist\n def run(self):\n self.fits = [FitMap_T(fpoints, fpoint_weights, m, darray,\n xyz_to_ijk_tf, optimize = optimize)\n for m in self.mlist]\n\n # Starts threads with each calculating an equal number of fits.\n n = len(mlist)\n g = [mlist[(n*c)/threads:(n*(c+1))/threads] for c in range(threads)]\n threads = [Fit_Thread(ml) for ml in g]\n for t in threads:\n t.start()\n\n # Wait for all threads to finish\n for t in threads:\n t.join()\n\n # Restore periodic context switching.\n sys.setcheckinterval(original_check_interval)\n\n # Collect fit results from all threads.\n fits = []\n for t in threads:\n for Mfit, corr, stats in t.fits:\n fits.append((Mfit, corr, stats))\n\n return fits\n\n\n# -----------------------------------------------------------------------------\n#\ndef FitMap_T ( fpoints, fpoint_weights, M, darray, xyz_to_ijk_transform,\n bTrans=True, bRot=True, optimize=True ) :\n\n xyz_to_ijk_tf = multiply_matrices(xyz_to_ijk_transform, M.tolist())\n\n if optimize:\n from FitMap import locate_maximum\n totShift = 0.0\n totAngle = 0.0\n map_values, outside = interpolate_volume_data(fpoints, xyz_to_ijk_tf, darray)\n initOlap, initCC = overlap_and_correlation ( 
fpoint_weights, map_values )\n\n for i in range (5) :\n move_tf, stats = locate_maximum(fpoints, fpoint_weights,\n darray, xyz_to_ijk_tf,\n max_steps = 1000,\n ijk_step_size_min = 0.01,\n ijk_step_size_max = 0.5,\n optimize_translation = bTrans,\n optimize_rotation = bRot,\n metric = 'sum product',\n request_stop_cb = None)\n\n xT, xR = xf_2_M ( chimera_xform ( move_tf ) )\n M = M * xT * xR\n corr = stats['correlation']\n\n #print ' \\t%d steps: d %.3g, r %.3g, cor %f' % (stats['steps'], stats['shift'], stats['angle'], corr )\n\n totShift = totShift + stats['shift']\n totAngle = totAngle + stats['angle']\n\n if ( stats['shift'] < 0.1 and stats['angle'] < 0.1 ) :\n break\n\n xyz_to_ijk_tf = multiply_matrices(xyz_to_ijk_transform, M.tolist())\n\n stats['totAngle'] = totAngle\n stats['totShift'] = totShift\n stats['difCC'] = corr - initCC\n\n else:\n map_values, outside = interpolate_volume_data(fpoints, xyz_to_ijk_tf,\n darray )\n olap, corr = overlap_and_correlation ( fpoint_weights, map_values )\n stats = {}\n\n stats['totAngle'] = 0.0\n stats['totShift'] = 0.0\n stats['difCC'] = 0.0\n\n return M, corr, stats\n\n\ndef molApplyT ( mol, T ) :\n\n xf = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3], True )\n # print xf\n\n mol.COM = chimera.Vector (0,0,0)\n\n for at in mol.atoms :\n c = xf.apply ( at.coord() )\n at.setCoord ( c )\n mol.COM = mol.COM + c.toVector()\n\n mol.COM = mol.COM / float ( len(mol.atoms) )\n\n#\n# Change atom coordinates so the center of mass is at the origin and\n# the principal axes are x, y and z. Make a compensating transformation\n# of the molecule coordinate system so the molecule does not move in the\n# graphics window.\n#\ndef centerMol ( sel_str ):\n\n sel = chimera.selection.OSLSelection (sel_str)\n mols = sel.molecules ()\n atoms = sel.atoms()\n\n if len(mols) == 0 :\n print \"Failed to center molecule\"\n return []\n\n if hasattr(mols[0], 'centered') :\n return mols\n\n umsg ( \"Centering %d structures, %d atoms\" % (len(mols), len(atoms) ) )\n\n points = get_atom_coordinates ( atoms, transformed = False )\n COM, U, S, V = prAxes ( points )\n\n # move COM to origin and align pr. 
axes with XYZ\n tAO = numpy.matrix ( [\n [ 1, 0, 0, -COM[0] ],\n [ 0, 1, 0, -COM[1] ],\n [ 0, 0, 1, -COM[2] ],\n [ 0, 0, 0, 1 ] ] )\n\n tAR = numpy.matrix ( [\n [ V[0,0], V[0,1], V[0,2], 0 ],\n [ V[1,0], V[1,1], V[1,2], 0 ],\n [ V[2,0], V[2,1], V[2,2], 0 ],\n [ 0, 0, 0, 1 ] ] )\n\n # Adjust coordinate system so molecule does not appear to move.\n tf = invert_matrix((tAR*tAO).tolist()[:3])\n\n for fmol in mols :\n\n fmol.COM, fmol.U, fmol.S, fmol.V = COM, U, S, V\n\n print \"Mol %s .%d\" % (fmol.name, fmol.subid)\n molApplyT ( fmol, tAO )\n print \" - COM after translation:\", fmol.COM\n molApplyT ( fmol, tAR )\n print \" - COM after rotation:\", fmol.COM\n\n fmol.openState.localXform(chimera_xform(tf))\n\n fmol.mT = numpy.matrix ( [\n [ 1, 0, 0, 0 ],\n [ 0, 1, 0, 0 ],\n [ 0, 0, 1, 0 ],\n [ 0, 0, 0, 1 ] ] )\n\n fmol.mR = numpy.matrix ( [\n [ 1, 0, 0, 0 ],\n [ 0, 1, 0, 0 ],\n [ 0, 0, 1, 0 ],\n [ 0, 0, 0, 1 ] ] )\n\n fmol.M = fmol.mT * fmol.mR\n\n points = get_atom_coordinates ( atoms, transformed = False )\n\n for fmol in mols :\n fmol.COM, fmol.U, fmol.S, fmol.V = prAxes ( points )\n\n ppoints = points * fmol.U\n\n fmol.BoundRad = numpy.sqrt ( numpy.max ( numpy.sum ( numpy.square (ppoints), 1 ) ) )\n fmol.Extents = numpy.asarray ( numpy.max ( numpy.abs ( ppoints ), 0 ) )[0]\n\n fmol.Extents[0] = fmol.Extents[0] + 5.0\n fmol.Extents[1] = fmol.Extents[1] + 5.0\n fmol.Extents[2] = fmol.Extents[2] + 5.0\n\n fmol.centered = True\n\n umsg ( \"Centered %s .%d (radius %.2fA, extents %.2fA %.2fA %.2fA)\" % (\n fmol.name, fmol.subid, fmol.BoundRad, fmol.Extents[0], fmol.Extents[1], fmol.Extents[2] ) )\n\n return mols\n\n\ndef fit_points(fmap, useThreshold = True):\n\n mat = fmap.data.full_matrix()\n threshold = fmap.surface_levels[0]\n\n if useThreshold == False :\n threshold = -1e9\n print \" - not using threshold\"\n\n import _volume\n points = _volume.high_indices(mat, threshold)\n fpoints = points.astype(numpy.single)\n fpoint_weights = mat[points[:,2],points[:,1],points[:,0]]\n\n nz = numpy.nonzero( fpoint_weights )[0]\n if len(nz) < len (fpoint_weights) :\n fpoints = numpy.take( fpoints, nz, axis=0 )\n fpoint_weights = numpy.take(fpoint_weights, nz, axis=0)\n\n transform_vertices( fpoints, fmap.data.ijk_to_xyz_transform )\n\n if 0 : print \"FitPoints from %s with threshold %.4f, %d nonzero\" % (\n fmap.name, threshold, len(nz) )\n\n return fpoints, fpoint_weights\n\n\n\ndef move_fit_models(fmap, M, dmap_xform):\n\n tXO, tXR = xf_2_M ( dmap_xform )\n T = tXO * tXR * M\n xfA = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n fmap.openState.xform = xfA\n\n print \"moving %d mols to fitted position\" % len(fmap.mols)\n for mol in fmap.mols :\n mol.openState.xform = xfA\n\n\ndef principle_axes_alignments ( points, flips, preM ):\n\n COM, U, S, V = prAxes ( points )\n\n comT = numpy.matrix ( [\n [ 1, 0, 0, COM[0] ],\n [ 0, 1, 0, COM[1] ],\n [ 0, 0, 1, COM[2] ],\n [ 0, 0, 0, 1 ] ] )\n\n mlist = []\n for j in range( len(flips) ) :\n\n af = flips[j]\n\n mR = numpy.matrix ( [\n [ af[0]*U[0,0], af[1]*U[0,1], af[2]*U[0,2], 0 ],\n [ af[0]*U[1,0], af[1]*U[1,1], af[2]*U[1,2], 0 ],\n [ af[0]*U[2,0], af[1]*U[2,1], af[2]*U[2,2], 0 ],\n [ 0, 0, 0, 1 ] ] )\n\n M = comT * mR * preM\n mlist.append(M)\n\n return mlist\n\n#\n# Return list of rotation xforms uniformly distributed rotating about\n# N axis vectors and M angles about each axis.\n#\n# http://www.math.niu.edu/~rusin/known-math/97/spherefaq\n#\ndef uniform_rotation_angles(N, M) :\n\n 
thetas, phis = [], []\n from math import acos, sin, cos, sqrt, pi\n for k in range ( 1, N+1 ) :\n h = -1.0 + ( 2.0*float(k-1)/float(N-1) )\n phis.append ( acos(h) )\n thetas.append ( 0 if k == 1 or k == N else\n (thetas[k-2] + 3.6/sqrt(N*(1.0-h**2.0))) % (2*pi) )\n\n ralist = []\n for theta, phi in zip(thetas, phis):\n for m in range ( M ) :\n rot = 2*pi*float(m)/float(M)\n ralist.append((theta,phi,rot))\n\n return ralist\n\n\ndef rotation_from_angles(theta, phi, rot) :\n\n from math import sin, cos, pi\n v = chimera.Vector (sin(phi)*cos(theta), sin(phi)*sin(theta), cos(phi))\n xfR = chimera.Xform.rotation ( v, rot*180/pi )\n Mt, Mr = xf_2_M ( xfR )\n return Mr\n\n\ndef xf_2_M (xf) :\n\n X = ( numpy.matrix (xf.getOpenGLMatrix()) ).reshape([4,4]).transpose()\n\n tXO = numpy.matrix ( [\n [ 1, 0, 0, X[0,3] ],\n [ 0, 1, 0, X[1,3] ],\n [ 0, 0, 1, X[2,3] ],\n [ 0, 0, 0, 1 ] ] )\n\n tXR = numpy.matrix ( [\n [ X[0,0], X[0,1], X[0,2], 0 ],\n [ X[1,0], X[1,1], X[1,2], 0 ],\n [ X[2,0], X[2,1], X[2,2], 0 ],\n [ 0, 0, 0, 1 ] ] )\n\n return [tXO, tXR]\n\n\ndef xf_2_MM (xf) :\n\n X = ( numpy.matrix (xf.getOpenGLMatrix()) ).reshape([4,4]).transpose()\n\n tXO = numpy.matrix ( [\n [ 1, 0, 0, X[0,3] ],\n [ 0, 1, 0, X[1,3] ],\n [ 0, 0, 1, X[2,3] ],\n [ 0, 0, 0, 1 ] ] )\n\n tXR = numpy.matrix ( [\n [ X[0,0], X[0,1], X[0,2], 0 ],\n [ X[1,0], X[1,1], X[1,2], 0 ],\n [ X[2,0], X[2,1], X[2,2], 0 ],\n [ 0, 0, 0, 1 ] ] )\n\n return tXO * tXR\n\n\n\n\ndef place_map_resample ( fmap, dmap, fnamesuf ) :\n\n # get bounds of points above threshold\n fpoints = grid_indices (fmap.data.size, numpy.single) # i,j,k indices\n transform_vertices ( fpoints, fmap.data.ijk_to_xyz_transform )\n mat = fmap.data.full_matrix ()\n fpoint_weights = numpy.ravel(mat).astype(numpy.single)\n threshold = fmap.surface_levels[0]\n ge = numpy.greater_equal(fpoint_weights, threshold)\n fpoints = numpy.compress(ge, fpoints, 0)\n fpoint_weights = numpy.compress(ge, fpoint_weights)\n nz = numpy.nonzero( fpoint_weights )[0]\n print \" - %d above %f in %s\" % (len(nz), threshold, fmap.name)\n #print \"points: \", fpoints\n #print \"weights: \", fpoint_weights\n\n transform_vertices ( fpoints, Matrix.xform_matrix( fmap.openState.xform ) )\n transform_vertices ( fpoints, Matrix.xform_matrix( dmap.openState.xform.inverse() ) )\n transform_vertices ( fpoints, dmap.data.xyz_to_ijk_transform )\n #print \"points in %s ref:\" % dmap.name, fpoints\n\n bound = 2\n li,lj,lk = numpy.min ( fpoints, axis=0 ) - (bound, bound, bound)\n hi,hj,hk = numpy.max ( fpoints, axis=0 ) + (bound, bound, bound)\n\n n1 = hi - li + 1\n n2 = hj - lj + 1\n n3 = hk - lk + 1\n\n print \" - bounds - %d %d %d --> %d %d %d --> %d %d %d\" % ( li, lj, lk, hi, hj, hk, n1,n2,n3 )\n\n #nmat = numpy.zeros ( (n1,n2,n3), numpy.float32 )\n #dmat = dmap.full_matrix()\n\n nn1 = int ( round (dmap.data.step[0] * float(n1) / fmap.data.step[0]) )\n nn2 = int ( round (dmap.data.step[1] * float(n2) / fmap.data.step[1]) )\n nn3 = int ( round (dmap.data.step[2] * float(n3) / fmap.data.step[2]) )\n\n O = dmap.data.origin\n print \" - %s origin:\" % dmap.name, O\n nO = ( O[0] + float(li) * dmap.data.step[0],\n O[1] + float(lj) * dmap.data.step[1],\n O[2] + float(lk) * dmap.data.step[2] )\n\n print \" - new map origin:\", nO\n\n nmat = numpy.zeros ( (nn1,nn2,nn3), numpy.float32 )\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, fmap.data.step, dmap.data.cell_angles )\n\n print \" - fmap grid dim: \", numpy.shape ( fmap.full_matrix() )\n print \" - new map grid dim: \", numpy.shape ( nmat )\n\n npoints = 
grid_indices ( (nn1, nn2, nn3), numpy.single) # i,j,k indices\n transform_vertices ( npoints, ndata.ijk_to_xyz_transform )\n\n dvals = fmap.interpolated_values ( npoints, dmap.openState.xform )\n #dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )\n #nze = numpy.nonzero ( dvals )\n\n nmat = dvals.reshape( (nn3,nn2,nn1) )\n #f_mat = fmap.data.full_matrix()\n #f_mask = numpy.where ( f_mat > fmap.surface_levels[0], numpy.ones_like(f_mat), numpy.zeros_like(f_mat) )\n #df_mat = df_mat * f_mask\n\n\n\n fmap_base = os.path.splitext(fmap.name)[0]\n dmap_base = os.path.splitext(dmap.name)[0]\n fmap_path = os.path.splitext (fmap.data.path)[0]\n dmap_path = os.path.splitext (dmap.data.path)[0]\n\n\n ndata = VolumeData.Array_Grid_Data ( nmat, nO, fmap.data.step, dmap.data.cell_angles, name=(dmap_base + fnamesuf) )\n try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n\n nv.name = dmap_base + fnamesuf\n nv.openState.xform = dmap.openState.xform\n\n npath = dmap_path + fnamesuf\n nv.write_file ( npath, \"mrc\" )\n print \"Wrote \", npath\n\n return nv\n\n\n\n\ndef CopyMol ( mol ) :\n\n nmol = chimera.Molecule()\n nmol.name = mol.name\n\n aMap = dict()\n clr = ( rand(), rand(), rand() )\n\n for res in mol.residues :\n nres = nmol.newResidue (res.type, chimera.MolResId(res.id.chainId, res.id.position))\n # print \"New res: %s %d\" % (nres.id.chainId, nres.id.position)\n for at in res.atoms :\n nat = nmol.newAtom (at.name, chimera.Element(at.element.number))\n # todo: handle alt\n aMap[at] = nat\n nres.addAtom( nat )\n nat.setCoord ( at.coord() )\n nat.drawMode = nat.Sphere\n nat.color = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 )\n nat.display = True\n nat.altLoc = at.altLoc\n nat.occupancy = at.occupancy\n nat.bfactor = at.bfactor\n\n nres.isHelix = res.isHelix\n nres.isHet = res.isHet\n nres.isSheet = res.isSheet\n nres.isStrand = res.isStrand\n nres.ribbonDisplay = True\n nres.ribbonDrawMode = 2\n nres.ribbonColor = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 );\n\n for bond in mol.bonds :\n nb = nmol.newBond ( aMap[bond.atoms[0]], aMap[bond.atoms[1]] )\n nb.display = nb.Smart\n\n return nmol\n\n\ndef CopyMolX ( mol, xf ) :\n\n nmol = chimera.Molecule()\n nmol.name = mol.name\n\n aMap = dict()\n from random import random as rand\n clr = ( rand(), rand(), rand() )\n\n for res in mol.residues :\n #nres = nmol.newResidue (res.type, chimera.MolResId(res.id.chainId, res.id.position))\n nres = nmol.newResidue (res.type, chimera.MolResId(res.id.chainId, res.id.position))\n # print \"New res: %s %d\" % (nres.id.chainId, nres.id.position)\n for at in res.atoms :\n nat = nmol.newAtom (at.name, chimera.Element(at.element.number))\n aMap[at] = nat\n nres.addAtom( nat )\n nat.setCoord ( xf.apply(at.xformCoord()) )\n nat.altLoc = at.altLoc\n nat.occupancy = at.occupancy\n nat.bfactor = at.bfactor\n if res.isProt or res.isNA :\n nat.display = False\n else :\n nat.display = True\n nat.radius=1.46\n nat.color = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 )\n nat.drawMode = nat.EndCap\n\n nres.isHelix = res.isHelix\n nres.isHet = res.isHet\n nres.isSheet = res.isSheet\n nres.isStrand = res.isStrand\n nres.ribbonDisplay = True\n nres.ribbonDrawMode = 2\n nres.ribbonColor = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 );\n\n for bond in mol.bonds :\n nb = nmol.newBond ( aMap[bond.atoms[0]], aMap[bond.atoms[1]] )\n nb.display = nb.Smart\n\n return nmol\n\n\ndef CopyChain ( mol, nmol, cid, ncid, 
xf ) :\n\n if nmol == None :\n nmol = chimera.Molecule()\n nmol.name = mol.name + \"_sym\"\n\n aMap = dict()\n clr = ( rand(), rand(), rand() )\n\n from SWIM import SetBBAts\n SetBBAts ( nmol )\n\n ligandAtoms = []\n for res in nmol.residues :\n if not res.isProt and not res.isNA :\n for at in res.atoms :\n ligandAtoms.append ( at )\n\n print \" %d ligats\" % len(ligandAtoms)\n\n\n for res in mol.residues :\n\n if res.id.chainId == cid :\n\n isDuplicate = False\n if not res.isProt and not res.isNA :\n for at in res.atoms :\n atP = xf.apply(at.xformCoord())\n for ligAt in ligandAtoms :\n v = ligAt.coord() - atP\n if v.length < 0.2 :\n isDuplicate = True\n break\n if isDuplicate :\n break\n\n if isDuplicate :\n continue\n\n\n nres = nmol.newResidue (res.type, chimera.MolResId(ncid, res.id.position))\n # print \"New res: %s %d\" % (nres.id.chainId, nres.id.position)\n for at in res.atoms :\n nat = nmol.newAtom (at.name, chimera.Element(at.element.number))\n # todo: handle alt\n aMap[at] = nat\n nres.addAtom( nat )\n nat.setCoord ( xf.apply(at.xformCoord()) )\n nat.altLoc = at.altLoc\n nat.occupancy = at.occupancy\n nat.bfactor = at.bfactor\n if res.isProt or res.isNA :\n nat.display = False\n else :\n nat.display = True\n nat.radius=1.46\n nat.color = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 )\n nat.drawMode = nat.EndCap\n\n nres.isHelix = res.isHelix\n nres.isHet = res.isHet\n nres.isSheet = res.isSheet\n nres.isStrand = res.isStrand\n nres.ribbonDisplay = True\n nres.ribbonDrawMode = 2\n nres.ribbonColor = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 );\n\n for bond in mol.bonds :\n at1, at2 = bond.atoms\n if at1 in aMap and at2 in aMap :\n nb = nmol.newBond ( aMap[at1], aMap[at2] )\n nb.display = nb.Smart\n\n return nmol\n\n\n\n\n\ndef map_overlap_and_correlation (map1, map2, above_threshold):\n\n import FitMap\n olap, cor = FitMap.map_overlap_and_correlation ( v1, v2, above_threshold )[:2]\n return olap, cor\n\n\n\ndef overlap_and_correlation ( v1, v2 ):\n\n import FitMap\n olap, cor = FitMap.overlap_and_correlation ( v1, v2 )[:2]\n return olap, cor\n\n\n\ndef reportFitRegions(map_name, regs):\n\n r = ', '.join(str(reg.rid) for reg in regs[:5])\n if len(regs) > 5:\n r += '...'\n umsg ( \"Fitting %s to %d regions (%s)\" % ( map_name, len(regs), r ) )\n\n\ndef getMod ( name ) :\n\n import chimera\n mlist = chimera.openModels.list ()\n for mol in mlist :\n if mol.name == name :\n return mol\n return None\n\n\n\n\ndef ShapeMatchScore ( atoms, dmap, bPrint=False ) :\n\n #fmol = fmap.mol\n #print \"atoms from\", fmol.name\n #points = get_atom_coordinates ( fmol.atoms, transformed = True )\n\n print \"shape match of %d atoms with map %s\" % (len(atoms), dmap.name)\n points = get_atom_coordinates ( atoms, transformed = True )\n transform_vertices ( points, xform_matrix ( dmap.openState.xform.inverse() ) )\n points0 = points.copy()\n transform_vertices ( points, dmap.data.xyz_to_ijk_transform )\n #print \"points in %s ref:\" % dmap.name, fpoints\n\n bound = int ( numpy.ceil (3.0 * max(dmap.data.step)) ) + 2\n print \" - bound:\", bound\n lo = numpy.floor ( numpy.min ( points, axis=0 ) ) - (bound, bound, bound)\n hi = numpy.ceil ( numpy.max ( points, axis=0 ) ) + (bound, bound, bound)\n print \" - min:\", lo\n print \" - max:\", hi\n\n\n O = list ( dmap.data.origin )\n n = list ( dmap.data.size )\n s = dmap.data.step\n print \" - dmap size:\", n\n print \" - dmap O:\", O\n\n for i in (0,1,2) :\n if lo[i] < 0 :\n n[i] -= lo[i]\n O[i] += lo[i]*s[i]\n\n for i in (0,1,2) :\n if hi[i] > 
n[i] :\n n[i] = hi[i]\n\n print \" - dmap size:\", n\n print \" - dmap O:\", O\n\n nmat = numpy.ones ( (n[2], n[1], n[0]) )\n eps = 0.5 * numpy.sqrt ( (s[0] * s[0]) + (s[1] * s[1]) + (s[2] * s[2]) )\n ndata = VolumeData.Array_Grid_Data ( nmat, O, s, dmap.data.cell_angles )\n amap_data = VolumeData.zone_masked_grid_data ( ndata, points0, max(3.0, eps) )\n amat = amap_data.full_matrix()\n if 0 :\n amap = VolumeViewer.volume_from_grid_data ( amap_data )\n amap.name = dmap.name + \"_()_\" + atoms[0].molecule.name\n amap.openState.xform = dmap.openState.xform\n\n npoints = grid_indices ( (int(n[0]), int(n[1]), int(n[2])), numpy.single) # i,j,k indices\n transform_vertices ( npoints, ndata.ijk_to_xyz_transform )\n dvals = dmap.interpolated_values ( npoints, dmap.openState.xform )\n #dvals = numpy.where ( dvals > threshold, dvals, numpy.zeros_like(dvals) )\n #nze = numpy.nonzero ( dvals )\n\n nmat = dvals.reshape( (n[2], n[1], n[0]) )\n nmatm = numpy.where ( nmat > dmap.surface_levels[0], numpy.ones_like(nmat), numpy.zeros_like(nmat) )\n #df_mat = df_mat * f_mask\n\n if 0 :\n ndata = VolumeData.Array_Grid_Data ( nmatm, O, s, dmap.data.cell_angles )\n nmap = VolumeViewer.volume_from_grid_data ( ndata )\n nmap.name = dmap.name + \"_(2)\"\n nmap.openState.xform = dmap.openState.xform\n\n nmatm = nmatm.astype ( numpy.int )\n amat = amat.astype ( numpy.int )\n imat = nmatm & amat\n umat = nmatm | amat\n\n if 0 :\n ndata = VolumeData.Array_Grid_Data ( umat, O, s, dmap.data.cell_angles )\n nmap = VolumeViewer.volume_from_grid_data ( ndata )\n nmap.name = dmap.name + \"_(U)_\" + atoms[0].molecule.name\n nmap.openState.xform = dmap.openState.xform\n\n ndata = VolumeData.Array_Grid_Data ( imat, O, s, dmap.data.cell_angles )\n nmap = VolumeViewer.volume_from_grid_data ( ndata )\n nmap.name = dmap.name + \"_(I)_\" + atoms[0].molecule.name\n nmap.openState.xform = dmap.openState.xform\n\n\n nz_int = numpy.shape ( (imat).nonzero () )[1]\n nz_uni = numpy.shape ( (umat).nonzero () )[1]\n\n sm_score = float(nz_int) / float (nz_uni)\n\n print \" - intersection %d, union %d - sm: %.3f\" % (nz_int, nz_uni, sm_score)\n\n return sm_score\n\n\n\ndef makeMap ( sel_str, res, gridSpacing, clr, map_name ) :\n\n cmd = \"molmap %s %.3f sigmaFactor 0.187 gridSpacing %.3f replace false\" % (\n sel_str, res, gridSpacing )\n #print \">>>\", cmd\n chimera.runCommand ( cmd )\n\n mv = None\n for mod in chimera.openModels.list() :\n ts = mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n #print \" - found\", mod.name\n mv = mod\n mv.name = map_name\n if 0 :\n #print \" - saving to:\", map_name\n mv.write_file ( map_name, \"mrc\" )\n xf = mv.openState.xform\n #print \" - closing:\", map_name\n chimera.openModels.close ( mv )\n mv = VolumeViewer.open_volume_file ( map_name )[0]\n #print \" - opened:\", mv.name\n mv.openState.xform = xf\n break\n\n if mv == None :\n umsg (\"Map not generated.\")\n return\n\n mv.surface_levels[0] = 0.001\n\n ro = VolumeViewer.volume.Rendering_Options()\n mv.update_surface ( False, ro )\n for sp in mv.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 : sp.display = False\n sp.color = ( clr[0], clr[1], clr[2], clr[3] )\n\n return mv\n\n\ndef cc_by_residue ( fmap, dmap, w ) :\n\n rccs = []\n rmap = None\n rmap_pos = None\n rpoints, rpoint_weights = None, None\n if hasattr ( fmap, \"mols\" ) :\n for mol in fmap.mols :\n for ri, res in enumerate ( mol.residues ) :\n\n try :\n cat = res.atomsMap[\"CA\"][0]\n except :\n continue\n\n xf = 
None\n if rmap == None :\n rmap = makeMap ( \"#%d:%d@CA\" % (mol.id, res.id.position)\n , 16.0, 1.0, (.5, .5, .5, 1.0), \"resmap\" )\n rmap_pos = cat.coord()\n rpoints, rpoint_weights = fit_points(rmap)\n xf = rmap.openState.xform\n\n else :\n #new_rmap_pos = cat.coord()\n d = cat.coord() - rmap_pos\n xf = rmap.openState.xform\n xf.multiply ( chimera.Xform.translation ( d ) )\n\n rmap_values = dmap.interpolated_values ( rpoints, xf )\n olap, corr = overlap_and_correlation ( rpoint_weights, rmap_values )\n #print \" - overlap: %f, cross-correlation: %f\" % (olap, corr)\n #chimera.openModels.close ( rmap )\n rccs.append ( corr )\n #print corr,\n\n fp = open ( \"ff_prcc_w%d.txt\" % w, \"a\" )\n fp.write ( \"%s\" % fmap.mols[0].name )\n for i, cc in enumerate ( rccs ) :\n if w == 1 :\n fp.write ( \"\\t%f\" % cc )\n else :\n wscores = rccs [ max(i-w, 0) : min(i+w,len(rccs)) ]\n wscore = float ( sum ( wscores ) ) / float( len(wscores) )\n fp.write ( \"\\t%f\" % wscore )\n fp.write ( \"\\n\" )\n fp.close ()\n if rmap : chimera.openModels.close ( rmap )\n\n\n\n\ndef RandColorChains ( m ) :\n\n ct = {}\n for r in m.residues: ct[r.id.chainId] = 1\n clist = ct.keys()\n clist.sort()\n chains_clrs = {}\n cnames = \"\"\n\n for ci, cid in enumerate ( clist ) :\n clr = ( rand()*.7, rand()*.7, rand()*.7 )\n #print \"- %s: clr(%.2f, %.2f, %.2f)\" % (cid, clr[0], clr[1], clr[2])\n chains_clrs[cid] = chimera.MaterialColor ( clr[0], clr[1], clr[2], 1.0 )\n cnames = cnames + cid\n\n print \"%s - color ribbon for %d chains -\" % ( m.name, len(cnames) ), cnames\n\n # color atoms\n for r in m.residues :\n clr = chains_clrs[r.id.chainId]\n r.ribbonDrawMode = 2\n r.ribbonColor = clr\n r.ribbonDisplay = True\n for at in r.atoms :\n at.display = False\n at.color = clr\n\n return chains_clrs\n\n\n\ndef MapStats ( dmap, aboveZero = True ) :\n\n print \"map: %s\" % (dmap.name)\n\n MapDataStats ( dmap.data )\n\n\ndef MapDataStats ( data, aboveZero = True ) :\n\n mat = data.full_matrix ()\n\n if aboveZero :\n mat = numpy.where ( mat >= 0.0, mat, numpy.zeros_like(mat) )\n\n weights = mat.ravel()\n #ge = numpy.greater_equal(weights, 0.0)\n #weights = numpy.compress(ge, weights)\n #nz = numpy.nonzero( weights )[0]\n\n smin = numpy.min (weights)\n sdev = numpy.std (weights)\n savg = numpy.average(weights)\n smax = numpy.max(weights)\n\n #print \" -\", len(nz), \" nonzero\"\n print \" - range: %.3f -> %.3f, avg=%.3f, sdev=%.3f\" % (smin, smax, savg, sdev)\n\n\n\ndef NormalizeMap ( dmap ) :\n\n print \"Normalizing map: %s\" % (dmap.name)\n\n ndata = NormalizeData ( dmap.data )\n\n try : nv = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nv = VolumeViewer.volume.volume_from_grid_data ( ndata )\n\n nv.name = os.path.splitext(dmap.name)[0] + \"_norm.mrc\"\n nv.openState.xform = dmap.openState.xform\n\n return nv\n\n\n\ndef NormalizeData ( data ) :\n\n O = data.origin\n mat = data.full_matrix ()\n\n mat = numpy.where ( mat >= 0.0, mat, numpy.zeros_like(mat) )\n\n weights = mat.ravel()\n #ge = numpy.greater_equal(weights, 0.0)\n #weights = numpy.compress(ge, weights)\n #nz = numpy.nonzero( weights )[0]\n\n smin = numpy.min (weights)\n sdev = numpy.std (weights)\n savg = numpy.average(weights)\n smax = numpy.max(weights)\n\n print \" - initial - range: %.3f -> %.3f, avg=%.3f, sdev=%.3f\" % (smin, smax, savg, sdev)\n\n #mat0 = mat0 - savg\n mat0 = mat / sdev\n #mat0 = mat / smax\n\n weights = mat0.ravel()\n #ge = numpy.greater_equal(weights, 0.0)\n #weights = numpy.compress(ge, weights)\n smin = numpy.min (weights)\n sdev 
= numpy.std (weights)\n savg = numpy.average(weights)\n smax = numpy.max(weights)\n\n print \" - normalized - range: %.3f -> %.3f, avg=%.3f, sdev=%.3f\" % (smin, smax, savg, sdev)\n\n return VolumeData.Array_Grid_Data ( mat0, O, data.step, data.cell_angles )\n\n\ndef NormalizeMat ( mat ) :\n\n #mat = numpy.where ( mat >= 0.0, mat, numpy.zeros_like(mat) )\n\n weights = mat.ravel()\n #ge = numpy.greater_equal(weights, 0.0)\n #weights = numpy.compress(ge, weights)\n #nz = numpy.nonzero( weights )[0]\n\n smin = numpy.min (weights)\n sdev = numpy.std (weights)\n savg = numpy.average(weights)\n smax = numpy.max(weights)\n\n print \" - initial - range: %.3f -> %.3f, avg=%.3f, sdev=%.3f\" % (smin, smax, savg, sdev)\n\n #mat0 = mat0 - savg\n mat0 = mat / sdev\n #mat0 = mat / smax\n\n weights = mat0.ravel()\n #ge = numpy.greater_equal(weights, 0.0)\n #weights = numpy.compress(ge, weights)\n smin = numpy.min (weights)\n sdev = numpy.std (weights)\n savg = numpy.average(weights)\n smax = numpy.max(weights)\n\n print \" - normalized - range: %.3f -> %.3f, avg=%.3f, sdev=%.3f\" % (smin, smax, savg, sdev)\n\n return mat0\n\n\n\ndef OneMinusOneMap ( dmap ) :\n\n thr = dmap.surface_levels[0]\n\n umsg (\"Generating 1/-1 map for \" + dmap.name + \" thr: %.3f\" % thr)\n\n m2 = None\n\n\n if 0 :\n m1 = dmap.data.full_matrix()\n m2 = numpy.where ( m1 > thr, numpy.ones_like(m1)*1, numpy.ones_like(m1)*-1.0 )\n else :\n\n m0 = dmap.data.full_matrix()\n inside_start = numpy.where ( m0 > thr, numpy.ones_like(m0)*1, numpy.zeros_like(m0) )\n outside_mask = numpy.where ( m0 < thr, numpy.ones_like(m0)*1, numpy.zeros_like(m0) )\n\n gvm = inside_start.copy();\n for i in range (numit) :\n nv_1 = numpy.roll(gvm, 1, axis=0)\n nv_2 = numpy.roll(gvm, -1, axis=0)\n nv_3 = numpy.roll(gvm, 1, axis=1)\n nv_4 = numpy.roll(gvm, -1, axis=1)\n nv_5 = numpy.roll(gvm, 1, axis=2)\n nv_6 = numpy.roll(gvm, -1, axis=2)\n gvm = 1.0/6.0 * ( nv_1 + nv_2 + nv_3 + nv_4 + nv_5 + nv_6 )\n gvm = outside_mask * gvm + inside_start\n\n\n from VolumeData import Array_Grid_Data\n mgrid = Array_Grid_Data ( m2, dmap.data.origin, dmap.data.step, dmap.data.cell_angles, name=\"map_one_minus_one\")\n\n import VolumeViewer\n #nv = VolumeViewer.volume_from_grid_data ( mgrid, show_data = False, show_dialog = False )\n return VolumeViewer.volume_from_grid_data ( mgrid )\n\n\n\ndef MapStats ( dmap, aboveZero = False ) :\n\n print \"Map Stats: %s\" % (dmap.name)\n\n mat = dmap.data.full_matrix ()\n\n if aboveZero :\n mat = numpy.where ( mat >= 0.0, mat, numpy.zeros_like(mat) )\n\n weights = mat.ravel()\n #ge = numpy.greater_equal(weights, 0.0)\n #weights = numpy.compress(ge, weights)\n #nz = numpy.nonzero( weights )[0]\n\n smin = numpy.min (weights)\n sdev = numpy.std (weights)\n savg = numpy.average(weights)\n smax = numpy.max(weights)\n\n #print \" -\", len(nz), \" nonzero\"\n print \" - range: %.3f -> %.3f, avg=%.3f, sdev=%.3f\" % (smin, smax, savg, sdev)\n\n return savg, sdev\n\n\n\ndef AddNoiseToMap ( mv, mean, stdev ) :\n\n print \"\\n---adding noise mean:\",mean, \" stdev:\", stdev, \"---\\n\"\n\n nvm = mv.full_matrix()\n #f_mask = numpy.where ( nvm > 0, numpy.zeros_like(nvm), numpy.ones_like(nvm) )\n\n from numpy.random import standard_normal as srand\n s=mv.data.size\n\n noisem = srand ( (s[2],s[1],s[0]) ) * stdev - (numpy.ones_like(nvm) * mean)\n ngvm = noisem + nvm\n\n ndata = VolumeData.Array_Grid_Data ( ngvm, mv.data.origin, mv.data.step, mv.data.cell_angles )\n try : nvg = VolumeViewer.volume.add_data_set ( ndata, None )\n except : nvg = 
VolumeViewer.volume.volume_from_grid_data ( ndata )\n nvg.name = mv.name\n\n chimera.openModels.close ( [mv] )\n return nvg\n", "id": "5402148", "language": "Python", "matching_score": 11.498190879821777, "max_stars_count": 6, "path": "Segger/fit_dialog.py" }, { "content": "from Segger import dev_menus, timing\n\nclass Fit_Devel:\n\n def add_devel_menus(self, fmenu):\n \n if dev_menus:\n fmenu.add_separator()\n for lbl, var, val in (\n #(\"By principal axes\", self.rotaSearch, 0),\n #(\"By rotation\", self.rotaSearch, 1),\n ):\n fmenu.add_radiobutton(label = lbl, variable = var, value = val)\n for lbl, var in ((\"Chains = models\", self.UseAllMods),\n ):\n fmenu.add_checkbutton(label = lbl, variable = var)\n for lbl, cmd in (\n (\"Export fit scores\", self.ExportFitScores),\n #(\"Replicate with Bio-Matrices\", self.StrucBioMT),\n (\"Sim chain maps\", self.SimChainMaps),\n (\"Make chain maps\", self.StrucChainMaps),\n (\" - show chain maps\", self.StrucShowChainMaps),\n (\" - hide chain maps\", self.StrucHideChainMaps),\n (\" - close chain maps\", self.StrucCloseChainMaps),\n (\" - overlap regions\", self.StrucChMapsOvRegs),\n (\" - close 1 chain map\", self.StrucCloseChainMap),\n #(\" - delete chain maps\", self.StrucDelChainMaps),\n #(\" - show chain/regions\", self.ShowChRegs),\n (\"Segmentation accuracy\", self.SegAccuracy),\n (\"Fit RMSD\", self.FitRMSD),\n #(\"Sel Chains\", self.GetSelectedChains),\n (\"Align to selected chain\", self.AlignToSel),\n (\"Extract proteins\", self.ExtractProteins),\n #(\"Shape match with selected regions\", self.SelRegsShapeScore),\n #(\"Adjust threshold for best match\", self.SelRegsOptimizeShapeScore),\n #(\"Best shape score\", self.StrucBestShapeScore),\n (\"Group regions\", self.StrucGroupRegions),\n (\"Next group\", self.NextRGroup),\n #(\"Find group w/selected\", self.FindGroupFromSelRegs),\n #(\"Get fits\", self.GetFits),\n #(\"Next fit\", self.NextFit),\n #(\"Best fit around selected\", self.FitMapToRSelGroups),\n #(\"Fit ALL around selected\", self.FitMapsToRegionsAroundSel),\n #(\"Fit ALL structures\", self.FitMapsToRGroups),\n #(\"Displayed volume\", self.StrucMapVolume),\n (\"Fit to map (local)\", self.FitSMapToDMap),\n (\"Zero density map\", self.ZeroDMap_with_FMap),\n (\"Extract density\", self.TakeDMap_with_FMap),\n (\"Masked map\", self.MaskedMap),\n (\"Next bio Matrix\", self.NextBioMt),\n (\"All bio matrices\", self.GoBioMt),\n ):\n fmenu.add_command(label = lbl, command = cmd)\n\n \n\n def ExportFitScores ( self ) :\n\n num = self.fit_listbox.size()\n if num == 0 :\n umsg ( \"No fits to export\" )\n return\n \n ccs = []\n for i in range ( num ) :\n le = self.fit_listbox.get ( i )\n toks = le.split ( \" \" )\n cc = None\n for t in toks :\n try :\n cc = float ( t )\n #print cc,\n break\n except :\n #print \"[\" + t + \"]\",\n pass\n #print \"\"\n ccs.append ( cc )\n\n def save ( okay, dialog, ccs = ccs ):\n if okay:\n paths = dialog.getPaths ( )\n if paths:\n path = paths[0]\n f = open ( path, \"a\" )\n for cc in ccs :\n f.write ( \"%f\\t\" % cc )\n f.write ( \"\\n\" )\n f.close ()\n umsg ( \"Wrote %d fits to %s\" % ( len(ccs), path ) )\n\n\n idir = None\n ifile = None\n\n mol = self.list_fits[0][0].mols[0]\n mmap = self.list_fits[0][1]\n \n if hasattr(mol, 'openedAs'):\n import os.path\n idir, ifile = os.path.split(mol.openedAs[0])\n base, suf = os.path.splitext(ifile)\n map_base, map_suf = os.path.splitext( mmap.name )\n ifile = base + \"_fits_in_%s\" % map_base\n \n from OpenSave import SaveModeless\n SaveModeless ( title = 'Save Fit 
Scores',\n filters = [('TXT', '*.txt', '.txt')],\n initialdir = idir, initialfile = ifile, command = save ) \n\n def SimChainMaps ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n map_name = os.path.splitext ( dmap.name ) [0]\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n\n res, grid = None, None\n\n self.SetResolution()\n\n try : res = float ( self.simRes.get() )\n except : print \"Invalid number entered for resolution:\", self.simRes.get(); return\n\n try : grid = float ( self.simGridSp.get() )\n except : grid = res / 3.0\n\n mol = getMod ( self.struc.get() )\n if mol == None : print \"Structure\", self.struc.get(), \"not found\"; return\n\n try : mol.chain_colors\n except : mol.chain_colors = RandColorChains ( mol )\n \n print \"Simulating %d chain maps for %s, res %.3f, grid %.3f\" % (\n len(mol.chain_colors.keys()), mol.name, res, grid)\n\n\n dmap.fitted_mols = []\n\n for cid, clr in mol.chain_colors.iteritems() :\n\n cname = map_name + \"_\" + cid + \".mrc\"\n\n mv = getMod ( cname )\n\n if mv == None :\n\n sel_str = \"#%d:.%s\" % (mol.id, cid)\n print \"%s [%s]\" % (cname, sel_str),\n\n cmd = \"molmap %s %f sigmaFactor 0.187 gridSpacing %f replace false\" % ( sel_str, res, grid )\n print \" -\", cmd\n chimera.runCommand ( cmd )\n\n for mod in chimera.openModels.list() :\n ts = mod.name.split()\n if len(ts) > 1 and mod.name.find(\"map\") >=0 and mod.name.find(\"res\") >=0 :\n print \" - saving to:\", path + cname\n mod.write_file ( path + cname, \"mrc\" )\n chimera.openModels.close ( mod )\n break\n\n mv = VolumeViewer.open_volume_file ( path + cname )[0]\n print \" - loaded:\", mv.name\n\n class FakeMolecule:\n def __init__(self, fmap):\n self.fmap = fmap\n\n fmol = FakeMolecule ( mv )\n \n dmap.fitted_mols.append ( fmol )\n\n #gv.imap = imap\n #gv.name = cname\n #gv.chain_id = cid\n #dmap.chain_maps.append ( gv )\n\n \n\n\n def StrucChainMaps ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n mols = []\n if self.UseAllMods.get() :\n for mod in chimera.openModels.list() :\n if type(mod) == chimera.Molecule :\n mols.append ( mod )\n\n else :\n for m in self.StructuresToFit():\n mols.append ( m )\n m.chain_colors = RandColorChains ( m )\n\n if len(mols) == 0 :\n print \"No structures\"; return\n\n self.MakeChainMaps ( mols, dmap )\n print \"- %d chain or unit maps\" % len ( dmap.chain_maps )\n\n\n\n def StrucShowChainMaps ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n print \"%s - showing %d chain maps\" % (dmap.name, len(dmap.chain_maps))\n for chm in dmap.chain_maps :\n chm.display = True\n\n\n def StrucHideChainMaps ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n print \"%s - hiding %d chain maps\" % (dmap.name, len(dmap.chain_maps))\n for chm in dmap.chain_maps :\n chm.display = False\n\n\n def StrucCloseChainMaps ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n \n print \"%s - closing %d chain maps\" % (dmap.name, len(dmap.chain_maps))\n\n for chm in dmap.chain_maps :\n chm.close()\n\n dmap.chain_maps = []\n\n\n\n def StrucDelChainMaps ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n\n print \"%s - deleting %d chain maps\" % (dmap.name, len(dmap.chain_maps))\n for chm in dmap.chain_maps :\n try 
: os.remove ( path + chm.name )\n except : print \" - could not delete\", chm.name\n chm.close() \n\n dmap.chain_maps = []\n\n\n def StrucChMapsOvRegs ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n\n print \"%s - %d chain maps\" % (dmap.name, len(dmap.chain_maps))\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n if len(smod.regions) == 0 : print \" - no regions!\"; return\n\n\n for mn in [\"2AW4.pdb\", \"2AVY.pdb\"] :\n m = getMod (mn)\n if m :\n for res in m.residues :\n res.ribbonDisplay = False\n\n\n for chm in dmap.chain_maps :\n try : chm.display = False\n except : continue\n\n\n for chm in dmap.chain_maps :\n\n try : chm.display = False\n except : continue\n\n if chm.chain_id == \"A\" : continue\n if chm.chain_id == \"B\" and chm.mol.name == \"2AW4.pdb\" : continue\n\n chm.display = True\n print \"%s - %s\" % (chm.name, chm.chain_id)\n\n oregs = self.ShowOverlappingRegions ()\n chimera.viewer.viewAll ()\n\n\n chm.display = False\n\n if len(oregs) == 0 :\n print \" - NO overlapping regions\"\n continue\n\n\n ress = chimera.selection.OSLSelection( \"#%d:.%s\" % (chm.mol.id, chm.chain_id) ).residues()\n for res in ress : res.ribbonDisplay = True\n\n mname = os.path.splitext ( chm.mol.name )[0]\n fname = mname + \"_%s.pdb\" % chm.chain_id\n fmol = getMod ( fname )\n if fmol == None :\n fmol = chimera.openModels.open ( path + fname )[0]\n fmol.ch_colors = RandColorChains ( fmol )\n\n self.struc.set ( fname )\n self.StrucCenter ()\n self.simRes.set ( \"14\" )\n self.simGridSp.set ( \"2\" )\n self.GenStrucMap ()\n sim_dmap = self.MoleculeMap()\n sim_dmap.chmap = chm\n\n self.rotaSearch.set ( 0 )\n self.FitMapToSelRGroup ()\n rmsd = RMSD ( ress, fmol )\n print \"\\nRMSD: %f \" % rmsd,\n tag = chm.chain_id\n\n msk_dmap = None\n \n if rmsd > 15.0 :\n msk_dmap = self.MaskMapWRegions ()\n self.FitMapToSelRGroup ()\n rmsd = RMSD ( ress, fmol )\n print \"\\nRMSD: %f \" % rmsd,\n tag = chm.chain_id + \"m\"\n\n if rmsd > 15.0 :\n self.rotaSearch.set ( 1 )\n self.FitMapToSelRGroup ()\n rmsd = RMSD ( ress, fmol )\n print \"\\nRMSD: %f \" % rmsd,\n tag = chm.chain_id + \"r\"\n\n if rmsd > 15.0 :\n self.rotaSearch.set ( 1 )\n self.FitMapToSelRGroup ( msk_dmap )\n rmsd = RMSD ( ress, fmol )\n print \"\\nRMSD: %f \" % rmsd,\n tag = chm.chain_id + \"rm\"\n\n\n if rmsd < 15.0 :\n\n print \"_________ YES _______\\n\"\n\n self.SelRegsOptimizeShapeScore ()\n oregs = self.ShowOverlappingRegions ()\n self.SelRegsOptimizeShapeScore ()\n oregs = self.ShowOverlappingRegions ()\n\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n log_file = path + \"ribo_fits_sms_corr.txt\"\n print \"SMS log to\", log_file\n fpl = open ( log_file, 'a' )\n fpl.write ( \"%s %f \" % (tag, sim_dmap.sms) )\n fpl.close ()\n\n if len(oregs) == 1 :\n oregs[0].placed = True\n if len(oregs) >= 2 :\n nreg = smod.join_regions ( oregs )\n\n else :\n\n print \"_________ NO _______\\n\"\n\n\n #sim_dmap.close()\n sim_dmap.display = False\n\n if msk_dmap : msk_dmap.close()\n\n self.StrucCloseChainMap()\n\n #return\n \n\n def StrucCloseChainMap ( self ) :\n\n chm = self.MoleculeMap(create = False)\n if chm :\n print \" --- closing \", chm.mol.name\n chimera.openModels.close ( [chm.mol] )\n print \" --- closing \", chm.chmap.name\n chm.chmap.close()\n print \" --- closing \", chm.name\n chm.close()\n\n\n\n def GoBioMt ( self ) :\n\n fmap = self.MoleculeMap()\n if fmap == None : return False\n try : mol = fmap.mol\n 
except : \"no mol\"; return False\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n smod.bioM_groups = {}\n\n mol.bio_mt_at = 0\n\n for r in smod.surfacePieces :\n r.max_ov = 0.0\n r.max_ov_cid = None\n r.max_ov_bioM = None\n\n smod.p_ctrs = [] \n\n while self.NextBioMt () :\n\n self.FitSMapToDMap ()\n\n smod.bio_mt_at = mol.bio_mt_at\n smod.bioM_groups[mol.bio_mt_at] = self.ShowChRegs ()\n\n #self.ZeroDMap_with_FMap ()\n\n\n if 1 :\n atoms = chimera.selection.OSLSelection( \"#%d:.%s\" % (fmap.mol.id, 'P') ).atoms()\n points = _multiscale.get_atom_coordinates ( atoms, transformed = False )\n print \" - %d points\" % len(points)\n com = numpy.sum(points, axis=0) / len(points)\n comv = numpy.array ( [ [ com[0], com[1], com[2] ] ], numpy.float32 )\n _contour.affine_transform_vertices( comv, Matrix.xform_matrix(fmap.openState.xform) )\n _contour.affine_transform_vertices( comv, Matrix.xform_matrix(smod.openState.xform.inverse()) )\n\n print com[0], com[1], com[2]\n print comv[0][0], comv[0][1], comv[0][2]\n\n C = chimera.Vector ( comv[0][0], comv[0][1], comv[0][2] )\n smod.p_ctrs.append ( C )\n\n #return\n\n\n def StrucBioMT ( self ) :\n\n if len(self.struc.get()) == 0 :\n umsg ( \"Please select a structure first\" )\n return\n mol = getMod ( self.struc.get() )\n if mol == None :\n umsg ( \"%s not open\" % self.struc.get() )\n return\n\n\n mats = BioMatrices ( mol )\n\n umsg ( \"%s - %d matrices\" % ( mol.name, len(mats) ) )\n\n\n\n\n def NextBioMt ( self ) :\n\n fmap = self.MoleculeMap()\n if fmap == None : return False\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n try : mol = fmap.mol\n except : \"no mol\"; return False\n\n try : mol.bio_mts\n except : mol.bio_mts = BioMatrices ( mol )\n\n print \" - %d bio matrices\" % len(mol.bio_mts)\n\n try : mol.bio_mt_at = mol.bio_mt_at + 1\n except : mol.bio_mt_at = 1\n\n print \"_______________ at matrix %d _______________\" % mol.bio_mt_at\n\n try : amat = mol.bio_mts[ mol.bio_mt_at ]\n except : print \" - no such matrix\"; return False\n\n fmap.M = fmap.M0 * am_2_M(amat)\n\n # orthogonalize M\n T = fmap.M; xf = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3], True )\n mt, mr = xf_2_M ( xf )\n fmap.M = mt * mr\n\n tXO, tXR = xf_2_M ( dmap.openState.xform )\n T = tXO * tXR * fmap.M\n xfA = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3], True )\n fmap.openState.xform = xfA\n fmap.mol.openState.xform = xfA\n\n try : mol.chain_maps\n except : mol.chain_maps = []\n for chm in mol.chain_maps : chm.openState.xform = xfA\n\n return True\n\n\n\n def ShowChRegs ( self ) :\n\n mol = getMod ( self.struc.get() )\n if mol == None : print self.struc.get(), \"not open\"; return\n print \"Structure:\", mol.name\n\n try : chain_colors = mol.chain_colors\n except : chain_colors = RandColorChains ( mol )\n mol.chain_colors = chain_colors\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n try : mol.chain_maps\n except : self.MakeChainMaps ( mol, dmap )\n print \"- %d chain maps\" % len ( mol.chain_maps )\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n print \"- %d regions\" % len ( smod.surfacePieces )\n\n\n # for sp in smod.surfacePieces : sp.display = False\n\n rgroups = {}\n rgroups['all'] = []\n ov_regs = []\n\n for chmap in mol.chain_maps :\n\n # if chmap.chain_id == 'A' or chmap.chain_id == 'B' : 
continue\n # chmap.display = True\n\n regs = self.OverlappingRegions ( dmap, chmap, smod )\n\n ov_regs = ov_regs + regs\n\n if len(regs) == 0 :\n print \" - no regions found\";\n return\n\n # rgroups[chmap.chain_id] = regs\n\n #sms = self.ShapeMatchScore ( chmap, dmap, regs )\n #print \" - chain %s, map overlaps %d regions - %.4f\" % ( chmap.chain_id, len(regs), sms )\n\n for r in regs :\n #r.display = True\n #r.color = chmap.surf_color\n rgroups['all'].append ( r )\n\n # break\n\n if 0 :\n # for overlapping regs, which chain do they overlap the most\n for r in ov_regs :\n max_ov = 0.0\n max_ov_chm = None\n for chm in mol.chain_maps :\n imap = self.MapIndexesInMap ( dmap, chm ) \n ipoints = r.points()\n noverlap = 0\n for i,j,k in ipoints :\n if (i,j,k) in imap:\n noverlap += 1\n\n ov = float(noverlap) / r.point_count()\n if ov > max_ov :\n max_ov = ov\n max_ov_chm = chm\n\n try : rgroups[max_ov_chm.chain_id]\n except : rgroups[max_ov_chm.chain_id] = []\n rgroups[max_ov_chm.chain_id].append ( r )\n\n return rgroups\n\n\n def ExtractProteins ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n\n if len(self.struc.get()) == 0 : umsg (\"Please select a structure first\"); return\n fmol = getMod ( self.struc.get() )\n if fmol == None : umsg ( \"%s not open\" % self.struc.get() ); return\n\n try : fmol.ch_colors\n except : fmol.ch_colors = RandColorChains ( fmol )\n\n mname = os.path.splitext ( fmol.name )[0]\n\n print \"%s - %d chains\" % (fmol.name, len(fmol.ch_colors.keys()))\n\n for cid, clr in fmol.ch_colors.iteritems () :\n\n print cid,\n cmol = copyMolChain ( None, fmol, cid, cid, None, clr.rgba() )\n cmol.name = \"%s_%s.pdb\" % (mname, cid)\n\n print \" - %d atoms\" % len(cmol.atoms)\n\n if len(cmol.atoms) > 0 :\n print \" - writing\", path + cmol.name\n chimera.PDBio().writePDBfile ( [cmol], path + cmol.name )\n\n\n print \"\"\n\n def ZeroDMap_with_FMap ( self ) :\n \n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = self.MoleculeMap()\n if fmap == None : print 'Choose a molecule'; return\n\n print \"Taking %s away from %s\" % ( fmap.name, dmap.name )\n rname = dmap.name + '_-_' + fmap.name\n\n mmc = getMod ( rname )\n\n if mmc == None :\n mmc = dmap.writable_copy ()\n mmc.name = rname\n print \" - cloned\", dmap.name\n else :\n print \" - found\", mmc.name\n\n self.ZeroOverlappingRegion ( mmc, fmap )\n\n\n\n\n\n def ZeroOverlappingRegion ( self, ref_map, mask_map ) :\n\n thr = mask_map.surface_levels[0]\n mm = mask_map.data.matrix()\n mm = numpy.where ( mm > thr, mm, numpy.zeros_like(mm) )\n\n nze = numpy.nonzero ( mm )\n nzs = numpy.array ( [nze[2], nze[1], nze[0]] )\n\n # the copy is needed! 
otherwise the _contour.afine_transform does not work for some reason\n points = numpy.transpose ( nzs ).astype(numpy.float32)\n\n print \" - %s - %d points above %.3f\" % ( mask_map.name, len(points), thr )\n\n # transform to index reference frame of ref_map\n f1 = mask_map.data.ijk_to_xyz_transform\n f2 = Matrix.xform_matrix ( mask_map.openState.xform )\n f3 = Matrix.xform_matrix ( ref_map.openState.xform.inverse() )\n f4 = ref_map.data.xyz_to_ijk_transform\n\n tf = Matrix.multiply_matrices( f2, f1 )\n tf = Matrix.multiply_matrices( f3, tf )\n tf = Matrix.multiply_matrices( f4, tf )\n\n _contour.affine_transform_vertices ( points, tf )\n\n\n mm = ref_map.data.matrix()\n n1, n2, n3 = ref_map.data.size[0], ref_map.data.size[1], ref_map.data.size[2]\n\n for fi, fj, fk in points :\n\n for i in [ int(numpy.floor(fi)), int(numpy.ceil(fi)) ] :\n for j in [ int(numpy.floor(fj)), int(numpy.ceil(fj)) ] :\n for k in [ int(numpy.floor(fk)), int(numpy.ceil(fk)) ] :\n if i < 0 or i >= n3 : continue\n if j < 0 or j >= n2 : continue\n if k < 0 or k >= n1 : continue\n mm[k,j,i] = 0.0\n\n\n\n def TakeDMap_with_FMap ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = self.MoleculeMap()\n if fmap == None : print 'Choose a molecule'; return\n\n print \"Taking densities from %s with %s\" % ( dmap.name, fmap.name )\n rname = fmap.name + '_-_' + dmap.name\n\n #mmc = fmap.writable_copy ( require_copy = True )\n #mmc.name = rname\n #print \" - cloned\", fmap.name\n\n\n n1, n2, n3 = fmap.data.size[0], fmap.data.size[1], fmap.data.size[2]\n f_points = VolumeData.grid_indices( (n1,n2,n3), numpy.single ) # i,j,k indices\n _contour.affine_transform_vertices( f_points, fmap.data.ijk_to_xyz_transform )\n\n d_vals = dmap.interpolated_values ( f_points, fmap.openState.xform )\n df_mat = d_vals.reshape( (n3,n2,n1) )\n\n f_mat = fmap.data.full_matrix()\n f_mask = numpy.where ( f_mat > fmap.surface_levels[0], numpy.ones_like(f_mat), numpy.zeros_like(f_mat) )\n\n df_mat = df_mat * f_mask\n\n df_data = VolumeData.Array_Grid_Data ( df_mat, fmap.data.origin, fmap.data.step, fmap.data.cell_angles )\n \n try : df_v = VolumeViewer.volume.add_data_set ( df_data, None )\n except : df_v = VolumeViewer.volume.volume_from_grid_data ( df_data )\n\n df_v.name = rname\n df_v.openState.xform = fmap.openState.xform\n\n\n\n def NextFit ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n self.fit_at = self.fit_at + 1\n sms, o = self.fits[self.fit_at]\n\n print \"Fit %d, %s, corr %.3f, sms %.3f, regions\" % (self.fit_at, o.mname, o.cor, o.sms),\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n for r in smod.surfacePieces : r.display = False\n for r in o.regs : r.display = True; print r.rid,\n\n print \"\"\n\n fmol = getMod ( o.mname )\n if fmol == None : print \"Fitted mol %s not open\" % o.mname; return\n\n fmap = None\n for m in chimera.openModels.list() :\n # TODO: Don't use \"centered\" to decide what maps to fit.\n try : m.mols[0].centered\n except : continue\n if m.mols[0].name == o.mname :\n fmap = m\n m.display, m.mols[0].display = True, True\n else :\n m.display, m.mols[0].display = False, False\n\n self.struc.set ( fmap.mols[0].name )\n\n if fmap == None :\n print \"Fitted map corresponding to %s not open\" % o.mname; return\n\n print \"Found fitted map\", fmap.name \n\n fmap.M = o.M\n tXO, tXR = xf_2_M ( dmap.openState.xform )\n T = tXO * tXR * o.M\n xfA = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], 
T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n fmap.openState.xform = xfA\n for mol in fmap.mols : mol.openState.xform = xfA\n\n\n def BestFits0 ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n print \" - path:\", path\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n for r in smod.surfacePieces : r.display = True\n\n self.GetFits ()\n regs_claimed = {}\n dmap.fitted_mols = []\n\n for i, sms_o in enumerate ( self.fits ) :\n\n sms, o = sms_o\n\n print \"%d - %s - corr %.3f, sm_corr %.3f, regions, dv %.3f\" % (\n i+1, o.mname, o.cor, o.sms, o.dv),\n\n bRegionsClaimed = False\n for r in o.regs :\n print r.rid,\n if regs_claimed.has_key ( r.rid ) :\n bRegionsClaimed = True\n print \"*\",\n else :\n r.display = False\n regs_claimed[r.rid] = True\n\n print \"\"\n\n if bRegionsClaimed :\n print \" - one or more regions claimed\"\n break\n\n nsp = smod.join_regions ( o.regs )\n self.nRegions.set ( len(smod.surfacePieces) )\n\n fmol = getMod ( o.mname )\n if fmol == None : print \" - fitted mol %s not open\" % o.mname; return\n\n fmap = fitMap ( o.mname )\n if fmap == None : print \" - fitted map not open\"\n\n fmap.M = o.M\n tXO, tXR = xf_2_M ( dmap.openState.xform )\n T = tXO * tXR * o.M\n xfA = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n fmap.openState.xform = xfA\n for mol in fmap.mols : mol.openState.xform = xfA\n\n if 0 :\n self.saFitMapToPoints ( fmap, nsp.tpoints, dmap )\n fmap.sms = self.ShapeMatchScore ( fmap.fmol.atoms, dmap, [nsp] )\n\n self.add_fit ( fmap, dmap )\n\n\n\n\n def AlignToSel ( self ) :\n\n if len(self.struc.get()) == 0 : print \"Please select a structure first\"; return\n m = getMod ( self.struc.get() )\n if m == None : print self.struc.get(), \"not open\"; return\n\n m_cid = None\n to_cid = None\n m_to = None\n\n sela = chimera.selection.currentContents()[0]\n for sel_at in sela :\n try : cid = sel_at.residue.id.chainId\n except : pass\n\n if m_cid == None and sel_at.molecule == m :\n m_cid = cid\n print \"Found selected atom in chain %s in %s\" % (cid, m.name)\n elif m_to == None and sel_at.molecule != m :\n m_to = sel_at.molecule\n to_cid = cid\n print \"Found selected atom in chain %s in %s\" % (cid, m_to.name)\n\n if m_cid == None or m_to == None or to_cid == None :\n print \"Please select at least two atoms in the corresponding structures to align\"\n return\n \n print \"Aligning %s chain %s to %s chain %s\" % (m.name, m_cid, m_to.name, to_cid)\n\n m_seq = m.sequences ( asDict=True ) [m_cid]\n to_seq = m_to.sequences ( asDict=True ) [to_cid]\n\n print \" -- %d residues -- %d residues\" % ( len(m_seq.residues), len(to_seq.residues) )\n\n m_ats, to_ats = AlignChains ( m_seq, to_seq )\n xf, rmsd = chimera.match.matchAtoms ( to_ats, m_ats )\n\n m_points = _multiscale.get_atom_coordinates ( m_ats, transformed = True )\n f_points = _multiscale.get_atom_coordinates ( to_ats, transformed = True )\n vss = numpy.square ( numpy.subtract ( m_points, f_points ) )\n sums = numpy.sum ( numpy.sum ( vss, axis=1 ) )\n armsd = numpy.sqrt ( sums / float ( len(m_points) ) )\n\n print \" - %d aligned - RMSD: %.4f, RMSD as placed: %.4f\" % ( len(m_ats), rmsd, armsd )\n\n mxf = m_to.openState.xform\n mxf.multiply ( xf )\n m.openState.xform = mxf\n\n\n fmap = None\n for om in chimera.openModels.list() :\n try : om_mol = om.mol\n except : continue\n if om_mol == m : fmap 
= om; break\n\n if fmap == None :\n print \" - no map found for struc\"\n return\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n print \"\"\n umsg ( \"Fitting %s and %s into %s\" % ( fmap.name, m.name, dmap.name ) )\n\n fxf = m.openState.xform\n dxf = dmap.openState.xform\n\n mxf = dxf.inverse()\n mxf.multiply ( fxf ) # fxf = dxf * mxf\n\n tXO, tXR = xf_2_M ( mxf )\n fmap.M = tXO * tXR\n\n tXO, tXR = xf_2_M ( dmap.openState.xform )\n T = tXO * tXR * fmap.M\n xfA = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n fmap.openState.xform = xfA\n for mol in fmap.mols : mol.openState.xform = xfA\n\n fmap.M0 = fmap.M\n\n\n\n def FitRMSD ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n print \"Map: %s\" % dmap.name\n try : print \" - %d fitted molecules\" % len(dmap.fitted_mols)\n except : print \" - no fitted molecules found\"; return\n\n\n if len(self.struc.get()) == 0 : print \"Please select a structure first\"; return\n m = getMod ( self.struc.get() )\n if m == None : print self.struc.get(), \"not open\"; return\n\n # try : m.ch_colors\n # except : m.ch_colors = RandColorChains ( m )\n # print \"%s - %d chains -\" % ( m.name, len(m.ch_colors) ), m.ch_colors.keys()\n\n print \" - aligning %s (%d chains) to %d fit mols\" % (\n m.name, len(m.sequences()), len(dmap.fitted_mols) )\n\n\n mseqs = m.sequences()\n fmol_msi = {}\n m_atoms = []\n f_atoms = []\n\n for msi, m_seq in enumerate ( mseqs ) :\n\n min_fmi = None\n min_rmsd = 1e99\n min_m_atoms = None\n min_f_atoms = None\n\n print \"Chain %s - %d residues\" % ( m_seq.chain, len(m_seq.residues) )\n\n for fmi, fm in enumerate ( dmap.fitted_mols ) :\n\n try : fmol_msi[fmi]; continue\n except : pass\n\n f_seq = fm.sequences()[0]\n print \" - %d/%d %s, %d residues\" % (\n fmi+1, len(dmap.fitted_mols), dmap.fitted_mols[fmi].name, len(f_seq.residues) ),\n\n m_ats, f_ats = AlignChains ( m_seq, f_seq )\n xf, rmsd = chimera.match.matchAtoms ( m_ats, f_ats )\n m_points = _multiscale.get_atom_coordinates ( m_ats, transformed = True )\n f_points = _multiscale.get_atom_coordinates ( f_ats, transformed = True )\n vss = numpy.square ( numpy.subtract ( m_points, f_points ) )\n sums = numpy.sum ( numpy.sum ( vss, axis=1 ) )\n armsd = numpy.sqrt ( sums / float ( len(m_points) ) )\n\n print \" %d aligned - RMSD: %.4f, RMSD as placed: %.4f\" % (\n len(m_ats), rmsd, armsd )\n\n if armsd < min_rmsd :\n min_rmsd = armsd\n min_fmi = fmi\n min_m_atoms = m_ats\n min_f_atoms = f_ats\n\n if min_fmi != None :\n print \" - min is %d - %s\" % (min_fmi, dmap.fitted_mols[min_fmi].name)\n fmol_msi[min_fmi] = msi\n m_atoms = m_atoms + min_m_atoms\n f_atoms = f_atoms + min_f_atoms\n else :\n print \" - no maps left to align!\"\n\n print \"\"\n \n # m.chain_fitmol = {}\n for fmi, msi in fmol_msi.iteritems () :\n print \"Chain %s -- fit mol %s\" % ( mseqs[msi].chain, dmap.fitted_mols[fmi].name )\n # m.chain_fitmol [ mseqs[msi].chain ] = dmap.fitted_mols[fmi]\n\n\n print \"\"\n\n xf, rmsd = chimera.match.matchAtoms ( m_atoms, f_atoms )\n m_points = _multiscale.get_atom_coordinates ( m_atoms, transformed = True )\n f_points = _multiscale.get_atom_coordinates ( f_atoms, transformed = True )\n vss = numpy.square ( numpy.subtract ( m_points, f_points ) )\n sums = numpy.sum ( numpy.sum ( vss, axis=1 ) )\n armsd = numpy.sqrt ( sums / float ( len(m_points) ) )\n\n print \"Total %d residues aligned - RMSD: %.4f, RMSD as 
placed: %.4f\" % (\n len(m_atoms), rmsd, armsd )\n\n\n\n\n def FitRMSDNoRef ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n print \"Map: %s\" % dmap.name\n try : print \" - %d fitted molecules\" % len(dmap.fitted_mols)\n except : print \" - no fitted molecules found\"; return\n\n\n if len(self.struc.get()) == 0 : print \"Please select a structure first\"; return\n m = getMod ( self.struc.get() )\n if m == None : print self.struc.get(), \"not open\"; return\n\n # try : m.ch_colors\n # except : m.ch_colors = RandColorChains ( m )\n # print \"%s - %d chains -\" % ( m.name, len(m.ch_colors) ), m.ch_colors.keys()\n\n print \" - aligning %s (%d chains) to %d fit mols\" % (\n m.name, len(m.sequences()), len(dmap.fitted_mols) )\n\n\n mseqs = m.sequences()\n cmap = []\n for msi, m_seq in enumerate ( mseqs ) :\n\n min_fid = None\n min_rmsd = 1e99\n min_txf = None\n cmap.append ( '?' )\n\n for fmi, fm in enumerate ( dmap.fitted_mols ) :\n\n bIncluded = False\n for cfmi in cmap :\n if cfmi == fmi : bIncluded = True; break\n if bIncluded : continue\n\n cmap[msi] = fmi\n # print cmap,\n\n mats,fats = [],[]\n for i in range ( len(cmap) ) :\n cid = cmap[i]\n # print cid, fmols[i].name,\n for ca_posi, cat in ch_cas[cid].iteritems() :\n mats.append ( cat.coord() )\n fats.append ( f_ch_cas[i][ca_posi].coord() )\n\n # print \"matching %d to %d ca atoms\" % ( len(fats), len(mats) )\n # txf, rmsd = chimera.match.matchAtoms(fats, mats)\n txf, rmsd = chimera.match.matchPositions(fats, mats)\n # print \"- rmsd:\", rmsd\n if rmsd < min_rmsd :\n min_rmsd = rmsd\n min_cid = chains[i2]\n min_txf = txf\n\n print \"- pos %d is %s, rmsd %f\" % (i1, min_cid, min_rmsd),\n cmap[i1] = min_cid\n print cmap\n # return\n\n for at in m.atoms :\n c = min_txf.apply ( at.coord() )\n at.setCoord ( c )\n\n print cmap\n\n\n\n\n def PlaceBestFits ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n print \" - path:\", path\n\n map_name = os.path.splitext ( dmap.name )[0]\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n self.GetFits ()\n\n dmap.fitted_mols = []\n\n scores = []\n\n for i, sms_o in enumerate ( self.fits ) :\n\n if i >= int ( self.numFits.get() ) : break\n\n sms, o = sms_o\n\n print \"%d/%d - %s - corr %.3f, sm_corr %.3f, dv %.3f\" % (\n i+1, len(self.fits), o.mname, o.cor, o.sms, o.dv)\n\n fmol = getMod ( o.mname )\n if fmol == None : print \" - fitted mol %s not found\" % o.mname; return\n\n fmap = fitMap ( o.mname )\n if fmap == None : print \" - fitted map for %s not found\" % fmol.name; return\n\n fmap.M = o.M\n tXO, tXR = xf_2_M ( dmap.openState.xform )\n T = tXO * tXR * o.M\n xfA = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n fmap.openState.xform = xfA\n for mol in fmap.mols : mol.openState.xform = xfA\n\n self.SaveFit ( fmap )\n \n\n if 0 :\n oregs = self.OverlappingRegions ( dmap, fmap, smod )\n sms = self.ShapeMatchScore ( fmap.mol.atoms, dmap, oregs )\n print \" ---- sms %.4f --- \" % (sms)\n scores.append ( sms )\n\n # if joinRegs : smod.join_regions ( regs )\n\n\n if 0 :\n log_f = path + map_name + \"_fits_acc.txt\"\n print \"\\nAccuracies log file:\", log_f\n\n try : fp = open ( log_f, \"a\" )\n except : print \" - could not open log file\"; return\n fp.write ( \"%f %f %f \" % ( min(scores), max(scores), sum(scores)/float(len(scores))) )\n for sm_score in 
scores : fp.write ( \"%f \" % sm_score )\n fp.write ( \"\\n\" )\n fp.close ()\n\n\n def GetFits ( self ) :\n\n smod = self.CurrentSegmentation()\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n dmap_name = os.path.splitext ( dmap.name )[0]\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n\n log_f = path + \"%s_fits.txt\" % (dmap_name)\n print \"Log file: %s\" % log_f\n\n\n class ClusterEntry :\n def __init__ (self, COM, o) :\n self.COM = chimera.Vector ( COM[0], COM[1], COM[2] )\n self.o = o\n\n class Cluster :\n def __init__ (self) :\n self.entries = []\n self.COM = chimera.Vector (0,0,0)\n\n def AddEntry ( self, new_e ) :\n self.entries.append ( [new_e.o.sms, new_e] )\n\n # compute the new COM\n self.COM = chimera.Vector (0,0,0)\n for o, e in self.entries :\n self.COM = self.COM + e.COM\n self.COM = self.COM / float ( len(self.entries) )\n\n class Fit:\n def __init__ ( self, mname, cor, sms, dv ):\n self.mname = mname\n self.cor = cor\n self.sms = sms\n self.dv = dv\n\n try : fp = open ( log_f, 'r' )\n except :\n print \"Could not open log file:\", log_f\n return\n\n self.clusters = []\n\n li = 0\n while 1 :\n li = li + 1\n line = fp.readline()\n if len(line) == 0 : break\n\n n = line.split()\n\n o = Fit ( n[0], float(n[1]), float(n[2]), float(n[3]) )\n\n # get M\n mi = 4\n M = numpy.matrix ( [ [ float(n[mi+ 0]), float(n[mi+ 1]), float(n[mi+ 2]), float(n[mi+ 3]) ],\n [ float(n[mi+ 4]), float(n[mi+ 5]), float(n[mi+ 6]), float(n[mi+ 7]) ],\n [ float(n[mi+ 8]), float(n[mi+ 9]), float(n[mi+10]), float(n[mi+11]) ],\n [ 0.0, 0.0, 0.0, 1.0 ] ] )\n # orthogonalize M\n T = M ; xf = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3], True )\n mt, mr = xf_2_M ( xf )\n o.M = mt * mr\n\n COM = chimera.Vector ( M[0,3], M[1,3], M[2,3] )\n #print COM\n\n # get MI\n mi = mi+12\n MI = numpy.matrix ( [ [ float(n[mi+ 0]), float(n[mi+ 1]), float(n[mi+ 2]), float(n[mi+ 3]) ],\n [ float(n[mi+ 4]), float(n[mi+ 5]), float(n[mi+ 6]), float(n[mi+ 7]) ],\n [ float(n[mi+ 8]), float(n[mi+ 9]), float(n[mi+10]), float(n[mi+11]) ],\n [ 0.0, 0.0, 0.0, 1.0 ] ] )\n # orthogonalize MI\n T = MI ; xf = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3], True )\n mt, mr = xf_2_M ( xf )\n MI = mt * mr\n\n mi = mi+12\n\n o.regs = []\n for rmi in range ( mi, len(n) ) :\n ri = int ( n[rmi] )\n o.regs.append(smod.id_to_region[ri])\n # print \"%s - corr %.3f, shape score %.3f, regions:\" % (o.mname, o.cor, o.sms), o.regs\n\n cluster = None\n for cl in self.clusters :\n v = cl.COM - COM\n if v.length < 6.0 :\n cluster = cl\n\n if cluster == None :\n #print \"+++ Creating new cluster: \",\n cluster = Cluster()\n self.clusters.append ( cluster )\n\n cluster.AddEntry ( ClusterEntry (COM, o) )\n #print \"- cluster COM (%f, %f, %f)\" % (cluster.COM[0], cluster.COM[1], cluster.COM[2])\n\n # self.fits.append ( [o.sms, o] )\n\n fp.close()\n\n print \"____________ %d alignments, %d clusters ____________\" % ( li, len(self.clusters) )\n\n self.fits = []\n\n for cl in self.clusters :\n cl.entries.sort()\n cl.entries.reverse()\n sms, cle = cl.entries[0]\n self.fits.append ( [cle.o.cor, cle.o] )\n\n self.fits.sort()\n self.fits.reverse()\n\n log_f = path + \"%s_fits_sorted.txt\" % (dmap_name)\n print \"Writing fits to: %s\" % log_f\n lfp = open ( log_f, \"w\" )\n\n for i, sms_o in enumerate ( self.fits ) :\n sms, o = sms_o\n\n if i < 20 :\n print \"%d - %s - 
correlation %.3f, dv %.3f\" % (\n i+1, o.mname, o.cor, o.dv),\n #for r in o.regs : print r.rid,\n print \"\"\n\n lfp.write ( \"%d - structure: %s, cross-correlation: %.3f, dVolume: %.3f\\n\" % (i+1, o.mname, o.cor, o.dv) )\n\n print \"\"\n\n lfp.close ()\n\n self.fit_at = -1\n\n\n def StrucMapVolume ( self ) :\n\n fmap = self.MoleculeMap()\n if fmap == None : return\n\n vol = _surface.enclosed_volume ( *(fmap.surfacePieces[0].geometry) )[0]\n print fmap.name + \" volume: %f\" % vol\n\n print \" - surface levels\",fmap.surface_levels\n thr = fmap.surface_levels[0]\n\n mm = fmap.data.matrix()\n mmab = numpy.where ( mm > thr, numpy.ones_like(mm), numpy.zeros_like(mm) )\n nz = numpy.shape ( numpy.nonzero ( mmab ) )[1]\n vvol = fmap.data.step[0] * fmap.data.step[1] * fmap.data.step[2]\n tvol = vvol * float(nz)\n print \"%s - %d above %f, volume %.3f\" % (fmap.name, nz, thr, tvol)\n\n vvol = fmap.data.step[0] * fmap.data.step[1] * fmap.data.step[2]\n\n tvol = vvol * float(nz)\n print \" - %d above %f, volume %.3f\" % (nz, thr, tvol)\n\n\n\n def MaskedMap ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"Please select a density map\"; return\n\n fmap = self.MoleculeMap()\n if fmap == None : print 'Choose a molecule'; return\n\n\n imap, f_COM, f_bRad = self.MapMaskedMapIndexes ( dmap, fmap, True )\n\n\n \n\n def MapMaskedMapIndexes ( self, ref_map, mask_map, bAddMap=False ) :\n\n f1 = ref_map.data.ijk_to_xyz_transform\n f2 = xform_matrix( ref_map.openState.xform )\n tf = multiply_matrices( f2, f1 )\n\n n1, n2, n3 = ref_map.data.size[0], ref_map.data.size[1], ref_map.data.size[2]\n m_points = VolumeData.grid_indices( (n1,n2,n3), numpy.single ) # i,j,k indices\n transform_vertices( m_points, tf )\n\n a_vals = mask_map.interpolated_values ( m_points, chimera.Xform() )\n mm = a_vals.reshape( (n3,n2,n1) )\n\n thr = mask_map.surface_levels[0]\n mm = numpy.where ( mm > thr, mm, numpy.zeros_like(mm) )\n print \"- in masked map, %d above %.3f\" % ( numpy.shape(mm.nonzero())[1], thr )\n\n if bAddMap :\n mmd = VolumeData.Array_Grid_Data (mm, ref_map.data.origin, ref_map.data.step, ref_map.data.cell_angles, ref_map.data.rotation)\n gv = VolumeViewer.volume_from_grid_data ( mmd )\n gv.name = ref_map.name + \"---\" + mask_map.name\n #gv.openState.xform = dmap.openState.xform\n\n nze = numpy.nonzero ( mm )\n print \" - index values of %d non-zero points\" % len ( nze[0] )\n\n imap = {}\n for ei, i in enumerate ( nze[0] ) :\n j = nze[1][ei]\n k = nze[2][ei]\n\n try : mi = imap[i]\n except : mi = {}; imap[i] = mi\n\n try : mij = mi[j]\n except : mij = {}; mi[j] = mij\n\n mij[k] = 1\n\n\n nzs = numpy.array ( [nze[2], nze[1], nze[0]] )\n points = numpy.cast[numpy.float32] ( numpy.transpose (nzs) )\n transform_vertices( points, f1 )\n\n com = numpy.sum(points, axis=0) / len(points)\n C = chimera.Vector ( com[0], com[1], com[2] )\n comv = numpy.ones_like ( points ) * com\n points = points - comv\n bRad = numpy.sqrt ( numpy.max ( numpy.sum ( numpy.square (points), 1 ) ) )\n\n return imap, C, bRad\n\n\n\n def ShapeMaskedCorr ( self, fmap, dmap, regs ) :\n\n points = numpy.concatenate ( [r.map_points()\n for r in regs], axis=0 )\n\n #tf = xform_matrix ( dmap.openState.xform )\n #transform_vertices(points, tf)\n\n sg = VolumeData.zone_masked_grid_data ( dmap.data, points, 0.5 )\n\n #gv = volume_from_grid_data ( sg, None )\n #gv.name = 'regions_mask'\n #gv.openState.xform = dmap.openState.xform\n\n nz = numpy.shape ( numpy.nonzero ( sg.matrix() ) )[1]\n if len(points) != nz :\n print \"mask failed [%d 
points, %d masked]\" % ( len(points), nz )\n return 0.0\n\n r_mask_matrix = sg.matrix()\n\n fmapm = fmap.data.matrix()\n f_weights = numpy.ravel(fmapm).astype(numpy.single)\n\n size = list(fmapm.shape)\n size.reverse()\n f_points = VolumeData.grid_indices(size, numpy.single) # i,j,k indices\n\n thr = fmap.surface_levels[0]\n ge = numpy.greater_equal(f_weights, thr)\n f_points = numpy.compress(ge, f_points, 0)\n f_weights = numpy.compress(ge, f_weights)\n\n from Matrix import multiply_matrices as MM\n from Matrix import xform_matrix as XFM\n\n p2m_transform = MM ( dmap.data.xyz_to_ijk_transform,\n MM ( XFM ( dmap.openState.xform.inverse() ),\n MM ( XFM ( fmap.openState.xform ), fmap.data.ijk_to_xyz_transform ) ) )\n\n #transform_vertices(f_points, tf)\n\n m_weights, outside = VolumeData.interpolate_volume_data (\n f_points, p2m_transform, r_mask_matrix, 'linear' )\n\n o, shape_cor = overlap_and_correlation ( f_weights, m_weights )\n\n print \" * shape map-masked correlation %.3f\" % shape_cor\n\n return shape_cor\n\n\n\n def OptShapeScore ( self, fmap, dmap, regs ) :\n\n thr_at = fmap.surface_levels[0]\n sms = self.ShapeMatchScore ( fmap, dmap, regs )\n\n umsg ( \"Optimizing threshold for %s - %.2f/%.4f\" % (fmap.name, thr_at, sms) )\n\n while 1 :\n fmap.surface_levels[0] = thr_at + .01\n new_sms = self.ShapeMatchScore ( fmap, dmap, regs )\n\n if new_sms > sms :\n thr_at = fmap.surface_levels[0]\n sms = new_sms\n #print \"- %.2f/%.4f\" % (thr_at, sms),\n \n ro = VolumeViewer.volume.Rendering_Options()\n fmap.update_surface ( False, ro )\n for sp in fmap.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 : sp.display = False\n\n umsg ( \"Optimizing threshold for %s - %.2f/%.4f\" % (fmap.name, thr_at, sms) )\n\n else :\n fmap.surface_levels[0] = thr_at\n break\n\n while 1 :\n fmap.surface_levels[0] = thr_at - .01\n new_sms = self.ShapeMatchScore ( fmap, dmap, regs )\n if new_sms > sms :\n thr_at = fmap.surface_levels[0]\n sms = new_sms\n print \"- %.2f/%.4f\" % (thr_at, sms),\n\n ro = VolumeViewer.volume.Rendering_Options()\n fmap.update_surface ( False, ro )\n for sp in fmap.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 : sp.display = False\n\n umsg ( \"Optimizing threshold for %s - %.2f/%.4f\" % (fmap.name, thr_at, sms) )\n\n else :\n fmap.surface_levels[0] = thr_at\n break\n\n #print \"| %.2f/%.4f\" % (thr_at, sms)\n return sms\n\n\n\n def OptShapeScoreRegs ( self, fmap, dmap, regs ) :\n\n thr_at = dmap.surface_levels[0]\n sms = self.ShapeMatchScore ( fmap, dmap, regs )\n\n print \", optz regs - %.2f/%.4f\" % (thr_at, sms),\n\n while 1 :\n dmap.surface_levels[0] = thr_at + .01\n new_sms = self.ShapeMatchScore ( fmap, dmap, regs )\n if new_sms > sms :\n thr_at = dmap.surface_levels[0]\n sms = new_sms\n print \"- %.2f/%.4f\" % (thr_at, sms),\n else :\n break\n\n ro = VolumeViewer.volume.Rendering_Options()\n dmap.update_surface ( False, ro )\n for sp in dmap.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 : sp.display = False\n\n while 1 :\n dmap.surface_levels[0] = thr_at - .01\n new_sms = self.ShapeMatchScore ( fmap, dmap, regs )\n if new_sms > sms :\n thr_at = dmap.surface_levels[0]\n sms = new_sms\n print \"- %.2f/%.4f\" % (thr_at, sms),\n else :\n break\n\n ro = VolumeViewer.volume.Rendering_Options()\n dmap.update_surface ( False, ro )\n for sp in dmap.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 : sp.display = False\n\n dmap.surface_levels[0] = thr_at\n print \"| %.2f/%.4f\" % (thr_at, sms)\n 
return sms\n\n\n def StrucBestShapeScore ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = self.MoleculeMap()\n if fmap == None : return\n\n # first find out which region overlaps the most\n \n smod = self.CurrentSegmentation()\n if smod == None : return\n if len(smod.regions) == 0 : print \" - no regions!\"; return\n\n max_overlap_num = 0\n max_overlap_reg = None\n\n print \"Region with highest overlap: \",\n\n for region in smod.regions :\n\n noverlap = 0\n for i,j,k in region.points() :\n try : noverlap += imap[k][j][i]\n except : continue\n\n if noverlap > max_overlap_num :\n max_overlap_num = noverlap\n max_overlap_reg = region\n\n if max_overlap_num == 0 :\n print \"no region overlaps\"\n return None\n\n else :\n print \"%d (%d)\" % (max_overlap_reg.rid, max_overlap_num) \n sms = self.ShapeMatchScore ( fmap.fmol.atoms, dmap, [max_overlap_reg] )\n print \" - shape match : %f\" % sms\n\n return sms\n\n\n\n\n def SelRegsShapeScore ( self ) :\n\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n fmap = self.MoleculeMap()\n if fmap == None:\n return\n\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n umsg ( \"Please select a region\" );\n return\n\n print \"Shape score of %s to %d regions:\" % (fmap.name, len(regs)),\n for r in regs :\n print r.rid,\n print \"\"\n\n fmap.sms = self.ShapeMatchScore ( fmap.fmol.atoms, dmap, regs )\n ov, fmap.fit_score = map_overlap_and_correlation ( fmap, dmap, True )\n\n #self.ShapeMaskedCorr ( fmap, dmap, regs )\n #self.OptShapeScore ( fmap, dmap, regs )\n\n umsg ( \"Shape match score: %.3f, cross-correlation: %.3f\" % (fmap.sms, fmap.fit_score) )\n\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n log_file = path + smod.name + \"_fits_sms.txt\"\n print \"SMS to\", log_file\n\n fpl = open ( log_file, 'a' )\n fpl.write ( \"%s %f %f\\n\" % (fmap.name, fmap.sms, fmap.fit_score) )\n fpl.close ()\n\n\n\n\n def SelRegsOptimizeShapeScore ( self ) :\n\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n fmap = self.MoleculeMap()\n if fmap == None:\n return\n\n\n regs = smod.selected_regions()\n if len(regs)==0 :\n umsg ( \"Please select a region\" );\n return\n\n print \"Shape score of %s to %d regions:\" % (fmap.name, len(regs)),\n for r in regs :\n print r.rid,\n print \"\"\n\n # fmap.sms = self.ShapeMatchScore ( fmap, dmap, regs )\n # self.ShapeMaskedCorr ( fmap, dmap, regs )\n # fmap.sms = self.OptShapeScore ( fmap, dmap, regs )\n\n umsg ( \"Optimized shape match score: %.3f\" % fmap.sms )\n\n\n\n def SegAccuracy ( self, tag = \"_acc\", joinRegs=True ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n map_name = os.path.splitext ( dmap.name )[0]\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n cmaps = dmap.chain_maps\n print \"\\nMapping %d regions -> %d chain maps...\" % (\n len(smod.regions), len(cmaps) )\n\n if len(smod.regions) == 0 : print \" - no regions!\"; return\n if len(cmaps) == 0 : print \" - no chain maps!\"; return\n\n ch_regs = {}\n\n for reg_i, region in enumerate ( smod.regions ) :\n\n max_overlap_num = 0\n max_overlap_cmap = None\n\n ipoints = region.points()\n\n for cm in cmaps :\n\n noverlap = 0\n for i,j,k in ipoints :\n try : noverlap += cm.imap[k][j][i]\n except : continue\n\n if noverlap > 
max_overlap_num :\n max_overlap_num = noverlap\n max_overlap_cmap = cm\n\n if max_overlap_num == 0 :\n print \"%d of %d - %d %d points - \" % (reg_i+1, len(smod.regions), region.rid, len(ipoints) )\n\n else :\n # print \"%d/%d - %d %d points - max overlap %d with %s\" % (reg_i+1, len(smod.regions), region.rid, len(ipoints), max_overlap_num, max_overlap_cmap.name )\n c = max_overlap_cmap.surf_color\n region.color = c\n\n try : ch_regs[max_overlap_cmap].append ( region )\n except : ch_regs[max_overlap_cmap] = [ region ]\n\n thr = dmap.surface_levels[0]\n scores = []\n\n for cm in cmaps :\n print cm.name,\n if ch_regs.has_key ( cm ) == False :\n print \" - no regions!\"\n continue\n\n regs = ch_regs[cm]\n print \" - %d regions\" % len(regs),\n\n sms = self.ShapeMatchScore ( cm.atoms, dmap, regs )\n print \" - sms %.4f\" % (sms)\n\n if sms > 0.3 and len(regs) < 10 :\n scores.append ( sms )\n\n else :\n print \"sms low: %.3f, nregs: %d\" % (sms, len(regs))\n\n if joinRegs : smod.join_regions ( regs )\n\n\n map_name = os.path.splitext ( dmap.name ) [0]\n path = os.path.dirname ( dmap.data.path ) + os.path.sep\n\n log_f = path + map_name + tag + \".txt\"\n print \"\\nAccuracies log file:\", log_f\n\n res = NumberFromName ( dmap.name, 'r' );\n if res == None : res = 0.0\n print \" - resolution: %.0f, %d scores\" % ( res, len(scores) )\n\n try : fp = open ( log_f, \"a\" )\n except : print \" - could not open log file\"; return\n fp.write ( \"%f %f %f %f \" % ( res, min(scores), max(scores), sum(scores)/float(len(scores))) )\n for sm_score in scores : fp.write ( \"%f \" % sm_score )\n fp.write ( \"\\n\" )\n fp.close ()\n\n\n\n def ShapeMatchScore ( self, atoms, dmap, regs, bPrint=False ) :\n\n #fmol = fmap.mol\n #print \"atoms from\", fmol.name\n #points = get_atom_coordinates ( fmol.atoms, transformed = True )\n\n print \"shape match of %d atoms\" % len(atoms)\n points = get_atom_coordinates ( atoms, transformed = True )\n\n tfd = xform_matrix( dmap.openState.xform.inverse() )\n #tff = xform_matrix( fmap.openState.xform.inverse() )\n #tf = multiply_matrices( tff, tfd )\n transform_vertices( points, tfd )\n\n sg_fmap = VolumeData.zone_masked_grid_data ( dmap.data, points, max(3.0,dmap.data.step[0]) )\n\n #gv = VolumeViewer.volume_from_grid_data ( sg_fmap )\n #gv.name = 'registered_structure_region'\n #gv.openState.xform = dmap.openState.xform\n\n\n points = numpy.concatenate ( [r.map_points() for r in regs], axis=0 )\n\n sg_dmap = VolumeData.zone_masked_grid_data ( dmap.data, points, dmap.data.step[0] / 2.0 )\n #gv = VolumeViewer.volume.volume_from_grid_data ( sg_dmap )\n #gv.name = 'regions_mask'\n\n regs_m = sg_dmap.matrix()\n regs_f = sg_fmap.matrix()\n regs_mz = numpy.where ( regs_m > dmap.surface_levels[0], numpy.ones_like(regs_m), numpy.zeros_like(regs_m) )\n regs_fz = numpy.where ( regs_f > dmap.surface_levels[0], numpy.ones_like(regs_f), numpy.zeros_like(regs_f) )\n \n nz = numpy.shape ( numpy.nonzero ( regs_mz ) )[1]\n print \"regions %d nonzero, %d points\" % (nz, len(points))\n\n if nz != len(points) :\n print \"mask failed - %d of %d pts nonzero\" % (nz, len(points))\n\n nz = numpy.shape ( numpy.nonzero ( regs_fz ) )[1]\n print \"struct. 
%d nonzero, %d atoms\" % (nz, len(atoms))\n\n nz_int = numpy.shape ( (regs_mz * regs_fz).nonzero () )[1]\n nz_uni = numpy.shape ( (regs_mz + regs_fz).nonzero () )[1]\n\n sm_score = float(nz_int) / float (nz_uni)\n\n if 1 or bPrint : print \" - intersection %d, union %d\" % (nz_int, nz_uni)\n\n print \" * shape match score %.3f\" % sm_score\n\n return sm_score\n\n\n\n\n def MakeChainMaps ( self, mols, dmap ) :\n\n try : print len(dmap.chain_maps), \"chain maps so far\"\n except : dmap.chain_maps = []\n\n\n if len(mols) == 1 :\n\n mol = mols[0]\n print \"Making chain maps for %s in %s:\" % (mol.name, dmap.name)\n\n for cid, clr in mol.chain_colors.iteritems() :\n\n map_name = os.path.splitext ( dmap.name )[0]\n cname = map_name + \"_\" + cid\n sel_str = \"#%d:.%s\" % (mol.id, cid)\n print \"%s [%s]\" % (cname, sel_str),\n\n gv = self.SelStrucMap ( sel_str, mol, dmap, clr.rgba() )\n if gv :\n gv.name = cname\n gv.chain_id = cid\n dmap.chain_maps.append ( gv )\n gv.mol = mol\n\n #return\n\n\n else :\n\n print \"Making struc maps for %d strucs\" % len(mols)\n\n for mol in mols :\n\n map_name = os.path.splitext ( dmap.name )[0]\n mol_name = os.path.splitext ( mol.name )[0]\n cname = map_name + \"_\" + mol_name\n sel_str = \"#%d\" % (mol.id)\n print \"%s [%s]\" % (cname, sel_str),\n \n clr = (rand(), rand(), rand(), 1.0)\n gv = self.SelStrucMap ( sel_str, mol, dmap, clr )\n gv.name = cname\n gv.chain_id = mol_name\n dmap.chain_maps.append ( gv )\n\n\n def SelStrucMap ( self, sel_str, mol, dmap, color ) :\n\n atoms = chimera.selection.OSLSelection( sel_str ).atoms()\n\n if len(atoms) == 0 :\n print \"- empty\"\n return None\n \n points = get_atom_coordinates ( atoms, transformed = True )\n transform_vertices ( points, xform_matrix(dmap.openState.xform.inverse()) )\n\n step = chimera.Vector ( dmap.data.step[0], dmap.data.step[1], dmap.data.step[2] )\n sg = zone_masked_grid_data ( dmap.data, points, 2.0 )\n\n sgm = sg.matrix()\n #sgmt = numpy.where ( sgm > 0, sgm, numpy.ones_like(sgm)*1e99 )\n #min_d = numpy.min ( sgmt )\n\n #sgmt = numpy.where ( sgm > 0, numpy.ones_like(sgm), numpy.zeros_like(sgm) )\n #sgd = VolumeData.Array_Grid_Data (sgmt, dmap.data.origin, dmap.data.step, dmap.data.cell_angles, dmap.data.rotation)\n\n gv = volume_from_grid_data ( sg )\n gv.openState.xform = dmap.openState.xform\n\n dvals = dmap.interpolated_values ( points, mol.openState.xform )\n min_d = numpy.min ( dvals ) / 2.0\n min_d = dmap.surface_levels[0]\n\n print \" - min d: %.4f,\" % min_d,\n\n gv.region = ( gv.region[0], gv.region[1], [1,1,1] )\n\n gv.display = True\n gv.surface_levels = [ min_d ]\n #gv.surface_levels = [20.0]\n gv.surface_colors = [ color ]\n gv.surf_color = color\n ro = Rendering_Options()\n ro.surface_smoothing = True\n #ro.smoothing_factor = .25\n #ro.smoothing_iterations = 2\n gv.update_surface ( False, ro )\n gv.atoms = atoms\n\n for sp in gv.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 : sp.display = False\n\n cm = gv.data.matrix()\n\n nze = numpy.nonzero ( cm )\n print \"%d NZ \\\\\" % len ( nze[0] ),\n\n cmt = numpy.where ( cm > min_d, cm, numpy.zeros_like(cm) )\n nze = numpy.nonzero ( cmt )\n print \" %d NZ\" % len ( nze[0] )\n gv.d_thr = min_d\n\n gv.imap = {}\n for ei, i in enumerate ( nze[0] ) :\n j = nze[1][ei]\n k = nze[2][ei]\n\n try : mi = gv.imap[i]\n except : mi = {}; gv.imap[i] = mi\n\n try : mij = mi[j]\n except : mij = {}; mi[j] = mij\n\n mij[k] = 1\n\n\n com = numpy.sum(points, axis=0) / len(points)\n gv.COM = chimera.Vector ( com[0], com[1], com[2] 
)\n comv = numpy.ones_like ( points ) * com\n points_v = points - comv\n gv.bRad = numpy.sqrt ( numpy.max ( numpy.sum ( numpy.square (points_v), 1 ) ) )\n\n return gv\n\n\n def FitSMapToDMap ( self ) :\n\n fmap = self.MoleculeMap()\n if fmap == None : return\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n print \"Fitting %s to %s\" % ( fmap.name, dmap.name )\n\n ov, corr = self.FitMapLocal ( fmap, dmap )\n\n tXO, tXR = xf_2_M ( dmap.openState.xform )\n T = tXO * tXR * fmap.M\n xfA = chimera.Xform.xform ( T[0,0], T[0,1], T[0,2], T[0,3], T[1,0], T[1,1], T[1,2], T[1,3], T[2,0], T[2,1], T[2,2], T[2,3] )\n fmap.openState.xform = xfA\n for mol in fmap.mols : mol.openState.xform = xfA\n\n try : fmap.mols.chain_maps\n except : fmap.mol.chain_maps = []\n for chm in fmap.mol.chain_maps : chm.openState.xform = xfA\n\n\n def FitMapLocal ( self, fmap, dmap ) :\n\n f_m = fmap.data.full_matrix(); size = list(f_m.shape); size.reverse()\n fmap.fpoints = grid_indices(size, numpy.single) # i,j,k indices\n transform_vertices( fmap.fpoints, fmap.data.ijk_to_xyz_transform )\n fmap.fpoint_weights = numpy.ravel(f_m).astype(numpy.single)\n\n threshold = fmap.surface_levels[0]\n #threshold = .3 * max ( numpy.ravel(f_m).astype(numpy.single) )\n\n ge = numpy.greater_equal(fmap.fpoint_weights, threshold)\n fmap.fpoints = numpy.compress(ge, fmap.fpoints, 0)\n fmap.fpoint_weights = numpy.compress(ge, fmap.fpoint_weights)\n nz = numpy.nonzero( fmap.fpoint_weights )[0]\n\n print \" - threshold %.4f, %d nonzero\" % ( threshold, len(nz) )\n\n if len(nz) < len (fmap.fpoint_weights) :\n fmap.fpoints = numpy.take( fmap.fpoints, nz, axis=0 )\n fmap.fpoint_weights = numpy.take(fmap.fpoint_weights, nz, axis=0)\n\n mass = numpy.sum(fmap.fpoint_weights, dtype=numpy.single)\n fmap.rotation_center = numpy.dot(fmap.fpoint_weights,fmap.fpoints) / mass\n\n olap, cor = self.FitMap_T ( fmap, dmap, num_tries=1 )\n\n fmap.fpoints = None\n fmap.fpoint_weights = None\n fmap.rotation_center = None\n\n return olap, cor\n\n def StrucGroupRegions ( self ) :\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n fmap = self.MoleculeMap()\n if fmap == None : print 'Choose a molecule'; return\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n\n regs = smod.selected_regions()\n\n if len(regs)==0 :\n print \"\\nGrouping all regions\"\n tvol = self.MapVolume ( fmap )\n smod.rgroups = self.GroupAllRegions ( smod, tvol )\n\n elif len(regs) == 1 :\n tvol = self.MapVolume ( fmap )\n bRad = -1.0 # self.MapBoundingRad ( fmap )\n\n print \"\\nMaking groups around region %d - target vol %.3f, b-Rad %.3f\" % (regs[0].rid, tvol, bRad)\n smod.rgroups, maxDepthReached = self.GroupAroundReg ( smod, regs[0], tvol, bRad )\n print \" - depth reached: %d\" % maxDepthReached\n\n\n else :\n print \"Please select no regions (for global groups) or one region (for local groups)\"\n return\n\n if len ( smod.rgroups ) == 0 :\n print \"No groups result!\"\n return\n\n smod.rgroups.sort()\n smod.rgroup_at = 0\n dv, regs = smod.rgroups[smod.rgroup_at]\n print \"Group %d of %d - dv %f, regions:\" % (smod.rgroup_at+1, len(smod.rgroups), dv),\n for r in regs : print r.rid,\n print \"\"\n\n for sp in smod.surfacePieces :\n if regs.count ( sp.region ) > 0 : sp.display = True\n else : sp.display = False\n\n\n\n def NextRGroup ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n smod.rgroup_at = smod.rgroup_at + 1\n\n if smod.rgroup_at >= len(smod.rgroups) : 
smod.rgroup_at = 0\n\n dv, regs = smod.rgroups[smod.rgroup_at]\n\n print \"Group %d/%d - dv %f, regions:\" % (smod.rgroup_at+1, len(smod.rgroups), dv),\n for r in regs : print r.rid,\n print \"\"\n\n for sp in smod.surfacePieces :\n if regs.count ( sp.region ) > 0 : sp.display = True\n else : sp.display = False\n\n\n\n def FindGroupFromSelRegs ( self ) :\n\n smod = self.CurrentSegmentation()\n if smod == None : return\n\n print \"\\nStructure:\",\n fmap = self.MoleculeMap()\n if fmap == None : print 'Choose a molecule'; return\n tvol = self.MapVolume ( fmap )\n\n\n regs = smod.selected_regions() \n if len(regs)==0 : print \"no selected regions found\"; return\n\n print \"Finding group for %d selected regions:\" % ( len(regs) ),\n regs_vol = 0.0\n for r in regs :\n print r.rid,\n regs_vol = regs_vol + r.enclosed_volume()\n print \"\"\n\n dv = abs ( tvol - regs_vol ) / tvol;\n print \" - total regions volume: %.3f - DV %.5f\" % (regs_vol, dv)\n\n points = numpy.concatenate ( [r.map_points()\n for r in regs], axis=0 )\n\n com = numpy.sum(points, axis=0) / len(points)\n C = chimera.Vector ( com[0], com[1], com[2] )\n comv = numpy.ones_like ( points ) * com\n points = points - comv\n regs_bRad = numpy.sqrt ( numpy.max ( numpy.sum ( numpy.square (points), 1 ) ) )\n\n print \" - COM: %.3f %.3f %.3f, bounding rad %.3f\" % ( C[0], C[1], C[2], regs_bRad )\n\n\n print \"Searching %d groups\" % len(smod.rgroups)\n gi = 0\n for dv, gregs in smod.rgroups :\n gi = gi + 1\n if len(regs) != len (gregs) : continue\n allIn = True\n for r in regs :\n if gregs.count ( r ) == 0 : allIn = False; break\n if allIn :\n print \" - FOUND! %d - dv %f - regs\" % (gi, dv),\n for r in gregs : print r.rid,\n print \"\"\n\n print \" - done searching\"\n\n\n\n def FitMapsToRegionsAroundSel ( self ) :\n\n print \"_______________________________________________________________\"\n\n dmap = segmentation_map()\n if dmap == None : print \"No segmentation map\"; return\n\n smod = self.CurrentSegmentation()\n if smod is None : return\n\n\n sregs = smod.selected_regions() \n if len(sregs) != 1 : print \"please selected 1 region\"; return\n\n if timing: t0 = clock()\n\n for sp in smod.surfacePieces :\n if sp.region == sregs[0].region :\n sp.display = True\n clr = sp.region.color\n sp.color = ( clr[0], clr[1], clr[2], REG_OPACITY )\n else :\n sp.display = False\n\n\n self.FitOpenMapsToGroupsAround ( smod, sregs[0].region, dmap )\n\n if timing:\n t1 = clock()\n print \"Time: %.1f sec\" % (t1 - t0)\n\n\n def MapBoundingRad ( self, fmap ) :\n \n fmol = fmap.mol\n points = get_atom_coordinates ( fmol.atoms, transformed = False )\n\n print \"%s (%s)\\n - COM: %.3f %.3f %.3f, bounding rad %.3f\" % ( fmap.name, fmol.name, C[0], C[1], C[2], bRad )\n\n return bRad\n\n\n\ndef fitMap(name):\n # GP - I'm not sure what this function is for - the centerMol function has changed\n # and the code below won't work\n for m in chimera.openModels.list() :\n if hasattr(m, 'mol') and m.mols[0].name == name :\n if not hasattr(m.mols[0], 'centered'):\n centerMol ( m.mols[0] )\n return m\n return None\n\n\n\ndef NumberFromName ( name, tag ) :\n\n #name = os.path.splitext ( name ) [0]\n if name.rfind('.mrc') >= 0 : name = name [ : name.rfind('.mrc') ]\n ts = name.split (\"_\")\n num = None\n\n for t in ts :\n if t[0:len(tag)] == tag :\n try :\n num = float ( t[len(tag):] )\n break\n except :\n continue\n\n return num\n\n\n\ndef BioMatrices ( m ) :\n\n matrices = {}\n\n for rm in m.pdbHeaders['REMARK'] :\n s = rm.split()\n if len(s) == 8 and s[2].find('BIOMT') 
== 0 :\n # print s\n mi = int ( s[3] )\n ri = int ( s[2][5] ) - 1\n try : matrices[mi]\n except : matrices[mi] = numpy.zeros ( (3,4), numpy.float32 )\n\n matrices[mi][ri][0] = float ( s[4] )\n matrices[mi][ri][1] = float ( s[5] )\n matrices[mi][ri][2] = float ( s[6] )\n matrices[mi][ri][3] = float ( s[7] )\n\n return matrices \n\n\n\ndef RMSD ( self, ress, fmol ) :\n\n N, D, posr = 0.0, 0.0, {}\n for r in ress : posr[r.id.position] = r\n for r in fmol.residues :\n v = r.atomsMap[\"CA\"][0].xformCoord() - posr[r.id.position].atomsMap[\"CA\"][0].xformCoord()\n N, D = N + 1, D + (v.length * v.length)\n\n return numpy.sqrt ( D / N )\n\n\ndef AlignChains ( ref, match ) :\n\n import MatchMaker\n import MatchMaker.prefs\n import chimera.misc\n\n gapOpen = MatchMaker.prefs.defaults[MatchMaker.prefs.GAP_OPEN]\n gapExtend = MatchMaker.prefs.defaults[MatchMaker.prefs.GAP_EXTEND]\n ksdsspCache = set()\n ssFraction = MatchMaker.prefs.defaults[MatchMaker.prefs.SS_MIXTURE]\n ssMatrix = MatchMaker.prefs.defaults[MatchMaker.prefs.SS_SCORES]\n alignKw = { 'ssFraction': ssFraction, 'ssMatrix': ssMatrix, 'computeSS': False }\n\n # print \"Aligning %d res to %d res\" % ( len(ref), len(match) )\n\n score, s1, s2 = MatchMaker.align( ref, match, 'BLOSUM-62', \"nw\", gapOpen, gapExtend, ksdsspCache, **alignKw)\n\n refAtoms, matchAtoms = [], []\n\n for i in range( len(s1) ) :\n if s1[i] == \".\" or s2[i] == \".\": continue\n refRes = s1.residues[s1.gapped2ungapped(i)]\n matchRes = s2.residues[s2.gapped2ungapped(i)]\n refAtom = chimera.misc.principalAtom(refRes)\n if not refAtom: continue\n matchAtom = chimera.misc.principalAtom(matchRes)\n if not matchAtom: continue\n if refAtom.name != matchAtom.name:\n # nucleic P-only trace vs. full nucleic\n if refAtom.name != \"P\":\n try : refAtom = refAtom.residue.atomsMap[\"P\"][0]\n except KeyError: continue\n else:\n try : matchAtom = matchAtom.residue.atomsMap[\"P\"][0]\n except KeyError: continue\n refAtoms.append(refAtom)\n matchAtoms.append(matchAtom)\n\n # print \" - aligned %d atoms\" % len (refAtoms)\n\n if len ( refAtoms ) < 3 :\n print \"too few atoms to perform 3D alignment\"\n return None\n\n return refAtoms, matchAtoms\n\n\n\ndef copyMolChain ( nmol, mol, cid, new_cid, xf, clr ) :\n\n if nmol == None :\n nmol = chimera.Molecule()\n nmol.name = \"complex\"\n\n print \"Copying chain %s (%s) to %s\" % (cid, mol.name, new_cid)\n aMap = dict()\n for res in mol.residues :\n if res.id.chainId == cid :\n nres = nmol.newResidue(res.type,\n chimera.MolResId(new_cid, res.id.position))\n # print \"New res: %s %d\" % (nres.id.chainId, nres.id.position)\n for at in res.atoms :\n nat = nmol.newAtom (at.name, chimera.Element(at.element.number))\n aMap[at] = nat\n nres.addAtom( nat )\n if xf : nat.setCoord ( xf.apply( at.coord() ) )\n else : nat.setCoord ( at.coord() )\n nat.drawMode = nat.Sphere\n nat.color = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 )\n nat.display = True\n\n nres.isHelix = res.isHelix\n nres.isHet = res.isHet\n nres.isSheet = res.isSheet\n nres.isStrand = res.isStrand\n nres.ribbonDisplay = True\n nres.ribbonDrawMode = 2\n nres.ribbonColor = chimera.MaterialColor( clr[0], clr[1], clr[2], 1.0 );\n\n for bond in mol.bonds :\n if (bond.atoms[0].residue.id.chainId == cid and\n bond.atoms[1].residue.id.chainId == cid) :\n nb = nmol.newBond ( aMap[bond.atoms[0]], aMap[bond.atoms[1]] )\n nb.display = True\n\n return nmol\n\n\n\ndef TransferChainMap ( a, m ) :\n\n print \" - tr %s\" % ( a.name ),\n\n new_v = m.writable_copy()\n new_v.name = a.name + 
\"_tr\"\n mm = new_v.data.full_matrix()\n\n n1, n2, n3 = m.data.size[0], m.data.size[1], m.data.size[2]\n an1, an2, an3 = a.data.size[0], a.data.size[1], a.data.size[2]\n\n f_i2s = m.data.ijk_to_xyz_transform\n f_t = xform_matrix( m.openState.xform )\n tf = multiply_matrices( f_t, f_i2s )\n\n m_points = grid_indices( (n1,n2,n3), numpy.single ) # i,j,k indices\n transform_vertices( m_points, tf )\n\n a_vals = a.interpolated_values ( m_points, chimera.Xform() )\n mm[:,:,:] = a_vals.reshape( mm.shape )\n\n gv = new_v\n gv.region = ( gv.region[0], gv.region[1], [1,1,1] )\n\n gv.display = True\n #gv.surface_levels = [ dmap.surface_levels[0] ]\n gv.surface_levels = [ a.d_thr ]\n gv.surf_color = ( rand(), rand(), rand(), 1.0 )\n gv.surface_colors = [ gv.surf_color ]\n ro = Rendering_Options()\n #ro.surface_smoothing = True\n #ro.smoothing_factor = .25\n #ro.smoothing_iterations = 10\n gv.update_surface ( False, ro )\n\n for sp in gv.surfacePieces :\n v, t = sp.geometry\n if len(v) == 8 and len(t) == 12 : sp.display = False\n\n\n mmN = numpy.where ( mm > a.d_thr, mm, numpy.zeros_like(mm) )\n nze = numpy.nonzero ( mmN )\n print \"%d NZ\" % len ( nze[0] )\n\n gv.imap = {}\n for ei, i in enumerate ( nze[0] ) :\n j = nze[1][ei]\n k = nze[2][ei]\n\n try : mi = gv.imap[i]\n except : mi = {}; gv.imap[i] = mi\n\n try : mij = mi[j]\n except : mij = {}; mi[j] = mij\n\n mij[k] = 1\n\n nzs = numpy.array ( [nze[2], nze[1], nze[0]] )\n points = numpy.transpose ( nzs ).astype(numpy.float32)\n transform_vertices ( points, m.data.ijk_to_xyz_transform )\n\n com = numpy.sum(points, axis=0) / len(points)\n gv.COM = chimera.Vector ( com[0], com[1], com[2] )\n comv = numpy.ones_like ( points ) * com\n points_v = points - comv\n gv.bRad = numpy.sqrt ( numpy.max ( numpy.sum ( numpy.square (points_v), 1 ) ) )\n\n return gv\n\n\ndef am_2_M ( X ) :\n\n M = numpy.matrix ( [\n [ X[0,0], X[0,1], X[0,2], X[0,3] ],\n [ X[1,0], X[1,1], X[1,2], X[1,3] ],\n [ X[2,0], X[2,1], X[2,2], X[2,3] ],\n [ 0, 0, 0, 1 ] ] )\n\n return M\n\n\ndef RandColorChains ( m ) :\n\n ct = {}\n for r in m.residues: ct[r.id.chainId] = 1\n clist = ct.keys()\n clist.sort()\n chains_clrs = {}\n cnames = \"\"\n\n for ci, cid in enumerate ( clist ) :\n clr = ( rand()*.7, rand()*.7, rand()*.7 )\n #print \"- %s: clr(%.2f, %.2f, %.2f)\" % (cid, clr[0], clr[1], clr[2])\n chains_clrs[cid] = chimera.MaterialColor ( clr[0], clr[1], clr[2], 1.0 )\n cnames = cnames + cid\n\n print \"%s - color ribbon for %d chains -\" % ( m.name, len(cnames) ), cnames\n\n # color atoms\n for r in m.residues :\n clr = chains_clrs[r.id.chainId]\n r.ribbonDrawMode = 2\n r.ribbonColor = clr\n r.ribbonDisplay = True\n for at in r.atoms :\n at.display = False\n at.color = clr\n\n return chains_clrs\n\n\n\n\n \n", "id": "3679471", "language": "Python", "matching_score": 6.177891254425049, "max_stars_count": 6, "path": "Segger/fit_devel.py" }, { "content": "\n\nimport chimera\nimport random\nimport numpy\nimport _multiscale\nimport VolumeData\nimport FitMap\nimport _contour\nimport Matrix\nimport _distances\n\nfrom chimera.resCode import nucleic3to1\nfrom chimera.resCode import protein3to1, protein1to3\nprotein3to1['HSD'] = protein3to1['HIS']\nprotein3to1['HSE'] = protein3to1['HIS']\n\nnucleic1to3 = { 'G':'GUA', 'A':'ADE', 'U':'URA', 'C':'CYT' }\n\n\natomColors = {'C' : chimera.MaterialColor (0.565,0.565,0.565),\n 'Cbb' : chimera.MaterialColor (0.2,0.6,0.2),\n 'S' : chimera.MaterialColor (1.000,1.000,0.188),\n 'O' : chimera.MaterialColor (1.000,0.051,0.051),\n 'N' : chimera.MaterialColor 
(0.188,0.314,0.973),\n 'P' : chimera.MaterialColor (1.0, 0.502, 0.0),\n 'H' : chimera.MaterialColor (0.9,.9,.9),\n ' ' : chimera.MaterialColor (0.2,1,.2),\n \"MG\" : chimera.MaterialColor (0,1,0),\n \"NA\" : chimera.MaterialColor (.6,.3,.6),\n \"CL\" : chimera.MaterialColor (.2,.6,.2),\n \"CA\" : chimera.MaterialColor (.4,.4,.6),\n \"ZN\" : chimera.MaterialColor (.2,.8,.2),\n \"MN\" : chimera.MaterialColor (.4,.4,.6),\n \"FE\" : chimera.MaterialColor (.4,.4,.6),\n \"CO\" : chimera.MaterialColor (.4,.4,.6),\n \"NI\" : chimera.MaterialColor (.4,.4,.6)\n}\n\n\n\nM_PI = numpy.pi\nTWOPI = 2.0 * numpy.pi\n\nclass Params :\n\n def __init__ ( self ) :\n\n self.atoms = {}\n self.bonds = {}\n self.linkBonds = {}\n self.angles = {}\n self.linkAngles = {}\n self.torsions = {}\n self.planes = {}\n\n print \"init params\"\n\n\nclass RefineParams :\n\n def __init__ ( self ) :\n\n self.bonds = []\n self.angles = []\n self.torsions = []\n self.planes = []\n\n\nmrPar = None\n\n\ndef GetProps ( l, label, props, toMap ) :\n global mrPar\n\n ts = l.split()\n\n if 1 or len (ts) == len(props) :\n #rtype, a1, a2, a3, angle, esd, exception, descr = ts\n\n par = {}\n for i in range ( len(props) ) :\n\n prop = props[i]\n if i >= len(ts) :\n print \" -x- %s : length mismatch\" % label\n print l,\n print props\n break\n\n val = \"\"\n if ts[i][0] == '\"' :\n while i < len(ts) :\n if len(val) == 0 :\n val = ts[i]\n else :\n val += \" \" + ts[i]\n if val[-1] == '\"' :\n break\n i += 1\n val = val.replace ( '\"', '' )\n #print l,\n #print \" - val:\", val\n else :\n val = ts[i]\n\n par[prop] = val\n\n try :\n numI = int ( val )\n par[prop] = numI\n except :\n pass\n\n try :\n numF = float ( val )\n par[prop] = numF\n except :\n pass\n\n if \"comp_id\" in par :\n rtype = par[\"comp_id\"]\n if not rtype in toMap :\n toMap[rtype] = []\n toMap[rtype].append ( par )\n elif \"link_id\" in par :\n #rtype = par[\"link_id\"]\n linkI = \"%d\" % len(toMap)\n toMap[linkI] = par\n else :\n print \" -x- %s : no comp_id\" % label\n print l,\n print props\n\n else :\n print \" -x- %s : length mismatch\" % label\n print l,\n print props\n\n\ndef GetBond ( l ) :\n global mrPar\n ts = l.split()\n if len (ts) == 11 :\n rtype, a1, a2, order, aromatic, stereo, ordinal, dist, esd, exception, descr = ts\n dist = float ( dist )\n esd = float ( esd )\n\n #if rtype == \"ALA\" :\n # print rtype, a1, a2, dist, esd\n\n descr = descr.replace ( '\"', \"\" )\n if not rtype in mrPar.bonds :\n mrPar.bonds[rtype] = []\n mrPar.bonds[rtype].append ( [a1, a2, dist, esd, descr] )\n\n\ndef GetLinkBond ( l ) :\n global mrPar\n ts = l.split()\n\n #print l,\n\n if len (ts) > 4 :\n a1, a2, dist, esd = ts[0], ts[1], ts[2], ts[3]\n dist = float ( dist )\n esd = float ( esd )\n\n descr = \"\"\n for i in range ( 4, len(ts) ) :\n descr = descr + ts[i] + \" \"\n descr = descr[0:-1].replace( '\"', '' )\n\n #print descr\n\n mrPar.linkBonds[descr] = [a1, a2, dist, esd]\n\n else :\n #print \"?\"\n pass\n\n\n\ndef GetAngle ( l ) :\n global mrPar\n ts = l.split()\n\n rtype, a1, a2, a3, angle, esd, exception, descr = [None] * 8\n\n if len (ts) == 6 :\n rtype, a1, a2, a3, angle, esd = ts\n elif len (ts) == 8 :\n rtype, a1, a2, a3, angle, esd, exception, descr = ts\n descr = descr.replace ( '\"', \"\" )\n else :\n print \" -x- angle:\", l,\n return\n\n angle = float ( angle ) * numpy.pi / 180.0\n esd = float ( esd ) * numpy.pi / 180.0\n\n #if rtype == \"ALA\" :\n # print rtype, a1, a2, a3, angle, esd\n\n if not rtype in mrPar.angles :\n mrPar.angles[rtype] = []\n 
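# --- illustrative sketch, not part of the original file ---
# GetAngle here splits a whitespace-separated monomer-library record and
# converts the target angle and its esd from degrees to radians before
# storing them. The same conversion in isolation, assuming the 6-field
# "rtype a1 a2 a3 angle esd" form handled above (function name is illustrative):

import numpy

def parse_angle_record ( line ) :
    rtype, a1, a2, a3, angle, esd = line.split()[:6]
    angle = float ( angle ) * numpy.pi / 180.0
    esd = float ( esd ) * numpy.pi / 180.0
    return rtype, a1, a2, a3, angle, esd

# usage (hypothetical): parse_angle_record ( "ALA N CA C 111.2 2.8" )
# --- end of sketch ---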
mrPar.angles[rtype].append ( [a1, a2, a3, angle, esd, descr] )\n\n\ndef GetLinkAngle ( l ) :\n global mrPar\n ts = l.split()\n if len (ts) > 5 :\n a1, a2, a3, angle, esd, descr = ts[0], ts[1], ts[2], ts[3], ts[4], \"\"\n angle = float ( angle ) * numpy.pi / 180.0\n esd = float ( esd ) * numpy.pi / 180.0\n\n for i in range ( 5, len(ts) ) :\n descr = descr + ts[i] + \" \"\n\n descr = descr[0:-1].replace( '\"', '' )\n\n if not descr in mrPar.linkAngles :\n mrPar.linkAngles[descr] = {}\n\n mrPar.linkAngles[descr][\"%s_%s_%s\"%(a1,a2,a3)] = [angle, esd]\n #print \" - angle\", \"%s_%s_%s : %.3f, %.3f\"%(a1,a2,a3,angle,esd)\n #mrPar.linkAngles[descr].append ( [a1, a2, a3, angle, esd] )\n\n\ndef GetTorsion ( l ) :\n global mrPar\n ts = l.split()\n if len (ts) == 9 :\n # LYS chi1 N CA CB CG 180.0 15.0 3\n rtype, torId, a1, a2, a3, a4, angle, esd, period = ts\n angle = float ( angle ) * numpy.pi / 180.0\n esd = float ( esd ) * numpy.pi / 180.0\n period = int ( period )\n\n #if rtype == \"ARG\" or rtype == \"ALA\" :\n # print rtype, torId, a1, a2, a3, a4, angle, esd, period\n\n if not rtype in mrPar.torsions :\n mrPar.torsions[rtype] = []\n mrPar.torsions[rtype].append ( [a1, a2, a3, a4, angle, esd, period] )\n\n\n\ndef GetPlane ( l ) :\n\n global mrPar\n ts = l.split()\n\n if len (ts) == 4 :\n # ARG 1 CD 0.02\n rtype, planeId, a1, esd = ts\n esd = float ( esd )\n\n #if rtype == \"ASP\" or rtype == \"ASP\" :\n # print rtype, planeId, a1, esd\n\n if not rtype in mrPar.planes :\n mrPar.planes[rtype] = []\n mrPar.planes[rtype].append ( [rtype, planeId, a1, esd] )\n\n\n\n\ndef ReadParams ( resType ) :\n\n global mrPar\n\n if mrPar == None :\n mrPar = Params ()\n\n import os\n dir_path = os.path.dirname(os.path.realpath(__file__))\n #print dir_path\n\n li = 0\n\n paramFileName = \"\"\n if resType in protein3to1 or resType in nucleic3to1 :\n paramFileName = \"standard_geometry.cif\"\n else :\n paramFileName = resType.lower() + \".cif\"\n\n\n fpath = os.path.join ( dir_path, \"_param\" )\n fpath = os.path.join ( fpath, paramFileName )\n\n if not os.path.isfile ( fpath ) :\n print \" - could not find param file: %s\" % fpath\n return\n\n print \" - params from: %s\" % fpath\n\n ctype = None\n\n fp = open ( fpath )\n while 1 :\n\n l = fp.readline(); li+=1;\n if not l : break\n\n if l[0:len(\"loop_\")] == \"loop_\" :\n\n #print \"\\n%d:\" % li,\n props, ctype = [], None\n\n while 1 :\n l = fp.readline(); li+=1;\n if not l : break\n\n if l[0:1] == \"_\" :\n ctype, prop = l.split(\".\")\n props.append ( prop.strip() )\n else :\n #print \"%s:%s\" % (ctype, \",\".join(props))\n #print \"\"\n break\n\n if l[0] == \"#\" :\n continue\n\n if not l : break\n\n if ctype == \"_chem_comp_atom\" :\n GetProps ( l, ctype, props, mrPar.atoms )\n\n elif ctype == \"_chem_comp_bond\" :\n #GetBond ( l, props )\n GetProps ( l, ctype, props, mrPar.bonds )\n\n elif ctype == \"_chem_link_bond\" :\n #GetLinkBond ( l, props )\n GetProps ( l, ctype, props, mrPar.linkBonds )\n\n elif ctype == \"_chem_comp_angle\" :\n #GetAngle ( l, props )\n GetProps ( l, ctype, props, mrPar.angles )\n\n elif ctype == \"_chem_link_angle\" :\n #GetLinkAngle ( l, props )\n GetProps ( l, ctype, props, mrPar.linkAngles )\n\n\n elif ctype == \"_chem_comp_tor\" :\n #GetTorsion ( l, props )\n GetProps ( l, ctype, props, mrPar.torsions )\n\n elif ctype == \"_chem_comp_plane_atom\" :\n #GetPlane ( l, props )\n GetProps ( l, ctype, props, mrPar.planes )\n\n\n fp.close()\n\n if 0 :\n print li, \"lines\"\n print \"atoms ->\", len(mrPar.atoms)\n print \"bonds ->\", 
len(mrPar.bonds)\n print \"link bonds ->\", len(mrPar.linkBonds)\n print \"angles ->\", len(mrPar.angles)\n print \"link angles ->\", len(mrPar.linkAngles)\n print \"torsions ->\", len(mrPar.torsions)\n print \"planes ->\", len(mrPar.planes)\n\n\n\n\ndef ResParams ( res, refPar ) :\n\n global mrPar\n\n if mrPar == None :\n mrPar = Params ()\n\n if res.type not in mrPar.atoms :\n ReadParams ( res.type )\n\n\n if res.type in mrPar.bonds :\n for prop in mrPar.bonds[res.type] :\n\n a1, a2 = prop[\"atom_id_1\"], prop[\"atom_id_2\"]\n dist, esd = prop[\"value_dist\"], prop[\"value_dist_esd\"]\n #a1, a2, dist, esd, descr = b\n\n\n useIt = False\n descr = \"\"\n\n if not \"description\" in prop :\n #print \" - descr not found in bond\"\n useIt = True\n\n else :\n\n descr = prop[\"description\"]\n\n if res.type == \"CYS\" :\n if descr == \".\" :\n useIt = True\n elif descr == 'SH' :\n useIt = True\n\n elif res.type == \"HIS\" :\n if descr == \".\" :\n useIt = True\n elif descr == 'HISE' :\n useIt = True\n\n elif res.type == \"PRO\" :\n if descr == \".\" :\n useIt = True\n elif descr == 'trans' :\n useIt = True\n\n elif res.type == \"U\" or res.type == \"G\" or res.type == \"A\" or res.type == \"C\" :\n if descr == \".\" or descr == \"s\" :\n useIt = True\n elif descr == \"C3'-endo\" : #RNA\n useIt = True\n elif descr == \"(H)C3'-end\" : # ?\n useIt = True\n\n else :\n useIt = True\n\n if useIt :\n\n if not a1 in res.atomsMap :\n #print \" - %s.%d.%s - bond: %s-%s - %s ! a1 not found\" % (res.type, res.id.position, res.id.chainId, a1, a2, descr)\n continue\n\n if not a2 in res.atomsMap :\n #print \" - %s.%d.%s - bond: %s-%s - %s ! a2 not found\" % (res.type, res.id.position, res.id.chainId, a1, a2, descr)\n continue\n\n at1 = res.atomsMap[a1][0]\n at2 = res.atomsMap[a2][0]\n\n #print \" - %s.%d.%s - bond: %s-%s - %s - %.3f %.3f\" % (res.type, res.id.position, res.id.chainId, a1, a2, descr, dist, esd)\n refPar.bonds.append ( [at1, at2, dist, esd] )\n\n else :\n print \" - no bonds for res %d.%s:%s\" % (res.id.position, res.id.chainId, res.type)\n\n\n if res.type in mrPar.angles :\n for prop in mrPar.angles[res.type] :\n\n a1, a2, a3 = prop[\"atom_id_1\"], prop[\"atom_id_2\"], prop[\"atom_id_3\"]\n angle, esd = prop[\"value_angle\"], prop[\"value_angle_esd\"]\n\n angle_rad = angle * numpy.pi / 180.0\n esd_rad = esd * numpy.pi / 180.0\n\n useIt = False\n descr = \"\"\n\n if not \"description\" in prop :\n #print \" - descr not found in angle prop\"\n useIt = True\n\n else :\n descr = prop[\"description\"]\n\n if res.type == \"CYS\" :\n if descr == \".\" :\n useIt = True\n elif descr == \"SH\" :\n useIt = True\n\n elif res.type == \"HIS\" :\n if descr == \".\" :\n useIt = True\n elif descr == \"HISE\" :\n useIt = True\n\n elif res.type == \"PRO\" :\n if descr == \".\" :\n useIt = True\n elif descr == \"trans\" :\n useIt = True\n\n elif res.type == \"U\" or res.type == \"G\" or res.type == \"A\" or res.type == \"C\" :\n if descr == \".\" or descr == \"s\" :\n useIt = True\n elif descr == \"C3'-endo\" : #RNA\n useIt = True\n #elif descr == \"(P)O5'-C5'\" : # ?\n # useIt = True\n elif descr == \"small\" : # ?\n useIt = True\n\n\n else :\n useIt = True\n\n if useIt :\n\n if not a1 in res.atomsMap :\n #print \" - %s.%d.%s - angle: %s-%s-%s - %s ! a1 not found\" % (res.type, res.id.position, res.id.chainId, a1, a2, a3, descr)\n continue\n\n if not a2 in res.atomsMap :\n #print \" - %s.%d.%s - angle: %s-%s-%s - %s ! 
a2 not found\" % (res.type, res.id.position, res.id.chainId, a1, a2, a3, descr)\n continue\n\n if not a3 in res.atomsMap :\n #print \" - %s.%d.%s - angle: %s-%s-%s - %s ! a3 not found\" % (res.type, res.id.position, res.id.chainId, a1, a2, a3, descr)\n continue\n\n at1 = res.atomsMap[a1][0]\n at2 = res.atomsMap[a2][0]\n at3 = res.atomsMap[a3][0]\n refPar.angles.append ( [at1, at2, at3, angle_rad, esd_rad] )\n\n #print \" - %s.%d.%s - angle: %s-%s-%s - %s - %.3f %.3f\" % (res.type, res.id.position, res.id.chainId, a1, a2, a3, descr, angle, esd)\n\n else :\n print \" - angles for res %d.%s:%s not found\" % (res.id.position, res.id.chainId, res.type)\n\n\n if res.type in mrPar.torsions :\n for P in mrPar.torsions[res.type] :\n\n a1, a2, a3, a4 = P[\"atom_id_1\"], P[\"atom_id_2\"], P[\"atom_id_3\"], P[\"atom_id_4\"]\n angle, esd, period = P[\"value_angle\"], P[\"value_angle_esd\"], P[\"period\"]\n\n #a1, a2, a3, a4, angle, esd, period = a\n angle_rad = angle * numpy.pi / 180.0\n esd_rad = esd * numpy.pi / 180.0\n\n at1, at2, at3, at4 = None, None, None, None\n if a1 in res.atomsMap :\n at1 = res.atomsMap[a1][0]\n if a2 in res.atomsMap :\n at2 = res.atomsMap[a2][0]\n if a3 in res.atomsMap :\n at3 = res.atomsMap[a3][0]\n if a4 in res.atomsMap :\n at4 = res.atomsMap[a4][0]\n if at1 and at2 and at3 and at4 :\n refPar.torsions.append ( [at1, at2, at3, at4, angle_rad, esd_rad, period] )\n #print \" -t: \", at1.name, at2.name, at3.name, angle_rad, esd_rad, period\n else :\n #print \" - torsions for res %d.%s:%s not found\" % (res.id.position, res.id.chainId, res.type)\n pass\n\n\n if res.type in mrPar.planes :\n\n planeAtoms = []\n\n for P in mrPar.planes[res.type] :\n #print a\n planeId, a1, esd = P[\"plane_id\"], P[\"atom_id\"], P[\"dist_esd\"]\n\n at1 = None\n if a1 in res.atomsMap :\n at1 = res.atomsMap[a1][0]\n\n if at1 :\n planeAtoms.append ( [at1, esd] )\n #if log : print \" -p: \", At(at1), planeId, esd\n else :\n #print \" - atom %s not found for plane, in res %d %s chain %s\" % (a1, res.id.position, res.type, res.id.chainId)\n pass\n\n if len (planeAtoms) >= 3 :\n refPar.planes.append ( planeAtoms )\n #print \" res %d.%s:%s - %d atoms in plane \" % (res.id.position, res.id.chainId, res.type, len(planeAtoms) )\n\n #else :\n # print \" - res %d.%s:%s - no planes\" % (res.id.position, res.id.chainId, res.type)\n\n\n\ndef CheckConnectDiS ( at1, refPar ) :\n\n P = None\n for linkI, lpar in mrPar.linkBonds.iteritems() :\n if lpar['link_id'] == \"SG\" and lpar[\"atom_id_1\"] == \"SG\" and lpar[\"link_id\"] == \"CYS\" :\n P = lpar\n\n if P :\n a1, a2, dist, esd = P['atom_id_1'], P['atom_id_2'], P['value_dist'], P['value_dist_esd']\n\n if not a1 in r1.atomsMap :\n print \" - %s-%s [%s] link at %s-%d-%s - %s at not found\" % (a1, a2, lpar['link_id'], r1.type, r1.id.position, r1.id.chainId, a1)\n return\n\n if not a2 in r2.atomsMap :\n print \" - %s-%s [%s] link at %s-%d-%s - %s at not found\" % (a1, a2, lpar['link_id'], r2.type, r2.id.position, r2.id.chainId, a2)\n return\n\n print \" - diS link bond %s-%s [%s] %d.%s - %d.%s\" % (a1, a2, lpar['link_id'], r1.id.position, r1.id.chainId, r2.id.position, r2.id.chainId)\n\n at1 = r1.atomsMap[a1][0]\n at2 = r2.atomsMap[a2][0]\n\n refPar.bonds.append ( [at1, at2, dist, esd] )\n\n\ndef ConnectRes (r1, r2, refPar) :\n\n for linkI, lpar in mrPar.linkBonds.iteritems() :\n\n P = None\n if r2.type in protein3to1 and r1.type in protein3to1 :\n if r2.type == \"GLY\" :\n if lpar['link_id'] == \"GLY\" :\n P = lpar\n elif r2.type == \"PRO\" :\n if lpar['link_id'] == 
\"PRO\" :\n P = lpar\n else :\n if lpar['link_id'] == \"NOT GLY PRO\" :\n P = lpar\n\n elif r2.type in nucleic3to1 and r1.type in nucleic3to1 :\n if lpar['link_id'] == \"NUC-ACID-ALL\" :\n P = lpar\n\n\n\n if P :\n a1, a2, dist, esd = P['atom_id_1'], P['atom_id_2'], P['value_dist'], P['value_dist_esd']\n\n if not a1 in r1.atomsMap :\n print \" - %s-%s [%s] link at %s-%d-%s - %s at not found\" % (a1, a2, lpar['link_id'], r1.type, r1.id.position, r1.id.chainId, a1)\n continue\n\n if not a2 in r2.atomsMap :\n print \" - %s-%s [%s] link at %s-%d-%s - %s at not found\" % (a1, a2, lpar['link_id'], r2.type, r2.id.position, r2.id.chainId, a2)\n continue\n\n #print \" - link bond %s-%s [%s] link at %s-%d-%s\" % (a1, a2, lpar['link_id'], r1.type, r1.id.position, r1.id.chainId)\n\n at1 = r1.atomsMap[a1][0]\n at2 = r2.atomsMap[a2][0]\n\n refPar.bonds.append ( [at1, at2, dist, esd] )\n\n\n for linkI, lpar in mrPar.linkAngles.iteritems() :\n\n a1, a2, a3 = lpar['atom_id_1'], lpar['atom_id_2'], lpar['atom_id_3']\n\n P = None\n\n if r2.type in protein3to1 and r1.type in protein3to1 :\n if r2.type == \"GLY\" :\n if lpar['link_id'] == \"GLY\" :\n P = lpar\n elif r2.type == \"PRO\" :\n isTrans = True\n if lpar['link_id'] == \"PRO\" :\n P = lpar\n else :\n if isTrans :\n if lpar['link_id'] == \"PRO-TRANS\" :\n P = lpar\n else :\n if lpar['link_id'] == \"PRO-CIS\" :\n #P = lpar\n #print \" - adding PRO C-N link at %s-%d-%s\" % (r1.type, r1.id.position, r1.id.chainId)\n pass\n else :\n if lpar['link_id'] == \"NOT GLY PRO\" :\n P = lpar\n if lpar['link_id'] == \"NOT PRO GLY\" :\n P = lpar\n\n elif r2.type in nucleic1to3 and r1.type in nucleic1to3 :\n if lpar['link_id'] == \"NA s\" :\n P = lpar\n if lpar['link_id'] == \"NA small\" :\n P = lpar\n\n if P :\n\n angle, esd = P['value_angle'], P['value_angle_esd']\n\n angle_rad = angle * numpy.pi / 180.0\n esd_rad = esd * numpy.pi / 180.0\n\n rr1, rr2, rr3 = r1, None, r2\n if a1 == \"CA\" or a1 == \"O\" :\n rr2 = r1\n else :\n rr2 = r2\n\n if not a1 in rr1.atomsMap :\n print \" - %s-%s-%s [%s] at %s-%d-%s ! a1 not found\" % (a1, a2, a3, lpar['link_id'], rr1.type, rr1.id.position, rr1.id.chainId)\n continue\n\n if not a2 in rr2.atomsMap :\n print \" - %s-%s-%s [%s] at %s-%d-%s ! a2 not found\" % (a1, a2, a3, lpar['link_id'], rr2.type, rr2.id.position, rr2.id.chainId)\n continue\n\n if not a3 in rr3.atomsMap :\n print \" - %s-%s-%s [%s] at %s-%d-%s ! 
a3 not found\" % (a1, a2, a3, lpar['link_id'], rr3.type, rr3.id.position, rr3.id.chainId)\n continue\n\n print \" - link angle %s-%s-%s [%s] at %s-%d-%s - %.3f\" % (a1, a2, a3, lpar['link_id'], r1.type, r1.id.position, r1.id.chainId, angle_rad*180.0/numpy.pi)\n\n at1 = rr1.atomsMap[a1][0]\n at2 = rr2.atomsMap[a2][0]\n at3 = rr3.atomsMap[a3][0]\n\n refPar.angles.append ( [at1, at2, at3, angle_rad, esd_rad] )\n\n\n\nrefPar = None\nrefAtoms = None\nrefPos = None\nrefGrad = None\nrefAtM = None\ngMaxDs = [0.0,0.0,0.0]\ngMaxG = None\n\ndef RefStart ( ress, dmap ) :\n\n global refPar\n global refAtoms, refPos, refGrad, refAtM\n\n #print \"Refining %d residues\" % len(ress)\n\n atomsMap = {}\n molMap = {}\n resMap = {}\n\n for r in ress :\n molMap[r.molecule] = 1\n resMap[r] = 0\n for at in r.atoms :\n atomsMap[at] = 1\n\n\n\n bonds = []\n # resisdues mapped by mol_id, chain_id, and residue id\n mi_ci_ri = {}\n for mol in molMap.keys() :\n\n #for bond in mol.bonds :\n # if bond.atoms[0] in atomsMap and bond.atoms[1] in atomsMap :\n # bonds.append ( bond )\n\n if not mol.id in mi_ci_ri :\n mi_ci_ri[mol.id] = {}\n\n for res in mol.residues :\n if not res.id.chainId in mi_ci_ri[mol.id] :\n mi_ci_ri[mol.id][res.id.chainId] = {}\n if res.id.position in mi_ci_ri[mol.id][res.id.chainId] :\n print \"multiple residues with position %d in %d.%s\" % (res.id.position, mol.id, res.id.chainId)\n print \" %s - %s\" % (mi_ci_ri[mol.id][res.id.chainId][res.id.position].type, res.type)\n else :\n mi_ci_ri[mol.id][res.id.chainId][res.id.position] = res\n\n\n # stores if the next res from any residue has had parameters added yet\n #addedParForNextRes = {}\n\n # all parameters\n refPar = RefineParams ()\n\n for res in ress :\n\n mol = res.molecule\n\n for at in res.atoms :\n atomsMap[at] = 1\n at.M = 1.0\n\n ResParams ( res, refPar )\n\n #print \" - %d.%s %s - %d bonds, %d angles\" % (r.id.position, r.id.chainId, r.type, len(rBonds), len(rAngles))\n\n if res.type in protein3to1 or res.type in nucleic1to3 :\n\n if res.id.position-1 in mi_ci_ri[mol.id][res.id.chainId] :\n prevRes = mi_ci_ri[mol.id][res.id.chainId][res.id.position-1]\n\n if prevRes in resMap :\n # connects from prevRes to this one\n pass\n else :\n ConnectRes ( prevRes, res, refPar )\n\n for at in prevRes.atoms :\n atomsMap[at] = 1\n at.M = 0.0\n\n\n if res.id.position+1 in mi_ci_ri[mol.id][res.id.chainId] :\n nextRes = mi_ci_ri[mol.id][res.id.chainId][res.id.position+1]\n\n ConnectRes ( res, nextRes, refPar )\n\n if not nextRes in resMap :\n for at in nextRes.atoms :\n atomsMap[at] = 1\n at.M = 0.0\n\n if 0 and res.type == \"CYS\" :\n CheckConnectDiS ()\n atO1 = res.atomsMap[\"OG\"][0]\n if len(atO1.bonds) == 2 :\n atO2 = None\n for b in atO1.bonds :\n if b.otherAtom(atO1).name == \"OG\" :\n atO2 = b.otherAtom(atO1)\n if atO2 :\n add = False\n if atO2.residue in resMap :\n # add from lowest to highest to avoid adding twice\n if atO2.residue.id.position < atO1.residue.id.position :\n add = True\n else :\n add = True\n if add :\n pass\n\n\n\n if res.type == \"NAG\" :\n C1 = res.atomsMap[\"C1\"][0]\n for b in C1.bonds :\n\n bondedAt = b.otherAtom(C1)\n bondedRes = bondedAt.residue\n\n if bondedRes.type == \"ASN\" and bondedAt.name == \"ND2\" :\n refPar.bonds.append ( [C1, bondedAt, 1.45, 0.02] )\n CG = bondedRes.atomsMap[\"CG\"][0]\n refPar.angles.append ( [CG, bondedAt, C1, 124.7*numpy.pi/180.0, 5.0*numpy.pi/180.0] )\n print \" - NAG-ASN(%s) bond\" % bondedAt.name\n if not bondedRes in resMap :\n for at in bondedRes.atoms :\n atomsMap[at] = 1\n at.M = 
0.0\n\n elif bondedRes.type == \"NAG\" and bondedAt.name == \"O4\" :\n refPar.bonds.append ( [C1, bondedAt, 1.433, 0.02] )\n C4 = bondedRes.atomsMap[\"C4\"][0]\n refPar.angles.append ( [C4, bondedAt, C1, 118.567*numpy.pi/180.0, 5.0*numpy.pi/180.0] )\n print \" - NAG-NAG (%s) bond\" % bondedAt.name\n if not bondedRes in resMap :\n for at in bondedRes.atoms :\n atomsMap[at] = 1\n at.M = 0.0\n\n if res.type == \"BMA\" :\n C1 = res.atomsMap[\"C1\"][0]\n for b in C1.bonds :\n\n bondedAt = b.otherAtom(C1)\n bondedRes = bondedAt.residue\n\n if bondedRes.type == \"NAG\" and bondedAt.name == \"O4\" :\n refPar.bonds.append ( [C1, bondedAt, 1.433, 0.02] )\n C4 = bondedRes.atomsMap[\"C4\"][0]\n refPar.angles.append ( [C4, bondedAt, C1, 109.147*numpy.pi/180.0, 5.0*numpy.pi/180.0] )\n print \" - BMA-NAG (%s) bond\" % bondedAt.name\n if not bondedRes in resMap :\n for at in bondedRes.atoms :\n atomsMap[at] = 1\n at.M = 0.0\n\n\n if res.type == \"MAN\" :\n C1 = res.atomsMap[\"C1\"][0]\n for b in C1.bonds :\n\n bondedAt = b.otherAtom(C1)\n bondedRes = bondedAt.residue\n\n if bondedRes.type == \"BMA\" and bondedAt.name == \"O6\" :\n refPar.bonds.append ( [C1, bondedAt, 1.425, 0.02] )\n C6 = bondedRes.atomsMap[\"C6\"][0]\n refPar.angles.append ( [C6, bondedAt, C1, 115.695*numpy.pi/180.0, 5.0*numpy.pi/180.0] )\n print \" - MAN-BMA (%s) bond\" % bondedAt.name\n if not bondedRes in resMap :\n for at in bondedRes.atoms :\n atomsMap[at] = 1\n at.M = 0.0\n\n elif bondedRes.type == \"BMA\" and bondedAt.name == \"O3\" :\n refPar.bonds.append ( [C1, bondedAt, 1.475, 0.02] )\n C3 = bondedRes.atomsMap[\"C3\"][0]\n refPar.angles.append ( [C3, bondedAt, C1, 110.731*numpy.pi/180.0, 5.0*numpy.pi/180.0] )\n print \" - MAN-BMA (%s) bond\" % bondedAt.name\n if not bondedRes in resMap :\n for at in bondedRes.atoms :\n atomsMap[at] = 1\n at.M = 0.0\n\n\n\n if dmap and not hasattr ( dmap, 'maxd' ) :\n M = dmap.data.full_matrix()\n avg, std = numpy.average(M), numpy.std(M)\n #maxM = numpy.max(M)\n #minM = numpy.min(M)\n dmap.maxd = min ( avg+std*10.0, numpy.max(M) )\n dmap.mind = max ( avg-std*1.0, numpy.min(M) )\n\n refAtoms = atomsMap.keys()\n refPos = numpy.zeros ( [len(refAtoms),3] )\n refGrad = numpy.zeros ( [len(refAtoms),3] )\n refAtM = numpy.zeros ( [len(refAtoms),3] )\n\n\n\n #global gAtId\n atId = GetLastId () + 1\n for i, at in enumerate(refAtoms) :\n\n at.i = i\n #refPos[i] = at.coord()\n refAtM[i] = [at.M, at.M, at.M]\n\n if not hasattr ( at, 'coords' ) :\n at.coords = {}\n #at.coordAt = len( at.coords )\n at.coords[atId] = at.coord()\n\n\n print \"Coord id is %d\" % atId\n global gAtId\n gAtId = atId\n\n #global refAtomsPos0\n #refAtomsPos0 = _multiscale.get_atom_coordinates ( refAtoms, transformed = False )\n\n\ndef GetLastId () :\n lastId = -1\n for m in chimera.openModels.list(modelTypes = [chimera.Molecule]) :\n for at in m.atoms :\n if hasattr ( at, 'coords' ) :\n lastId = max ( lastId, max ( at.coords.keys() ) )\n return lastId\n\ngAtId = None\n\n\ndef RefBack () :\n\n global gAtId\n if gAtId == None :\n gAtId = GetLastId ()\n\n if gAtId >= 0 :\n for m in chimera.openModels.list(modelTypes = [chimera.Molecule]) :\n for at in m.atoms :\n if hasattr ( at, 'coords' ) :\n if gAtId in at.coords :\n at.setCoord ( at.coords[gAtId] )\n\n print \"Set coord id %d\" % gAtId\n gAtId = gAtId - 1\n\n\n\n\nimport threading\nimport time\nimport Queue\n\nclass RefThread ( threading.Thread ):\n def __init__(self, outQueue, inQueue, dmap, mapF):\n threading.Thread.__init__(self)\n self.dmap = dmap\n self.mapF = mapF\n 
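# --- illustrative sketch, not part of the original file ---
# RefThread.run (just below) polls inQueue, performs one RefStep per "go"
# message, and reports a timing/energy string on outQueue until it receives
# "stop". A possible driver for that queue protocol, written as a sketch
# (drive_ref_thread and nSteps are illustrative; the plugin's GUI may drive
# the thread differently):

def drive_ref_thread ( dmap, mapF, nSteps = 10 ) :
    import Queue
    fromThread, toThread = Queue.Queue(), Queue.Queue()   # outQueue, inQueue of RefThread
    t = RefThread ( fromThread, toThread, dmap, mapF )
    t.start ()
    for i in range ( nSteps ) :
        toThread.put ( "go" )        # ask the thread to run one RefStep
        print fromThread.get ()      # blocks until the step's report arrives
        RefPut ()                    # push refined positions back onto the atoms
    toThread.put ( "stop" )
    t.join ()
# --- end of sketch ---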
self.inQueue = inQueue\n self.outQueue = outQueue\n\n def run(self):\n #print \"Thread start\"\n\n while 1 :\n\n try:\n msg = self.inQueue.get(0)\n\n except Queue.Empty:\n time.sleep(0.1)\n #print \"Thread done\"\n continue\n\n if msg == \"stop\" :\n print \" - thread msg:\", msg\n break\n elif msg == \"go\" :\n startt = time.time()\n RefStep ( self.dmap, self.mapF )\n dur = time.time() - startt\n\n e = RefE ( self.dmap )\n msg2 = \"%.3fs / \" % dur + e\n self.outQueue.put( msg2 )\n else :\n print \" - thread msg: ? \", msg\n break\n\n\n\ndef RefStep ( dmap, mapF ) :\n\n global refPar\n global refAtoms, refPos, refGrad, refAtM\n\n #mapF = float ( mapFStr.get() )\n #print mapF\n #if dmap :\n # print \" - in map %s\" % dmap.name\n #mapF = 0.1\n\n for at in refAtoms :\n #at.G = chimera.Vector(0,0,0)\n #-at.P = at.coord()\n #at.P = chimera.Point(0,0,0)\n #-at.G = chimera.Vector(0,0,0)\n refPos[at.i] = at.coord()\n refGrad[at.i] = [0,0,0]\n\n\n #if log : print \"\"\n\n if 1 :\n for b in refPar.bonds :\n at1, at2, D, esd = b\n BondG ( at1, at2, D, esd, F=0.1 )\n\n #if log : print \"\"\n\n if 1 :\n for a in refPar.angles :\n at1, at2, at3, A, esd = a\n AngleG ( at1, at2, at3, A, esd, F=0.1 )\n #break\n\n if 1 :\n for t in refPar.torsions :\n at1, at2, at3, at4, angle, esd, period = t\n\n #if at1.name == \"N\" :\n #print \"tor: %s, %s, %s, %s - angle %.3f, per %d\" % ( At(at1), at2.name, at3.name, at4.name, angle*180.0/numpy.pi, period )\n #RotDih ( at1, at2, at3, at4, angle, esd, period, dmap )\n\n TorsionG ( at1, at2, at3, at4, angle, esd, period, F=0.001 )\n\n if 1 :\n for planeAtoms in refPar.planes :\n PlaneG ( planeAtoms, F=0.1 )\n pass\n\n if dmap :\n for at in refAtoms :\n MapG ( at, dmap, F=mapF )\n #break\n pass\n\n\n global gMaxDs, gMaxG\n\n maxD = 0.0\n\n #for at in refAtoms :\n # #at.setCoord ( at.coord() + at.G )\n # if not hasattr ( at, 'M' ) :\n # print \" - xMx - at %s\" % At(at)\n # else :\n # #-gl = at.G.length\n # #-if gl > gMaxG :\n # #- gMaxG = gl\n # #-at.P = at.P + at.G * at.M\n # #at.setCoord ( at.P )\n # pass\n # #pass\n\n gMaxDs[0] = gMaxDs[1]\n gMaxDs[1] = gMaxDs[2]\n gMaxDs[2] = gMaxG\n\n gMaxG = numpy.sqrt ( max ( numpy.sum ( refGrad*refGrad, axis=1 ) ) )\n\n scaleF = 1.0\n if gMaxG > 10.0 :\n scaleF = (10.0 / gMaxG)\n refGrad = refGrad * scaleF\n #gMaxG = numpy.sqrt ( max ( numpy.sum ( refGrad*refGrad, axis=1 ) ) )\n\n refPos = refPos + (refGrad * refAtM)\n\n\n\ndef RefPut () :\n global refAtoms, refPos\n for at in refAtoms :\n p = refPos[at.i]\n at.setCoord ( chimera.Point(*p) )\n\n\n\ndef RefE ( dmap ) :\n\n global refPar\n global refAtoms\n\n dB = 0.0\n if 1 :\n for b in refPar.bonds :\n at1, at2, D, esd = b\n dB += BondG ( at1, at2, D, esd, F=0 )\n\n #if log : print \" - b %s _ %s %.3f (%.3f)\" % (At(at1), At(at2), D, d)\n\n #if log : print \"\"\n\n dA = 0.0\n if 1 :\n for a in refPar.angles :\n\n at1, at2, at3, A, esd = a\n dA += AngleG ( at1, at2, at3, A, esd, F=0 )\n\n #if log : print \" - a %s _ %s _ %s %.3f (%.3f)\" % (At(at1), At(at2), At(at3), A, d)\n\n dTor = 0.0\n if 1 :\n for t in refPar.torsions :\n at1, at2, at3, at4, angle, esd, period = t\n\n #if at1.name == \"N\" :\n # print \"tor: %s, %s, %s, %s - angle %.3f, per %d\" % ( At(at1), at2.name, at3.name, at4.name, angle*180.0/numpy.pi, period )\n dTor += TorsionG ( at1, at2, at3, at4, angle, esd, period, F=0 )\n\n dPlanes = 0.0\n if 1 :\n for planeAtoms in refPar.planes :\n dPlanes += PlaneG ( planeAtoms, F=0 )\n\n\n dMap = 0.0\n if dmap :\n for at in refAtoms :\n dMap += MapG ( at, dmap, F=0 )\n\n 
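# --- illustrative sketch, not part of the original file ---
# RefStep above caps the per-atom gradient: it takes the largest row norm of
# the (N,3) refGrad array and rescales the whole array when that norm exceeds
# 10. The same clipping in isolation, assuming grad is an (N,3) numpy array
# (clip_max_gradient and maxNorm are illustrative names):

import numpy

def clip_max_gradient ( grad, maxNorm = 10.0 ) :
    # largest per-row (per-atom) gradient length
    maxG = numpy.sqrt ( numpy.max ( numpy.sum ( grad * grad, axis = 1 ) ) )
    if maxG > maxNorm :
        grad = grad * ( maxNorm / maxG )
    return grad, maxG
# --- end of sketch ---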
#if dmap :\n # RefDisp ( refAtoms, dmap )\n\n #if log : print \"\"\n #maxG = max ( numpy.sum ( refGrad*refGrad, axis=1 ) )\n global gMaxG\n\n estr = \"[%d] atoms, [%d] bonds:%.3f, [%d] angles:%.3f, [%d] torsions:%.3f, [%d] planes:%.3f, map:%.4f maxG:%.5f\" % ( len(refAtoms),\n len(refPar.bonds), dB, len(refPar.angles), dA, len(refPar.torsions), dTor, len(refPar.planes), dPlanes, dMap, gMaxG )\n\n return estr\n\n\n\n\ndef MapG ( at, dmap, F=0.1 ) :\n\n global refAtoms, refPos, refGrad\n\n #print at.P, type(at.P)\n #tp = at.P\n ##tp = at.molecule.openState.xform.apply ( at.P )\n ##tp = dmap.openState.xform.inverse().apply ( tp )\n #P = tp.data()\n\n P = refPos[at.i]\n\n if F <= 0.0 :\n return dmap.interpolated_values ( [P], at.molecule.openState.xform ) [0]\n\n dx, dy, dz = dmap.data.step\n pts = numpy.array ( [P] * 6 )\n\n #print pts\n\n #dx *= 0.1\n\n pts[0][0] -= dx; pts[1][0] += dx\n pts[2][1] -= dx; pts[3][1] += dx\n pts[4][2] -= dx; pts[5][2] += dx\n\n #print pts, type(pts)\n\n dmap.ctr = numpy.array(dmap.data.origin) + numpy.array(dmap.data.size)/2.0*numpy.array(dmap.data.step)\n\n\n vs = dmap.interpolated_values ( pts, at.molecule.openState.xform )\n\n #print mapvs, type(mapvs)\n\n vs = (vs - dmap.mind)/(dmap.maxd-dmap.mind)\n\n #print mapvs, type(mapvs)\n\n dx2 = 1.0/(dx*2.0)\n\n G = chimera.Vector ( (vs[1] - vs[0])*dx2, (vs[3] - vs[2])*dx2, (vs[5] - vs[4])*dx2 )\n #print G\n\n #G = dmap.ctr - numpy.array( tp )\n #G = chimera.Vector ( *G )\n\n #G = dmap.openState.xform.apply ( G )\n #G = at.molecule.openState.xform.inverse().apply ( G )\n #print G\n\n #at.dg = G\n #at.G += G * F\n refGrad[at.i] += G * F\n\n\n\n\ndef RefDisp ( ats, dmap, mname = \"RefDisp\" ) :\n\n import axes; reload ( axes )\n\n smod = None\n for m in chimera.openModels.list() :\n if m.name == mname :\n smod = m\n\n if smod :\n for sp in smod.surfacePieces :\n smod.removePiece ( sp )\n\n else :\n import _surface\n smod = _surface.SurfaceModel()\n smod.name = mname\n chimera.openModels.add ( [smod] )\n\n\n #axes.SphereMesh (r=0.3, div=10, color=(.9,.5,.5,1.0), pos=dmap.ctr, mol=smod)\n\n\n for at in ats :\n\n if hasattr ( at, 'dg' ) :\n\n I = smod.openState.xform.inverse()\n P = I.apply(at.xformCoord())\n G = I.apply(at.molecule.openState.xform.apply(at.dg))\n\n #axes.AddArrow4 ( P, G, G.length, clr=(.5,.8,.5,1), rad=0.1, mol=smod, hrad=0.15, hlen=0.05 )\n if at.dg.length > 1e-3 :\n axes.AddArrow4 ( at.P, at.dg, at.dg.length, clr=(.5,.8,.5,1), rad=0.1, mol=smod, hrad=0.15, hlen=0.05 )\n\n\n if hasattr ( at, 'toPlaneV' ) :\n\n #axes.AddArrow4 ( P, G, G.length, clr=(.5,.8,.5,1), rad=0.1, mol=smod, hrad=0.15, hlen=0.05 )\n if at.toPlaneV.length > 1e-3 :\n axes.AddArrow4 ( at.P, at.toPlaneV, at.toPlaneV.length, clr=(.5,.8,.5,1), rad=0.1, mol=smod, hrad=0.15, hlen=0.05 )\n #axes.AddArrow4 ( at.com, at.toPlaneV, at.toPlaneV.length, clr=(.7,.2,.5,1), rad=0.1, mol=smod, hrad=0.15, hlen=0.05 )\n\n\n\ndef RotDih ( at1, at2, at3, at4, angle, esd, period, dmap ) :\n\n A = diha ( at1, at2, at3, at4 )\n print \" - start: %.3f\" % A\n\n for a in range ( 0, 360, 10 ) :\n\n print \"%.1f\\t\" % a,\n\n TorsionG ( at1, at2, at3, at4, angle, esd, period, F=1.0 )\n\n bx = at3.P - at2.P; bx.normalize()\n xf = chimera.Xform.translation (at3.P.toVector()*1.0)\n xf.multiply ( chimera.Xform.rotation ( bx, 10.0 ) )\n xf.multiply ( chimera.Xform.translation (at3.P.toVector()*-1.0) )\n\n trp = xf.apply ( at4.P )\n #at4.setCoord ( trp )\n AddSpherePts ( [ trp.data() ], (0,1,0,1), .1, mname = \"RAD points\" )\n\n trp = 
at3.molecule.openState.xform.apply ( trp )\n\n mapv = dmap.interpolated_values ( [trp.data()], dmap.openState.xform )\n print \"\\t%.3f\" % ( mapv[0] )\n\n\n\ndef diha ( a1, a2, a3, a4 ) :\n #n1 = vnorm ( a1.coord(), a2.coord(), a3.coord() )\n #n2 = vnorm ( a2.coord(), a3.coord(), a4.coord() )\n #return numpy.arccos ( n2 * n1 * -1.0 ) * 180.0 / numpy.pi\n\n # http://math.stackexchange.com/questions/47059/how-do-i-calculate-a-dihedral-angle-given-cartesian-coordinates\n b1 = a2.coord() - a1.coord()\n b2 = a3.coord() - a2.coord()\n b3 = a4.coord() - a3.coord()\n\n n1 = chimera.cross ( b1, b2 ); n1.normalize()\n n2 = chimera.cross ( b2, b3 ); n2.normalize()\n m1 = chimera.cross ( n1, b2 ); m1.normalize()\n\n x = n1 * n2\n y = m1 * n2\n\n return -1.0 * numpy.arctan2 ( y, x) * 180.0 / numpy.pi\n\n\ndef dihap ( a1, a2, a3, a4 ) :\n #n1 = vnorm ( a1.coord(), a2.coord(), a3.coord() )\n #n2 = vnorm ( a2.coord(), a3.coord(), a4.coord() )\n #return numpy.arccos ( n2 * n1 * -1.0 ) * 180.0 / numpy.pi\n\n # http://math.stackexchange.com/questions/47059/how-do-i-calculate-a-dihedral-angle-given-cartesian-coordinates\n b1 = a2 - a1\n b2 = a3 - a2\n b3 = a4 - a3\n\n n1 = chimera.cross ( b1, b2 ); n1.normalize()\n n2 = chimera.cross ( b2, b3 ); n2.normalize()\n m1 = chimera.cross ( n1, b2 ); m1.normalize()\n\n x = n1 * n2\n y = m1 * n2\n\n return -1.0 * numpy.arctan2 ( y, x) * 180.0 / numpy.pi\n\n\ndef At ( at ) :\n return \"%d.%s(%s)_%s\" % (at.residue.id.position, at.residue.id.chainId, at.residue.type, at.name)\n\n\n\ndef AddSpherePts ( pts, clr, rad, mname = \"RAD points\" ) :\n\n from chimera import elements, Coord, Atom, MolResId\n\n ptsMol = None\n for m in chimera.openModels.list() :\n if m.name == mname :\n ptsMol = m\n\n res = None\n if ptsMol == None:\n from chimera import Molecule, openModels\n ptsMol = Molecule()\n ptsMol.name = mname\n ptsMol.isRealMolecule = False\n openModels.add ( [ptsMol], noprefs = True )\n res = ptsMol.newResidue('marker', chimera.MolResId('1', 1) )\n else :\n res = ptsMol.residues[0]\n\n for pt in pts :\n a = ptsMol.newAtom('', elements.H)\n res.addAtom(a)\n\n a.setCoord ( chimera.Point(*pt) ) # ( chimera.Point(*xyz) )\n a.radius = rad\n a.drawMode = Atom.Sphere\n a.color = chimera.MaterialColor ( *clr )\n a.surfaceCategory = 'markers'\n\n\n\n\n\ndef BondG ( at1, at2, D, esd, F=0.1 ) :\n\n #v = at1.P - at2.P\n\n global refAtoms, refPos, refGrad\n pv = refPos[at1.i] - refPos[at2.i]\n v = chimera.Vector( *pv )\n\n d = D - v.length\n\n #if log : print \" - b0 %s-%s %.3f/%.3f\" % (at1.name, at2.name, D, d)\n\n if F <= 0.0 :\n return d * d\n\n\n if v.length < 0.01 :\n v = chimera.Vector ( random.random(), random.random(),random.random() )\n\n v.normalize()\n #at1.G += v * d * F\n #at2.G -= v * d * F\n #at1.P = at1.P + v * d * 0.1 * at1.M\n #at2.P = at2.P - v * d * 0.1 * at2.M\n refGrad[at1.i] += v * d * F\n refGrad[at2.i] -= v * d * F\n\n\n\ndef AngleG_ (at1, at2, at3, theta0, esd, F=0.001) :\n\n global refAtoms, refPos, refGrad\n pv = refPos[at1.i] - refPos[at2.i]\n\n r12 = refPos[at1.i] - refPos[at2.i]\n r32 = refPos[at3.i] - refPos[at2.i]\n\n r12 = chimera.Vector(*r12); d12 = r12.length\n r32 = chimera.Vector(*r32); d32 = r32.length\n\n #r12 = at1.P - at2.P; d12 = r12.length\n #r32 = at3.P - at2.P; d32 = r32.length\n\n cos_theta = (r12 * r32)/(d12*d32);\n #theta0 = a->dEqAngle;\n theta = numpy.arccos(cos_theta);\n diff = theta - theta0\n\n\n #print \" - a %s _ %s _ %s %.3f/%.3f (%.3f)\" % (At(at1), At(at2), At(at3), theta0, theta, diff)\n\n #energy = k *diff*diff;\n 
#dEAngles += energy;\n\n if F <= 0.0 :\n return diff*diff\n\n d12inv = 1.0 / d12;\n d32inv = 1.0 / d32;\n\n # Calculate constant factor 2k(theta-theta0)/sin(theta)\n sin_theta = numpy.sqrt(1.0 - cos_theta*cos_theta)\n #diff *= (-2.0 * k) / sin_theta;\n diff = diff * (-2.0) / sin_theta;\n c1 = diff * d12inv\n c2 = diff * d32inv\n\n # Calculate the actual forces\n force1 = (r12*(d12inv*cos_theta) - r32*d32inv)*c1\n force2 = force1\n force3 = (r32*(d32inv*cos_theta) - r12*d12inv)*c2\n force2 += force3\n force2 *= -1;\n\n at1.G += force1 * F\n at2.G += force2 * F\n at3.G += force3 * F\n\n\n\n\ndef AngleG (at1, at2, at3, theta0, esd, F=0.001) :\n\n #rij = at1.P - at2.P\n #rik = at3.P - at2.P\n\n global refAtoms, refPos, refGrad\n\n rij = refPos[at1.i] - refPos[at2.i]; rij = chimera.Vector (*rij)\n rik = refPos[at3.i] - refPos[at2.i]; rik = chimera.Vector (*rik)\n\n lij, lik, = rij.length, rik.length\n if lij < 1e-3 or lik < 1e-3 :\n return 0.0\n\n rij.normalize()\n rik.normalize()\n\n cost = rij * rik\n t = numpy.arccos(cost);\n d = t - theta0\n ge = 2.0 * d / numpy.sin(t);\n\n if F <= 0.0 :\n return d * d\n\n\n i0 = -rik[0]/lij - rij[0]/lik - cost * ( -rij[0]/lij - rik[0]/lik )\n i1 = -rik[1]/lij - rij[1]/lik - cost * ( -rij[1]/lij - rik[1]/lik )\n i2 = -rik[2]/lij - rij[2]/lik - cost * ( -rij[2]/lij - rik[2]/lik )\n\n j0 = rik[0]/lij - cost * ( -rij[0]/lij )\n j1 = rik[1]/lij - cost * ( rij[1]/lij )\n j2 = rik[2]/lij - cost * ( -rij[2]/lij )\n\n k0 = rij[0]/lik - cost * ( rik[0]/lik )\n k1 = rij[1]/lik - cost * ( rik[1]/lik )\n k2 = rij[2]/lik - cost * ( rik[2]/lik )\n\n fi, fj, fk = chimera.Vector(i0,i1,i2), chimera.Vector(j0,j1,j2), chimera.Vector(k0,k1,k2)\n\n #at1.G += fj * (ge * F)\n #at2.G += fi * (ge * F)\n #at3.G += fk * (ge * F)\n\n refGrad[at1.i] += fj * (ge * F)\n refGrad[at2.i] += fi * (ge * F)\n refGrad[at3.i] += fk * (ge * F)\n\n\n\ndef TorsionG ( at1, at2, at3, at4, angle, esd, period, F=0.01 ) :\n\n\t# Calculate the vectors between atoms\n\t#Vector3d pos0 = d->a1->pos, pos1 = d->a2->pos, pos2 = d->a3->pos, pos3 = d->a4->pos;\n\t#Vector3d r12 = pos0-pos1, r23 = pos1-pos2, r34 = pos2-pos3;\n\n #r12 = at1.P - at2.P;\n #r23 = at2.P - at3.P;\n #r34 = at3.P - at4.P\n\n global refAtoms, refPos, refGrad\n r12 = refPos[at1.i] - refPos[at2.i]; r12 = chimera.Vector (*r12)\n r23 = refPos[at2.i] - refPos[at3.i]; r23 = chimera.Vector (*r23)\n r34 = refPos[at3.i] - refPos[at4.i]; r34 = chimera.Vector (*r34)\n\n\n\t# Calculate the cross products and distances\n\t#Vector3d A = r12.Cross(r23); double rA = A.Length();\n\t#Vector3d B = r23.Cross(r34); double rB = B.Length();\n\t#Vector3d C = r23.Cross(A); double rC = C.Length();\n A = chimera.cross ( r12, r23 ); rA = A.length\n B = chimera.cross ( r23, r34 ); rB = B.length\n C = chimera.cross ( r23, A ); rC = C.length\n\n if rA < 1e-3 or rB < 1e-3 or rC < 1e-3 :\n return 0.0\n\n\t# Calculate the sin and cos\n\t#double cos_phi = (A.Dot(B))/(rA*rB);\n\t#double sin_phi = (C.Dot(B))/(rC*rB);\n cos_phi = A * B / (rA * rB)\n sin_phi = C * B / (rC * rB)\n\n\t#double phi= -atan2(sin_phi,cos_phi);\n phi = - numpy.arctan2 ( sin_phi, cos_phi )\n\n\n K=0;\t\t# energy\n K1=0;\t\t# force\n\n\t# get the dihedral information\n\t#int multiplicity = (int) d->values.size();\n #multiplicity = 0\n\n\t# Loop through the multiple parameter sets for this\n\t# bond. 
We will only loop more than once if this\n\t# has multiple parameter sets from Charmm22\n\t#for (int mult_num=0; mult_num<multiplicity; mult_num++)\n\t#{\n\t\t# get angle information\n\t\t#double k = d->values[mult_num].dFC;\n\t\t#double delta = d->values[mult_num].dPhase;\n\t\t#int n = d->values[mult_num].dPeriodicity;\n\n delta = angle\n n = period\n k = 1.0\n diff = 0\n\n\t# Calculate the energy\n if n > 0 :\n \t# Periodicity is greater than 0, so use cos form\n \t#K += k*(1+numpy.cos(n*phi - delta));\n \t#K1 += -n*k*numpy.sin(n*phi - delta);\n\n d = n*phi - delta\n \tK += k*(1+numpy.cos(d))\n \tK1 += -n*k*numpy.sin(d)\n\n #minDiff = 1e9\n #for i in range (-(n+1), n+1) :\n # if i != 0 :\n # d = i*phi - delta\n # ad = abs(d)\n # if ad < minDiff :\n # minDiff = ad\n # diff = d\n\n else :\n\t\t# Periodicity is 0, so just use the harmonic form\n diff = phi - delta\n if diff < -M_PI :\n diff += TWOPI\n elif diff > M_PI :\n diff -= TWOPI;\n\n K += k*diff*diff;\n K1 += 2.0*k*diff;\n\n #print \"tor: %s, %s, %s, %s - angle %.3f / %.3f, per %d\" % ( At(at1), at2.name, at3.name, at4.name, angle, phi*180/numpy.pi, period )\n\n #print \"%.3f\\t%.3f\" % (phi*180/numpy.pi, (-K+1.0)),\n\n\n #print \" - tor %s %s %s %s - per %d, phi %.2f (%.2f), diff %.2f -- %.3f\" % (At(at1), at2.name, at3.name, at4.name, period, phi*180.0/numpy.pi, angle*180.0/numpy.pi, diff*180.0/numpy.pi, K)\n\n\n #dEDihedrals += K;\n #}\n\n if F <= 0.0 :\n return K\n\n # using F instead of k here\n K1 *= F\n\n #Vector3d f1,f2,f3;\n\n # Normalize B\n rB = 1.0/rB;\n B *= rB;\n\n\t# Next, we want to calculate the forces. In order\n\t# to do that, we first need to figure out whether the\n\t# sin or cos form will be more stable. For this,\n\t# just look at the value of phi\n #print \"sin_phi - %.3f - %.3f\" % (sin_phi, abs(sin_phi))\n if abs(sin_phi) > 0.1 :\n\t\t# use the sin version to avoid 1/cos terms\n\t\t# Normalize A\n rA = 1.0/rA;\n A *= rA;\n dcosdA = (cos_phi*A-B)*rA;\n dcosdB = (cos_phi*B-A)*rB;\n\n #print \"sin\"\n\n K1 = K1/sin_phi;\n\n f1, f2, f3 = chimera.Vector(), chimera.Vector(), chimera.Vector()\n\n f1.x = K1*(r23.y*dcosdA.z - r23.z*dcosdA.y)\n f1.y = K1*(r23.z*dcosdA.x - r23.x*dcosdA.z)\n f1.z = K1*(r23.x*dcosdA.y - r23.y*dcosdA.x)\n\n f3.x = K1*(r23.z*dcosdB.y - r23.y*dcosdB.z)\n f3.y = K1*(r23.x*dcosdB.z - r23.z*dcosdB.x)\n f3.z = K1*(r23.y*dcosdB.x - r23.x*dcosdB.y)\n\n f2.x = K1*(r12.z*dcosdA.y - r12.y*dcosdA.z + r34.y*dcosdB.z - r34.z*dcosdB.y)\n f2.y = K1*(r12.x*dcosdA.z - r12.z*dcosdA.x + r34.z*dcosdB.x - r34.x*dcosdB.z)\n f2.z = K1*(r12.y*dcosdA.x - r12.x*dcosdA.y + r34.x*dcosdB.y - r34.y*dcosdB.x)\n\n else :\n\n # This angle is closer to 0 or 180 than it is to\n # 90, so use the cos version to avoid 1/sin terms\n\n #print \"cos\"\n\n # Normalize C\n rC = 1.0/rC\n C *= rC\n dsindC = (sin_phi*C-B)*rC\n dsindB = (sin_phi*B-C)*rB\n\n K1 = -K1/cos_phi\n\n f1, f2, f3 = chimera.Vector(), chimera.Vector(), chimera.Vector()\n\n f1.x = K1*((r23.y*r23.y + r23.z*r23.z)*dsindC.x - r23.x*r23.y*dsindC.y - r23.x*r23.z*dsindC.z)\n f1.y = K1*((r23.z*r23.z + r23.x*r23.x)*dsindC.y - r23.y*r23.z*dsindC.z - r23.y*r23.x*dsindC.x)\n f1.z = K1*((r23.x*r23.x + r23.y*r23.y)*dsindC.z - r23.z*r23.x*dsindC.x - r23.z*r23.y*dsindC.y)\n\n #f3 = K1*dsindB.Cross(r23)\n f3 = chimera.cross ( dsindB, r23 ) * K1\n\n f2.x = K1*(-(r23.y*r12.y + r23.z*r12.z)*dsindC.x +(2.0*r23.x*r12.y - r12.x*r23.y)*dsindC.y\n \t+(2.0*r23.x*r12.z - r12.x*r23.z)*dsindC.z +dsindB.z*r34.y - dsindB.y*r34.z)\n f2.y = K1*(-(r23.z*r12.z + 
r23.x*r12.x)*dsindC.y+(2.0*r23.y*r12.z - r12.y*r23.z)*dsindC.z\n \t+(2.0*r23.y*r12.x - r12.y*r23.x)*dsindC.x+dsindB.x*r34.z - dsindB.z*r34.x)\n f2.z = K1*(-(r23.x*r12.x + r23.y*r12.y)*dsindC.z +(2.0*r23.z*r12.x - r12.z*r23.x)*dsindC.x\n \t+(2.0*r23.z*r12.y - r12.z*r23.y)*dsindC.y+dsindB.y*r34.x - dsindB.x*r34.y)\n\n\n # store the forces\n #at1.G += f1\n #at2.G += f2 - f1\n #at3.G += f3 - f2\n #at4.G += -f3\n\n refGrad[at1.i] += f1\n refGrad[at2.i] += f2 - f1\n refGrad[at3.i] += f3 - f2\n refGrad[at4.i] += -f3\n\n\ndef PlaneG ( planeAtoms, F=0.1 ) :\n\n global refAtoms, refPos, refGrad\n\n points = numpy.zeros ( (len(planeAtoms), 3) )\n i = 0\n for at, esd in planeAtoms :\n #points[i] = at.P\n points[i] = refPos[at.i]\n i += 1\n\n if 0 :\n com = numpy.sum(points, axis=0) / len(points)\n comv = numpy.ones_like ( points ) * com\n points = points - comv\n i = numpy.matrix ( [[1,0,0], [0,1,0], [0,0,1]] )\n ii = i * numpy.sum ( numpy.multiply ( points, points ) )\n p_t = numpy.transpose(points)\n td = numpy.tensordot ( points, p_t, axes=[0,1] )\n I0 = ii - td\n try :\n U, S, V = numpy.linalg.svd( I0 )\n except :\n print \"- error computing SVD - prob. singular matrix\"\n\n #if F > 0 :\n #print \"ctr:\", C\n # print \"U:\", U\n # print \"S:\", S\n #print \"V:\", V\n\n ni = numpy.argmin ( numpy.abs(S) )\n #N = numpy.array ( [U[0,ni], U[1,ni], U[2,ni]] )\n #N = numpy.array ( [V[ni,0], V[ni,1], V[ni,2]] )\n N = numpy.array ( [U[ni,0], U[ni,1], U[ni,2]] )\n\n # barycenter of the points\n # compute centered coordinates\n com = points.sum(axis=0) / points.shape[0]\n\n # run SVD\n u, s, vh = numpy.linalg.svd(points - com)\n\n #print com\n #print u\n #print s\n #print vh\n\n # unitary normal vector\n N = vh[2, :]\n\n #if F > 0 : print \"N:\", N\n\n i = 0\n sumL = 0.0\n for at, esd in planeAtoms :\n v = com - points[i]\n dot = numpy.dot ( v, N )\n #if F > 0 : print \"%s -- %d : %.3f\" % (at.name, i, dot)\n sumL += abs ( dot )\n toPlane = N * (dot * F)\n #at.G += chimera.Vector ( *toPlane )\n refGrad[at.i] += toPlane\n #at.toPlaneV = chimera.Vector ( *(N*dot) ) * 10.0\n #at.com = chimera.Point ( *com )\n i += 1\n\n #if hasattr ( at, 'toPlaneV' ) : del at.toPlaneV\n #if hasattr ( at, 'com' ) : del at.com\n\n #if F > 0 : print \" - sumL: %.3f\" % sumL\n\n if F <= 0 :\n return sumL\n\n\n\n\n\n\n\n\ndef AtomsNearRegs ( regs, maxD=4.0 ) :\n\n mols = []\n #print \"atoms near in:\"\n for m in chimera.openModels.list() :\n if type(m) == chimera.Molecule and m.display == True :\n mols.append ( m )\n\n import grid\n reload(grid)\n\n agrid = grid.Grid ()\n agrid.FromMols ( mols, maxD )\n\n n_regs = []\n\n points = regs[0].points().astype ( numpy.float32 )\n for r in regs[1:] :\n npoints = r.points().astype ( numpy.float32 )\n points = numpy.concatenate ( [points, npoints], axis=0 )\n\n segMap = regs[0].segmentation.seg_map\n _contour.affine_transform_vertices ( points, segMap.data.ijk_to_xyz_transform )\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix( segMap.openState.xform ) )\n\n atMap = {}\n #resMap = {}\n for p in points :\n cp = chimera.Point(p[0],p[1],p[2])\n nearAts = agrid.AtsNearPt ( cp )\n for at in nearAts :\n atMap[at] = 1\n #resMap[at.residue] = 1\n\n return atMap.keys()\n\n\n\n\ndef AddMol ( molName, selAt, inMap, regs, toMol=None, toChainId=None ) :\n\n if molName.lower() == \"nag\" :\n print \" - adding nag\"\n AddNAG ( selAt, inMap, selReg )\n\n elif molName.lower() == \"bma\" :\n print \" - adding bma\"\n AddBMA ( selAt, inMap, selReg )\n\n elif molName.lower() == \"man\" :\n 
print \" - adding man\"\n AddMAN ( selAt, inMap, selReg )\n\n\n else :\n print \" - adding %s\" % molName.lower()\n AddMol2 ( molName.lower(), inMap, regs, toMol, toChainId )\n\n\n\n\ndef AddMol2 ( molName, inMap, regs, toMol, toChainId ) :\n\n fname = \"/Users/greg/Dropbox/_mol/Segger/_param/%s.pdb\" % molName\n\n from os import path\n if not path.isfile(fname) :\n print \" - did not find %s\" % fname\n return\n\n nmol = chimera.PDBio().readPDBfile ( fname )[0]\n print \" - read %s - %d atoms - %d res\" % ( nmol.name, len(nmol.atoms), len(nmol.residues) )\n addRes = nmol.residues[0]\n\n\n from axes import prAxes\n regsPoints = RegsPtsInMol ( regs, toMol )\n regsC, regsU, regsS, regsV = prAxes ( regsPoints )\n #print regsC, regsU\n\n molPoints = _multiscale.get_atom_coordinates ( addRes.atoms, transformed = False )\n molC, molU, molS, molV = prAxes ( molPoints )\n #print molC, molU\n\n import qscores\n reload ( qscores )\n\n xfs = uniform_rota_xfs ( 64 )\n score_xfs = []\n for rxf in xfs :\n\n xf = chimera.Xform.translation ( molC * -1.0 )\n xf.premultiply ( rxf )\n xf.premultiply ( chimera.Xform.translation ( regsC ) )\n\n #nres = AddResToMol ( addRes, toMol, toChainId, xf, withoutAtoms=[] )\n\n cc0, cc1, xfm = FitAtomsXf ( addRes.atoms, xf, inMap )\n xf.premultiply ( xfm )\n\n takeIt = True\n rv, ang = xf.getRotation(); tr = xf.getTranslation()\n for cc1_, cc0_, xf0 in score_xfs :\n # todo: use rmsd instead!\n rv0, ang0 = xf0.getRotation(); tr0 = xf0.getTranslation()\n rd, rang, td = numpy.arccos(rv*rv0)*180.0/numpy.pi, abs(ang0-ang), (tr0-tr).length\n if rd < 5.0 and rang < 5.0 and td < 3.0 :\n takeIt = False\n\n if takeIt :\n molg = qscores.MyMolMapX2 ( addRes.atoms, 2.0, inMap.data.step[0], xf )\n fpoints, fpoint_weights = qscores.fit_points_g ( molg, 1e-2 )\n map_values = inMap.interpolated_values ( fpoints, toMol.openState.xform )\n #print map_values\n olap, CC, CCm = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n print \" - taking %.3f -> %.3f\" % (cc1, CC)\n\n score_xfs.append ( [CC, cc0, xf] )\n\n\n score_xfs.sort ( reverse=True, key=lambda x: x[0] )\n print \"%d unique\" % len(score_xfs)\n\n #topCC, topCC0, xf = score_xfs[0]\n\n if 1 :\n for cc1, cc0, xf in score_xfs :\n nres = AddResToMol ( addRes, toMol, toChainId, xf, withoutAtoms=[] )\n nres.scoreCC = cc1\n print \" - added %s - %d.%s -- %.4f\" % (nres.type, nres.id.position, nres.id.chainId, cc1)\n\n else :\n q_xfs = []\n import qscores\n\n ats = [at for at in toMol.atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n print \" - search tree: %d/%d ats\" % ( len(ats), len(toMol.atoms) )\n pts = points.tolist()\n from CGLutil.AdaptiveTree import AdaptiveTree\n allPtTree = AdaptiveTree ( pts, pts, 1.0)\n #allAtTree = None\n minD, maxD = qscores.MinMaxD ( inMap )\n print \" - minD %.3f, maxD %.3f\" % (minD, maxD)\n\n\n for cc1, cc0, xf in score_xfs :\n #nres = AddResToMol ( addRes, toMol, toChainId, xf, withoutAtoms=[] )\n #nres.scoreCC = cc1\n #print \" - added %s - %d.%s -- %.4f\" % (nres.type, nres.id.position, nres.id.chainId, cc1)\n qavg, N = 0.0, 0.0\n for at in addRes.atoms :\n xfp = xf.apply ( at.coord() )\n #xfp = toMol.openState.xform.apply ( xfp )\n Q = qscores.QscorePt2 ( xfp, toMol.openState.xform, inMap, 0.6, allPtTree=allPtTree, log=0, numPts=8, toRAD=2.0, dRAD=0.1, minD=minD, maxD=maxD, fitg=0 )\n qavg += Q; N += 1.0\n Q = qavg / N\n\n q_xfs.append ( [Q, cc1, xf] )\n print \" - cc %.3f -> Q %.3f\" % (cc1, Q)\n\n q_xfs.sort ( 
reverse=True, key=lambda x: x[0] )\n\n for Q, cc1, xf in q_xfs :\n nres = AddResToMol ( addRes, toMol, toChainId, xf, withoutAtoms=[] )\n nres.scoreCC = cc1\n nres.scoreQ = Q\n print \" - added %s - %d.%s -- cc %.3f, Q %.3f\" % (nres.type, nres.id.position, nres.id.chainId, cc1, Q)\n\n\n\n\ndef SegFitRes ( res, inMap, regs, useAts=None ) :\n\n from axes import prAxes\n regsPoints = RegsPtsInMol ( regs, res.molecule )\n regsC, regsU, regsS, regsV = prAxes ( regsPoints )\n #print regsC, regsU\n\n ats = res.atoms if useAts == None else useAts\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n molC, molU, molS, molV = prAxes ( points )\n #print molC, molU\n\n xfs = uniform_rota_xfs ( 64 )\n score_xfs = []\n for rxf in xfs :\n\n xf = chimera.Xform.translation ( molC * -1.0 )\n xf.premultiply ( rxf )\n xf.premultiply ( chimera.Xform.translation ( regsC ) )\n\n #nres = AddResToMol ( addRes, toMol, toChainId, xf, withoutAtoms=[] )\n\n cc0, cc1, xfm = FitAtomsXf ( ats, xf, inMap )\n xf.premultiply ( xfm )\n\n rv, ang = xf.getRotation(); tr = xf.getTranslation()\n\n takeIt = True\n\n for cc1, cc0, xf0 in score_xfs :\n rv0, ang0 = xf0.getRotation(); tr0 = xf0.getTranslation()\n rd, rang, td = numpy.arccos(rv*rv0)*180.0/numpy.pi, abs(ang0-ang), (tr0-tr).length\n if rd < 5.0 and rang < 5.0 and td < 1.0 :\n takeIt = False\n\n if takeIt :\n score_xfs.append ( [cc1, cc0, xf] )\n\n\n score_xfs.sort ( reverse=True, key=lambda x: x[0] )\n print \"%d unique\" % len(score_xfs)\n\n #topCC, topCC0, xf = score_xfs[0]\n\n for cc1, cc0, xf in score_xfs :\n nres = AddResToMol ( res, res.molecule, res.id.chainId, xf, withoutAtoms=[] )\n nres.scoreCC = cc1\n print \" - added %s - %d.%s -- %.4f\" % (res.type, nres.id.position, nres.id.chainId, cc1)\n\n\n\n\ndef TorFit0 ( res, inMap ) :\n\n tors = FindTors ( [res] )\n print \"%d tors\" % len(tors)\n\n for tor in tors[1:2] :\n\n bond, ats1, ats2 = tor\n p2, p1 = bond.atoms[1].coord(), bond.atoms[0].coord()\n v = p2 - p1\n\n\n for i in range (180) :\n v.normalize()\n\n xf1 = chimera.Xform.translation ( p1.toVector() * -1 )\n xf1.premultiply ( chimera.Xform.rotation ( v, 1.0 ) )\n xf1.premultiply ( chimera.Xform.translation ( p1.toVector() ) )\n\n for at in ats1 :\n at.setCoord ( xf1.apply ( at.coord() ) )\n\n xf2 = chimera.Xform.translation ( p1.toVector() * -1 )\n xf2.premultiply ( chimera.Xform.rotation ( v, -1.0 ) )\n xf2.premultiply ( chimera.Xform.translation ( p1.toVector() ) )\n\n for at in ats2 :\n at.setCoord ( xf2.apply ( at.coord() ) )\n\n print \".\",\n\n\n break\n\n\n\n\ndef TorFitGrads ( res, inMap, useAtoms=None ) :\n\n tors = FindTors ( [res] )\n print \"%d tors\" % len(tors)\n\n conAts = ConAts(res)\n\n import grid\n reload(grid)\n\n\n if 0 :\n ats = res.atoms\n #ats = [at for at in atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n #_contour.affine_transform_vertices ( fpoints, Matrix.xform_matrix(xf0) )\n #_contour.affine_transform_vertices ( fpoints, inMap.openState.xform )\n xyz_to_ijk_tf = inMap.data.xyz_to_ijk_transform\n matrix = inMap.data.full_matrix()\n from VolumeData import interpolate_volume_gradient\n gradients, outside = interpolate_volume_gradient(points, xyz_to_ijk_tf, matrix, 'linear')\n #print gradients\n for i, at in enumerate(ats) :\n at.i = i\n #print at.name, gradients[i]\n\n\n if useAtoms == None :\n useAtoms = res.atoms\n print \"using all %d atoms\" % len(useAtoms)\n else :\n print \"using %d atoms\" % len(useAtoms)\n\n\n tf, rmat = 
inMap.data.xyz_to_ijk_transform, inMap.data.full_matrix()\n points = _multiscale.get_atom_coordinates ( useAtoms, transformed = False )\n last_avg = numpy.average ( VolumeData.interpolate_volume_data ( points, tf, rmat )[0] )\n print \"%.5f -> \" % last_avg,\n\n ijk_step_size_max = 0.5\n ijk_step_size_min = 0.01\n ijk_step_size = ijk_step_size_max\n\n for i in range ( 100 ) :\n\n agrid = grid.Grid ()\n agrid.FromAtoms ( res.atoms, 3.0 )\n\n for tor in tors :\n\n bond, ats1, ats2 = tor\n p2, p1 = bond.atoms[1].coord(), bond.atoms[0].coord()\n v = p2 - p1\n\n AtTorques2 (bond, ats1, agrid, conAts, inMap.data, ijk_step_size)\n AtTorques2 (bond, ats2, agrid, conAts, inMap.data, ijk_step_size)\n\n #break\n\n #break\n\n cc, xf = FitAtoms ( useAtoms, inMap )\n\n for at in res.atoms :\n at.setCoord ( xf.apply ( at.coord() ) )\n\n points = _multiscale.get_atom_coordinates ( useAtoms, transformed = False )\n avg1 = numpy.average ( VolumeData.interpolate_volume_data ( points, tf, rmat )[0] )\n print \"%.3f\" % avg1,\n\n if avg1 < last_avg :\n ijk_step_size = ijk_step_size / 2.0\n if ijk_step_size < ijk_step_size_min :\n print \" -> %.5f\" % avg1\n #print \" - reached min step size, stopping\"\n break\n\n last_avg = avg1\n\n\n\n\ndef TorFitGradsBonds ( selBonds, inMap, useAtoms=None ) :\n\n #tors = FindTors ( res )\n #print \"%d tors\" % len(tors)\n\n ress = []\n\n res = selBonds[0].atoms[0].residue\n mol = res.molecule\n SetBBAts ( mol )\n\n rmap = {}\n #for r in mol.residues :\n # rmap[r.id.chainId + \"%d\"%r.id.position] = r\n for b in selBonds :\n rmap[b.atoms[0].residue] = 1\n rmap[b.atoms[1].residue] = 1\n\n if res.type in protein3to1 or res.type in nucleic1to3 :\n for r in res.molecule.residues :\n if r.id.chainId == res.id.chainId :\n ress.append ( r )\n else :\n ress = [res]\n\n tors = FindTors ( ress, selBonds )\n\n selTors = []\n for tor in tors :\n bond, ats1, ats2 = tor\n if bond in selBonds :\n selTors.append ( tor )\n\n print \" - %d total tors, using %d\" % ( len(tors), len(selTors) )\n\n if len(selTors) == 0 :\n return None\n\n\n scoreAtoms = []\n for r in rmap.keys() :\n #fitAtoms.extend ( r.atoms )\n scoreAtoms.extend ( r.bbAtoms )\n\n allAtoms = []\n for r in rmap.keys() :\n allAtoms.extend ( r.atoms )\n\n\n print \" - %d atoms in %d res, score %d atoms - in %s\" % (len(allAtoms), len(rmap.keys()), len(scoreAtoms), inMap.name)\n\n\n\n import grid\n reload(grid)\n\n\n if 0 :\n ats = res.atoms\n #ats = [at for at in atoms if not at.element.name == \"H\"]\n points = _multiscale.get_atom_coordinates ( ats, transformed = False )\n #_contour.affine_transform_vertices ( fpoints, Matrix.xform_matrix(xf0) )\n #_contour.affine_transform_vertices ( fpoints, inMap.openState.xform )\n xyz_to_ijk_tf = inMap.data.xyz_to_ijk_transform\n matrix = inMap.data.full_matrix()\n from VolumeData import interpolate_volume_gradient\n gradients, outside = interpolate_volume_gradient(points, xyz_to_ijk_tf, matrix, 'linear')\n #print gradients\n for i, at in enumerate(ats) :\n at.i = i\n #print at.name, gradients[i]\n\n\n if useAtoms == None :\n useAtoms = res.atoms\n print \"using all %d atoms\" % len(useAtoms)\n else :\n print \"using %d atoms\" % len(useAtoms)\n\n useAtoms = scoreAtoms\n\n conAts = ConAts2(useAtoms)\n\n tf, rmat = inMap.data.xyz_to_ijk_transform, inMap.data.full_matrix()\n points = _multiscale.get_atom_coordinates ( useAtoms, transformed = False )\n last_avg = numpy.average ( VolumeData.interpolate_volume_data ( points, tf, rmat )[0] )\n print \"%.5f -> \" % last_avg,\n\n ijk_step_size_max 
= 0.5\n ijk_step_size_min = 0.01\n ijk_step_size = ijk_step_size_max\n\n for i in range ( 100 ) :\n\n agrid = grid.Grid ()\n agrid.FromAtoms ( useAtoms, 3.0 )\n\n for tor in selTors :\n\n bond, ats1, ats2 = tor\n p2, p1 = bond.atoms[1].coord(), bond.atoms[0].coord()\n v = p2 - p1\n\n AtTorques2 (bond, ats1, agrid, conAts, inMap.data, ijk_step_size)\n #AtTorques2 (bond, ats2, agrid, conAts, inMap.data, ijk_step_size)\n\n #break\n\n #break\n\n if 0 :\n cc, xf = FitAtoms ( useAtoms, inMap )\n for at in useAtoms :\n at.setCoord ( xf.apply ( at.coord() ) )\n\n #cc = FitScore ( useAtoms, inMap )\n\n points = _multiscale.get_atom_coordinates ( useAtoms, transformed = False )\n avg1 = numpy.average ( VolumeData.interpolate_volume_data ( points, tf, rmat )[0] )\n print \"%.3f\" % avg1,\n\n if avg1 < last_avg :\n ijk_step_size = ijk_step_size / 2.0\n if ijk_step_size < ijk_step_size_min :\n print \" -> %.5f\" % avg1\n #print \" - reached min step size, stopping\"\n break\n\n last_avg = avg1\n\n\n\n\n\ndef AtTorques2 (bond, atoms, agrid, conAts, mdata, step_size=0.5 ) :\n\n at1, at2 = bond.atoms\n center = at1.coord().data()\n rotVec = at2.coord() - at1.coord()\n rotVec.normalize()\n rotVecAr = numpy.array ( rotVec.data() )\n\n m1M = mdata.xyz_to_ijk_transform\n rmat = mdata.full_matrix()\n\n apoints = _multiscale.get_atom_coordinates ( atoms, transformed = False )\n point_weights = None\n #point_weights = numpy.ones ( len(apoints), numpy.float32 )\n\n gradients = VolumeData.interpolate_volume_gradient(apoints, m1M, rmat)[0]\n #maxG = numpy.max ( gradients, axis=0 )\n maxG = numpy.sqrt ( max ( numpy.sum ( gradients*gradients, axis=1 ) ) )\n #print \"mg_%.3f\" % ( maxG )\n\n #print gradients\n\n if 1 :\n grads2 = numpy.zeros ( [len(atoms), 3] )\n for i, at in enumerate ( atoms ) :\n if not at in conAts :\n print \" at %s\" % at.name\n continue\n nearAts = agrid.AtsNearPtLocal ( at.coord() )\n for atn, v in nearAts :\n if not atn in conAts[at] :\n print \"%s -- %s - %.3f\" % (at.name, atn.name, v.length)\n #D = v.length\n v.normalize()\n v = v * maxG * 3.0\n grads2[i] -= v.data()\n\n #print grads2\n\n #maxG2 = numpy.sqrt ( max ( numpy.sum ( grads2*grads2, axis=1 ) ) )\n #print \"max grad2:\", maxG2\n\n gradients += grads2\n\n torque_axis = _distances.torque(apoints, point_weights, gradients, center )\n #print \"torque axis 0:\", torque_axis\n\n dt = numpy.dot ( torque_axis, rotVec.data() )\n torque_axis = rotVecAr if dt > 0 else rotVecAr * -1.0\n\n na = Matrix.norm(torque_axis)\n if na == 0 :\n #torque_axis = (0,0,1)\n pass\n else :\n torque_axis /= na\n angle = angle_step(torque_axis, apoints, center, m1M, step_size)\n #print \"torque axis:\", torque_axis, \"angle:\", angle\n move_tf = Matrix.rotation_transform ( torque_axis, angle, center )\n xf = Matrix.chimera_xform ( move_tf )\n\n for at in atoms :\n at.setCoord ( xf.apply (at.coord()) )\n\n\n\n\ndef AtTorques1 (bond, atoms, mdata, step_size=0.5 ) :\n\n\n at1, at2 = bond.atoms\n center = at1.coord().data()\n rotVec = at2.coord() - at1.coord()\n rotVec.normalize()\n rotVecAr = numpy.array ( rotVec.data() )\n\n\n m1M = mdata.xyz_to_ijk_transform\n rmat = mdata.full_matrix()\n\n apoints = _multiscale.get_atom_coordinates ( atoms, transformed = False )\n point_weights = None\n #point_weights = numpy.ones ( len(apoints), numpy.float32 )\n\n gradients = VolumeData.interpolate_volume_gradient(apoints, m1M, rmat)[0]\n torque_axis = _distances.torque(apoints, point_weights, gradients, center )\n #print \"torque axis 0:\", torque_axis\n\n dt = numpy.dot ( 
torque_axis, rotVec.data() )\n torque_axis = rotVecAr if dt > 0 else rotVecAr * -1.0\n\n na = Matrix.norm(torque_axis)\n if na == 0 :\n #torque_axis = (0,0,1)\n pass\n else :\n torque_axis /= na\n angle = angle_step(torque_axis, apoints, center, m1M, step_size)\n #print \"torque axis:\", torque_axis, \"angle:\", angle\n move_tf = Matrix.rotation_transform ( torque_axis, angle, center )\n xf = Matrix.chimera_xform ( move_tf )\n\n for at in atoms :\n at.setCoord ( xf.apply (at.coord()) )\n\n\n\n\n\ndef AtTorques (bond, atoms, mdata ) :\n\n apoints = _multiscale.get_atom_coordinates ( atoms, transformed = False )\n\n at1, at2 = bond.atoms\n center = at1.coord().data()\n rotVec = at2.coord() - at1.coord();\n rotVec.normalize()\n\n point_weights = None\n #point_weights = numpy.ones ( len(apoints), numpy.float32 )\n ijk_step_size_max = 0.5\n ijk_step_size_min = 0.01\n ijk_step_size = ijk_step_size_max\n\n #xyz_to_ijk_tf = inMap.data.xyz_to_ijk_transform\n #darray = inMap.data.full_matrix()\n #map_values, outside = VolumeData.interpolate_volume_data(fpoints, xyz_to_ijk_tf, darray)\n #olap0, cc0, other = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n\n m1M = mdata.xyz_to_ijk_transform\n rmat = mdata.full_matrix()\n\n values, outside = VolumeData.interpolate_volume_data ( apoints, m1M, rmat )\n avg0 = numpy.average ( values )\n last_avg = avg0\n print \" - 0: %.3f\" % last_avg\n\n xf = chimera.Xform.identity()\n\n rotVecAr = numpy.array ( rotVec.data() )\n totAngle = 0\n\n for i in range ( 100 ) :\n\n gradients = VolumeData.interpolate_volume_gradient(apoints, m1M, rmat)[0]\n torque_axis = _distances.torque(apoints, point_weights, gradients, center )\n #print \"torque axis 0:\", torque_axis\n\n dt = numpy.dot ( torque_axis, rotVec.data() )\n torque_axis = rotVecAr if dt > 0 else rotVecAr * -1.0\n\n na = Matrix.norm(torque_axis)\n if na == 0 :\n torque_axis = (0,0,1)\n angle = 0\n #print \" - torque axis 0, stopping\"\n break\n else :\n torque_axis /= na\n angle = angle_step(torque_axis, apoints, center, m1M, ijk_step_size)\n #print \"torque axis:\", torque_axis, \"angle:\", angle\n\n move_tf = Matrix.rotation_transform ( torque_axis, angle, center )\n _contour.affine_transform_vertices ( apoints, move_tf )\n\n values, outside = VolumeData.interpolate_volume_data ( apoints, m1M, rmat )\n avg1 = numpy.average ( values )\n\n print \" %d - score: %.3f - step: %.3f, angle %.1f\" % (i+1, avg1, ijk_step_size, angle)\n totAngle += angle\n\n xf.premultiply ( Matrix.chimera_xform ( move_tf ) )\n #m1M = Matrix.multiply_matrices( move_tf, m1M )\n\n if avg1 < last_avg :\n ijk_step_size = ijk_step_size / 2.0\n if ijk_step_size < ijk_step_size_min :\n print \" - reached min step size, stopping\"\n break\n\n last_avg = avg1\n\n #print \" - dih %s(%d) - %s(%d), %d its, score: %.3f -> %.3f, step: %.3f, angle %.1f\" % (at1.name, at1.residue.id.position, at2.name, at2.residue.id.position, i+1, avg0, last_avg, ijk_step_size, totAngle)\n\n #print \" - moving atoms in res %d\" % atoms[0].residue.id.position\n if 1 :\n for at in atoms :\n at.setCoord ( xf.apply (at.coord()) )\n\n return last_avg\n\n\n# -----------------------------------------------------------------------------\n# Return angle such that rotating point about given axis and center causes the\n# largest motion in ijk space to equal ijk_step_size.\n#\ndef angle_step(axis, points, center, xyz_to_ijk_transform, ijk_step_size):\n\n import Matrix as m\n tf = m.multiply_matrices(m.zero_translation(xyz_to_ijk_transform),\n 
m.cross_product_transform(axis),\n m.translation_matrix([-x for x in center]))\n\n import _distances as dist\n av = dist.maximum_norm(points, tf)\n\n if av > 0:\n from math import pi\n angle = (ijk_step_size / av) * 180.0/pi\n else:\n angle = 0\n return angle\n\n\n\ndef TorFitR ( res, inMap, stepSize ) :\n\n tors = FindTors ( [res] )\n print \"%d tors\" % len(tors)\n\n from random import random\n\n\n cc0, xf = FitAtoms ( res.atoms, inMap )\n for at in res.atoms :\n at.setCoord ( xf.apply ( at.coord() ) )\n at.coord0 = at.coord()\n print \"%.4f\" % cc0,\n\n\n for i in range ( 100 ) :\n\n for tor in tors :\n\n bond, ats1, ats2 = tor\n p2, p1 = bond.atoms[1].coord(), bond.atoms[0].coord()\n v = p2 - p1\n\n ang = (random()-0.5)*stepSize\n\n xf1 = chimera.Xform.translation ( p1.toVector() * -1 )\n xf1.premultiply ( chimera.Xform.rotation ( v, ang ) )\n xf1.premultiply ( chimera.Xform.translation ( p1.toVector() ) )\n\n for at in ats1 :\n at.setCoord ( xf1.apply ( at.coord() ) )\n\n\n cc, xf = FitAtoms ( res.atoms, inMap )\n\n if cc < cc0 :\n #print \"x\"\n for at in res.atoms :\n at.setCoord( at.coord0 )\n else :\n cc0 = cc\n print \"%d|%.4f\" % (i,cc),\n for at in res.atoms :\n at.coord0 = xf.apply ( at.coord() )\n at.setCoord ( at.coord0 )\n\n\n\n\n\n\ndef TorFitRSel ( selBonds, inMap, stepSize, doRigidFit=False ) :\n\n ress = []\n\n res = selBonds[0].atoms[0].residue\n mol = res.molecule\n SetBBAts ( mol )\n\n rmap = {}\n #for r in mol.residues :\n # rmap[r.id.chainId + \"%d\"%r.id.position] = r\n for b in selBonds :\n rmap[b.atoms[0].residue] = 1\n rmap[b.atoms[1].residue] = 1\n\n if res.type in protein3to1 or res.type in nucleic1to3 :\n for r in res.molecule.residues :\n if r.id.chainId == res.id.chainId :\n ress.append ( r )\n else :\n ress = [res]\n\n tors = FindTors ( ress, selBonds )\n print \" - %d total tors\" % len(tors)\n\n selTors = []\n for tor in tors :\n bond, ats1, ats2 = tor\n if bond in selBonds :\n selTors.append ( tor )\n\n if len(selTors) == 0 :\n print \"sel tors not found\"\n return None\n\n\n scoreAtoms = []\n for r in rmap.keys() :\n #fitAtoms.extend ( r.atoms )\n scoreAtoms.extend ( r.bbAtoms )\n\n allAtoms = []\n for r in rmap.keys() :\n allAtoms.extend ( r.atoms )\n\n print \" - %d atoms in %d res, score %d atoms\" % (len(allAtoms), len(rmap.keys()), len(scoreAtoms))\n\n from random import random\n\n cc0 = None\n if doRigidFit:\n cc0, xf = FitAtoms ( allAtoms, inMap )\n for at in allAtoms :\n at.setCoord ( xf.apply ( at.coord() ) )\n at.coord0 = at.coord()\n else :\n cc0 = FitScore ( scoreAtoms, inMap )\n for at in allAtoms :\n at.coord0 = at.coord()\n\n\n print \"Fitting %d tors - in map: %s, cc: %.4f\" % (len(selTors), inMap.name, cc0)\n\n for i in range ( 100 ) :\n\n for tor in selTors :\n\n bond, ats1, ats2 = tor\n p2, p1 = bond.atoms[1].coord(), bond.atoms[0].coord()\n v = p2 - p1\n\n ang = (random()-0.5)*stepSize\n\n xf1 = chimera.Xform.translation ( p1.toVector() * -1 )\n xf1.premultiply ( chimera.Xform.rotation ( v, ang ) )\n xf1.premultiply ( chimera.Xform.translation ( p1.toVector() ) )\n\n for at in ats1 :\n at.setCoord ( xf1.apply ( at.coord() ) )\n\n\n if doRigidFit :\n cc, xf = FitAtoms ( allAtoms, inMap )\n if cc < cc0 :\n print \".\",\n for at in allAtoms :\n at.setCoord( at.coord0 )\n else :\n #print \"%d|%.4f\" % (i,cc),\n print \"%.4f\" % (cc),\n cc0 = cc\n for at in allAtoms :\n at.coord0 = xf.apply ( at.coord() )\n at.setCoord ( at.coord0 )\n\n\n else :\n cc = FitScore ( scoreAtoms, inMap )\n if cc < cc0 :\n print \".\",\n for at in allAtoms 
:\n at.setCoord( at.coord0 )\n else :\n #print \"%d|%.4f\" % (i,cc),\n print \"%.4f\" % (cc),\n cc0 = cc\n for at in allAtoms :\n at.coord0 = at.coord()\n\n\n\n print \"\"\n\n\n\n\ndef FindTors ( ress, selBonds=None ) :\n\n bondedAtoms = {}\n amap = {}\n for res in ress :\n for at in res.atoms :\n amap[at] = 1\n bondedAtoms[at] = []\n\n mol = ress[0].molecule\n for b in mol.bonds :\n at1, at2 = b.atoms\n if at1 in amap or at2 in amap :\n bondedAtoms[at1].append ( at2 )\n bondedAtoms[at2].append ( at1 )\n\n bonds = selBonds\n if selBonds == None :\n # use all bonds - can be slow for large proteins/rna\n print \" - using all bonds in res\"\n bonds = []\n for b in mol.bonds :\n at1, at2 = b.atoms\n if at1 in amap or at2 in amap :\n bonds.append (b)\n\n print \" - %d/%d atoms, %d/%d bonds\" % ( len(res.atoms), len(mol.atoms), len(bonds), len(mol.bonds) )\n\n tors = []\n for b in bonds :\n\n at1, at2 = b.atoms\n\n cycle, ats1 = BondGo ( at1, at2, bondedAtoms )\n if cycle :\n #print \"cycle\"\n continue\n\n ats2 = {}\n cycle, ats2 = BondGo ( at2, at1, bondedAtoms )\n if cycle :\n #print \"cycle\"\n continue\n\n if len(ats1) == 0 or len(ats2) == 0 :\n continue\n\n a1 = ats1 if len(ats1) < len(ats2) else ats2\n a2 = ats1 if a1 == ats2 else ats2\n tors.append ( [b, a1, a2] )\n\n if 0:\n print \"bond %s-%s \" % (at1.name, at2.name),\n for at in ats :\n print at.name,\n print \"\"\n\n #break\n\n return tors\n\n\n\n\ndef FindTorsDir ( ress, selBonds=None ) :\n\n # same as above but preserves direction\n\n bondedAtoms = {}\n amap = {}\n for res in ress :\n for at in res.atoms :\n amap[at] = 1\n bondedAtoms[at] = []\n\n for b in ress[0].molecule.bonds :\n at1, at2 = b.atoms\n if at1 in amap or at2 in amap :\n bondedAtoms[at1].append ( at2 )\n bondedAtoms[at2].append ( at1 )\n\n bonds = selBonds\n if selBonds == None :\n # use all bonds - can be slow for large proteins/rna\n bonds = []\n for b in ress[0].molecule.bonds :\n if at1 in amap or at2 in amap :\n bonds.append (b)\n\n print \"%d atoms, %d bonds\" % ( len(res.atoms), len(bonds) )\n\n tors = []\n for b in bonds :\n\n at1, at2 = b.atoms\n\n cycle, ats1 = BondGo ( at1, at2, bondedAtoms )\n if cycle :\n #print \"cycle\"\n continue\n\n ats2 = {}\n cycle, ats2 = BondGo ( at2, at1, bondedAtoms )\n if cycle :\n #print \"cycle\"\n continue\n\n if len(ats1) == 0 or len(ats2) == 0 :\n continue\n\n minDir = 1.0 if len(ats1) < len(ats2) else -1.0\n tors.append ( [b, ats1, ats2, minDir] )\n\n if 0:\n print \"bond %s-%s \" % (at1.name, at2.name),\n for at in ats :\n print at.name,\n print \"\"\n\n #break\n\n return tors\n\n\n\ndef BondGo ( at0, at1, bondedAtoms ) :\n\n visAts = { at0:1, at1:1 }\n\n first = True\n cycle = False\n Q = [at1]\n\n while len(Q) > 0 :\n\n at = Q.pop(0)\n #print \"%s \" % at.name\n visAts[at] = 1\n\n for at2 in bondedAtoms[at] :\n #print \" -> %s \" % (at2.name),\n\n if not first and at2 == at0 :\n #print \"cycle\"\n cycle = True\n break\n\n if not at2 in visAts :\n Q.append ( at2 )\n #print \" > \"\n\n first = False\n if cycle :\n break\n\n del visAts[at0]\n del visAts[at1]\n return cycle, visAts.keys()\n\n\n# uses atoms in residue only\ndef ConAts ( res ) :\n\n bondedAtoms = {}\n amap = {}\n for at in res.atoms :\n amap[at] = 1\n bondedAtoms[at] = []\n\n bonds = []\n for b in res.molecule.bonds :\n at1, at2 = b.atoms\n if at1 in amap or at2 in amap :\n bonds.append (b)\n bondedAtoms[at1].append ( at2 )\n bondedAtoms[at2].append ( at1 )\n\n print \"ConAts - %d atoms, %d bonds\" % ( len(res.atoms), len(bonds) )\n\n conAts = {}\n for at 
in res.atoms :\n\n conAts[at] = ConAtsGo ( at, bondedAtoms )\n\n #print at.name, \" : \",\n #for atc in conAts[at].keys() :\n # print atc.name,\n #print \"\"\n\n\n return conAts\n\n\n# uses list of atoms\ndef ConAts2 ( atoms ) :\n\n bondedAtoms = {}\n amap = {}\n for at in atoms :\n amap[at] = 1\n bondedAtoms[at] = []\n\n bonds = []\n for b in atoms[0].molecule.bonds :\n at1, at2 = b.atoms\n if at1 in amap or at2 in amap :\n bonds.append (b)\n if at1 in bondedAtoms :\n bondedAtoms[at1].append ( at2 )\n if at2 in bondedAtoms :\n bondedAtoms[at2].append ( at1 )\n\n print \"ConAts - %d atoms, %d bonds\" % ( len(atoms), len(bonds) )\n\n conAts = {}\n for at in atoms :\n\n conAts[at] = ConAtsGo ( at, bondedAtoms )\n\n #print at.name, \" : \",\n #for atc in conAts[at].keys() :\n # print atc.name,\n #print \"\"\n\n\n return conAts\n\n\ndef ConAtsGo ( at, bondedAtoms ) :\n\n visAts = { at:1 }\n depthAt = { at:1 }\n Q = [at]\n\n while len(Q) > 0 :\n\n at = Q.pop(0)\n #print \"%s \" % at.name\n visAts[at] = 1\n if depthAt[at] > 3 :\n continue\n\n if at in bondedAtoms :\n\n for at2 in bondedAtoms[at] :\n #print \" -> %s \" % (at2.name),\n\n if not at2 in visAts :\n Q.append ( at2 )\n depthAt[at2] = depthAt[at]+1\n #print \" > \"\n\n return visAts\n\n\n\n\ndef FitAtomsXf ( atoms, xf0, inMap, doTranslate = True, doRotate = True ) :\n\n #fpoints, fpoint_weights, darray = RessPtsInMap (ress, regs, segMap)\n\n #print inMap.name\n\n ats = [at for at in atoms if not at.element.name == \"H\"]\n fpoints = _multiscale.get_atom_coordinates ( ats, transformed = False )\n\n _contour.affine_transform_vertices ( fpoints, Matrix.xform_matrix(xf0) )\n #_contour.affine_transform_vertices ( fpoints, inMap.openState.xform )\n fpoint_weights = numpy.ones ( len(fpoints), numpy.float32 )\n\n #fpoints = numpy.array ( fpoints, dtype=numpy.float32 )\n\n xyz_to_ijk_tf = inMap.data.xyz_to_ijk_transform\n darray = inMap.data.full_matrix()\n map_values, outside = VolumeData.interpolate_volume_data(fpoints, xyz_to_ijk_tf, darray)\n olap0, cc0, other = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n #print cc0,\n\n move_tf, stats = FitMap.locate_maximum(fpoints, fpoint_weights,\n darray, xyz_to_ijk_tf,\n max_steps = 1000,\n ijk_step_size_min = 0.01,\n ijk_step_size_max = 0.5,\n optimize_translation = doTranslate,\n optimize_rotation = doRotate,\n metric = 'sum product', # 'correlation' or 'correlation about mean'\n request_stop_cb = None)\n\n xf = Matrix.chimera_xform ( move_tf )\n cc1 = float ( stats['correlation'] )\n #ApplyXf ( ress, xf )\n #print \" -> \", cc1\n\n return cc0, cc1, xf\n\n\n\n\ndef FitAtoms ( atoms, inMap, doTranslate = True, doRotate = True ) :\n\n #fpoints, fpoint_weights, darray = RessPtsInMap (ress, regs, segMap)\n\n #ats = [at for at in atoms if not at.element.name == \"H\"]\n fpoints = _multiscale.get_atom_coordinates ( atoms, transformed = False )\n #_contour.affine_transform_vertices ( fpoints, Matrix.xform_matrix(xf0) )\n fpoint_weights = numpy.ones ( len(fpoints), numpy.float32 )\n\n #fpoints = numpy.array ( fpoints, dtype=numpy.float32 )\n\n xyz_to_ijk_tf = inMap.data.xyz_to_ijk_transform\n darray = inMap.data.full_matrix()\n\n #map_values, outside = VolumeData.interpolate_volume_data(fpoints, xyz_to_ijk_tf, darray)\n #olap0, cc0, other = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n #print cc0,\n\n move_tf, stats = FitMap.locate_maximum(fpoints, fpoint_weights,\n darray, xyz_to_ijk_tf,\n max_steps = 1000,\n ijk_step_size_min = 0.01,\n ijk_step_size_max = 0.5,\n 
optimize_translation = doTranslate,\n optimize_rotation = doRotate,\n metric = 'sum product',\n request_stop_cb = None)\n\n xf = Matrix.chimera_xform ( move_tf )\n cc1 = float ( stats['correlation'] )\n #ApplyXf ( ress, xf )\n #print \" -> \", cc1\n\n return cc1, xf\n\n\n\ndef FitScore ( atoms, inMap ) :\n\n fpoints = _multiscale.get_atom_coordinates ( atoms, transformed = False )\n #_contour.affine_transform_vertices ( fpoints, Matrix.xform_matrix(xf0) )\n fpoint_weights = numpy.ones ( len(fpoints), numpy.float32 )\n\n xyz_to_ijk_tf = inMap.data.xyz_to_ijk_transform\n darray = inMap.data.full_matrix()\n\n map_values, outside = VolumeData.interpolate_volume_data(fpoints, xyz_to_ijk_tf, darray)\n olap0, cc0, other = FitMap.overlap_and_correlation ( fpoint_weights, map_values )\n return cc0\n\n\n\n\ndef RegsPtsInMol ( regs, toMol ) :\n\n regsPoints = regs[0].points().astype ( numpy.float32 )\n for r in regs[1:] :\n npoints = r.points().astype ( numpy.float32 )\n regsPoints = numpy.concatenate ( [regsPoints, npoints], axis=0 )\n\n segMap = regs[0].segmentation.seg_map\n _contour.affine_transform_vertices ( regsPoints, segMap.data.ijk_to_xyz_transform )\n _contour.affine_transform_vertices ( regsPoints, Matrix.xform_matrix( segMap.openState.xform ) )\n _contour.affine_transform_vertices ( regsPoints, Matrix.xform_matrix( toMol.openState.xform.inverse() ) )\n\n return regsPoints\n\n\n\ndef AlignResToRegs ( res, regs ) :\n\n regsPoints = RegsPtsInMol ( regs, res.molecule )\n\n from axes import prAxes\n\n regsC, regsU, regsS, regsV = prAxes ( regsPoints )\n #print regsC, regsU\n\n ats = [at for at in res.atoms if not at.element.name == \"H\"]\n molPoints = _multiscale.get_atom_coordinates ( ats, transformed = False )\n molC, molU, molS, molV = prAxes ( molPoints )\n #print molC, molU\n\n xf = chimera.Xform.translation ( molC * -1.0 )\n #xf.premultiply ( chimera.Xform.rotation(rax, ang) )\n xf.premultiply ( chimera.Xform.translation ( regsC ) )\n\n for at in res.atoms :\n at.setCoord ( xf.apply ( at.coord() ) )\n\n\n\n\ndef uniform_rota_xfs ( num ) :\n\n N = int ( numpy.floor ( numpy.sqrt ( num ) ) )\n M = int ( numpy.floor ( num / N ) )\n\n thetas, phis = [], []\n from math import acos, sin, cos, sqrt, pi\n for k in range ( 1, N+1 ) :\n h = -1.0 + ( 2.0*float(k-1)/float(N-1) )\n phis.append ( acos(h) )\n thetas.append ( 0 if k == 1 or k == N else\n (thetas[k-2] + 3.6/sqrt(N*(1.0-h**2.0))) % (2*pi) )\n\n xfs = []\n for theta, phi in zip(thetas, phis):\n for m in range ( M ) :\n rot = 2*pi*float(m)/float(M)\n #ralist.append((theta,phi,rot))\n v = chimera.Vector (sin(phi)*cos(theta), sin(phi)*sin(theta), cos(phi))\n xfR = chimera.Xform.rotation ( v, rot*180/pi )\n xfs.append ( xfR )\n\n return xfs\n\n\n\n\n\n\ndef AddNAG ( selAt, inMap, selReg ) :\n\n nmol = chimera.PDBio().readPDBfile ( \"/Users/greg/Dropbox/_mol/Segger/_param/nag.pdb\" )[0]\n print \" - read %s - %d atoms\" % ( nmol.name, len(nmol.atoms) )\n\n\n if selAt.residue.type == \"ASN\" :\n\n pCG = selAt.residue.atomsMap[\"CG\"][0].coord()\n pN = selAt.residue.atomsMap[\"ND2\"][0].coord()\n pO = selAt.residue.atomsMap[\"OD1\"][0].coord()\n pO1_ = nmol.residues[0].atomsMap[\"O1\"][0].coord()\n pC1_ = nmol.residues[0].atomsMap[\"C1\"][0].coord()\n xf = ConnectXf ( pCG, pN, pO, 1.450, 124.669, pO1_, pC1_ )\n\n addRes = nmol.residues[0]\n toMol = selAt.molecule\n toChain = selAt.residue.id.chainId\n nres = AddResToMol ( addRes, toMol, toChain, xf, withoutAtoms=[\"O1\"] )\n atN = selAt.residue.atomsMap[\"ND2\"][0]\n atC1 = 
nres.atomsMap[\"C1\"][0]\n nb = selAt.molecule.newBond ( atN, atC1 )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n OptDihedral ( pN, atC1.coord(), nres.atoms, inMap, selReg )\n\n elif selAt.residue.type == \"NAG\" :\n\n pC4 = selAt.residue.atomsMap[\"C4\"][0].coord()\n pO4 = selAt.residue.atomsMap[\"O4\"][0].coord()\n pC3 = selAt.residue.atomsMap[\"C3\"][0].coord()\n pO1_ = nmol.residues[0].atomsMap[\"O1\"][0].coord()\n pC1_ = nmol.residues[0].atomsMap[\"C1\"][0].coord()\n xf = ConnectXf ( pC4, pO4, pC3, 1.433, 118.567, pO1_, pC1_ )\n\n addRes = nmol.residues[0]\n toMol = selAt.molecule\n toChain = selAt.residue.id.chainId\n nres = AddResToMol ( addRes, toMol, toChain, xf, withoutAtoms=[\"O1\"] )\n atO4 = selAt.residue.atomsMap[\"O4\"][0]\n atC1 = nres.atomsMap[\"C1\"][0]\n nb = selAt.molecule.newBond ( atO4, atC1 )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n SetDihedral ( pC3, pC4, pO4, atC1.coord(), 64.190, nres.atoms )\n\n OptDihedral ( pO4, atC1.coord(), nres.atoms, inMap, selReg )\n\n\n\ndef AddBMA ( selAt, inMap, selReg ) :\n\n nmol = chimera.PDBio().readPDBfile ( \"/Users/greg/Dropbox/_mol/Segger/_param/bma.pdb\" )[0]\n print \" - read %s - %d atoms\" % ( nmol.name, len(nmol.atoms) )\n\n\n if selAt.residue.type == \"NAG\" :\n\n pC4 = selAt.residue.atomsMap[\"C4\"][0].coord()\n pO4 = selAt.residue.atomsMap[\"O4\"][0].coord()\n pC3 = selAt.residue.atomsMap[\"C3\"][0].coord()\n pO1_ = nmol.residues[0].atomsMap[\"O1\"][0].coord()\n pC1_ = nmol.residues[0].atomsMap[\"C1\"][0].coord()\n xf = ConnectXf ( pC4, pO4, pC3, 1.433, 109.147, pO1_, pC1_ )\n\n addRes = nmol.residues[0]\n toMol = selAt.molecule\n toChain = selAt.residue.id.chainId\n nres = AddResToMol ( addRes, toMol, toChain, xf, withoutAtoms=[\"O1\"] )\n atO4 = selAt.residue.atomsMap[\"O4\"][0]\n atC1 = nres.atomsMap[\"C1\"][0]\n nb = selAt.molecule.newBond ( atO4, atC1 )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n SetDihedral ( pC3, pC4, pO4, atC1.coord(), 137.239, nres.atoms )\n\n OptDihedral ( pO4, atC1.coord(), nres.atoms, inMap, selReg )\n\n\n\ndef AddMAN ( selAt, inMap, selReg ) :\n\n nmol = chimera.PDBio().readPDBfile ( \"/Users/greg/Dropbox/_mol/Segger/_param/man.pdb\" )[0]\n print \" - read %s - %d atoms\" % ( nmol.name, len(nmol.atoms) )\n\n\n if selAt.residue.type == \"BMA\" :\n\n if selAt.name == \"O6\" :\n\n pC6 = selAt.residue.atomsMap[\"C6\"][0].coord()\n pO6 = selAt.residue.atomsMap[\"O6\"][0].coord()\n pC5 = selAt.residue.atomsMap[\"C5\"][0].coord()\n pO1_ = nmol.residues[0].atomsMap[\"O1\"][0].coord()\n pC1_ = nmol.residues[0].atomsMap[\"C1\"][0].coord()\n xf = ConnectXf ( pC6, pO6, pC5, 1.425, 115.695, pO1_, pC1_ )\n\n addRes = nmol.residues[0]\n toMol = selAt.molecule\n toChain = selAt.residue.id.chainId\n nres = AddResToMol ( addRes, toMol, toChain, xf, withoutAtoms=[\"O1\"] )\n atO6 = selAt.residue.atomsMap[\"O6\"][0]\n atC1 = nres.atomsMap[\"C1\"][0]\n nb = selAt.molecule.newBond ( atO6, atC1 )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n SetDihedral ( pC5, pC6, pO6, atC1.coord(), 177.537, nres.atoms )\n\n OptDihedral ( pO6, atC1.coord(), nres.atoms, inMap, selReg )\n\n\n if selAt.name == \"O3\" :\n\n pC3 = selAt.residue.atomsMap[\"C3\"][0].coord()\n pO3 = selAt.residue.atomsMap[\"O3\"][0].coord()\n pC4 = selAt.residue.atomsMap[\"C4\"][0].coord()\n pO1_ = nmol.residues[0].atomsMap[\"O1\"][0].coord()\n pC1_ = nmol.residues[0].atomsMap[\"C1\"][0].coord()\n xf = ConnectXf ( pC3, pO3, pC4, 1.475, 110.731, pO1_, pC1_ )\n\n addRes = nmol.residues[0]\n toMol = selAt.molecule\n 
toChain = selAt.residue.id.chainId\n nres = AddResToMol ( addRes, toMol, toChain, xf, withoutAtoms=[\"O1\"] )\n atO3 = selAt.residue.atomsMap[\"O3\"][0]\n atC1 = nres.atomsMap[\"C1\"][0]\n nb = selAt.molecule.newBond ( atO3, atC1 )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n SetDihedral ( pC4, pC3, pO3, atC1.coord(), 144.288, nres.atoms )\n\n OptDihedral ( pO3, atC1.coord(), nres.atoms, inMap, selReg )\n\n\n\n\n\n\ndef AddRes ( resType, selAt, inMap, selReg ) :\n\n print \"Adding:\", resType\n for i in range ( len(resType) ) :\n\n RT = resType[i].upper()\n if RT in protein1to3 :\n rtype = protein1to3[RT]\n print \" %s -prot> %s\" % (RT, rtype)\n AddProtRes ( rtype, selAt, inMap, selReg )\n\n\n\ndef AddNuc ( resType, selAt, inMap, selReg ) :\n\n print \"Adding:\", resType\n for i in range ( len(resType) ) :\n\n RT = resType[i].upper()\n\n if RT in nucleic1to3 :\n rtype = nucleic1to3[RT]\n print \" %s -nucleic> %s\" % (RT, rtype)\n AddNucRes ( rtype, selAt, inMap, selReg )\n\n\ndef AddNucRes ( rtype, selAt, inMap, selReg ) :\n\n from mmcif import ParamPathPdb\n rpath = ParamPathPdb ( rtype )\n print \" - res from %s\" % rpath\n\n #nmol = chimera.PDBio().readPDBfile ( \"/Users/greg/Dropbox/_mol/Segger/_param/%s.pdb\" % rtype )[0]\n nmol = chimera.PDBio().readPDBfile ( rpath )[0]\n print \" - read %s - %d atoms\" % ( nmol.name, len(nmol.atoms) )\n\n #chimera.openModels.add ( [nmol] )\n\n addRes = nmol.residues[0]\n toMol = selAt.molecule\n toChain = selAt.residue.id.chainId\n rmap = {}\n for r in toMol.residues :\n if r.id.chainId == toChain :\n rmap[r.id.position] = r\n\n if selAt.name == \"P\" :\n\n rid = selAt.residue.id.position - 1\n if rid in rmap :\n umsg ( \"Residue at position %d already exists\" % rid )\n return\n\n pP = selAt.coord() #.residue.atomsMap[\"C\"][0].coord()\n pO5p = selAt.residue.atomsMap[\"O5'\"][0].coord()\n pC5p = selAt.residue.atomsMap[\"C5'\"][0].coord()\n pO3p_ = addRes.atomsMap[\"O3'\"][0].coord()\n pC3p_ = addRes.atomsMap[\"C3'\"][0].coord()\n\n xf = ConnectXfR ( pO5p, pP, pC5p, pO3p_, pC3p_ ) # pCA, pC, pO, pN_, pCA_\n\n nres = AddResToMol ( addRes, toMol, toChain, xf, withoutAtoms=[], rid=rid )\n atP = selAt.residue.atomsMap[\"P\"][0]\n atO3p = nres.atomsMap[\"O3'\"][0]\n nb = selAt.molecule.newBond ( atP, atO3p )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n #OptDihedral ( pN, atC1.coord(), nres.atoms, inMap, selReg )\n\n if 0 and rid + 1 in rmap :\n toRes = rmap[rid + 1]\n print \"- connect to %s.%d\" % (toRes.type, toRes.id.position)\n\n atC = nres.atomsMap[\"C\"][0]\n atN = toRes.atomsMap[\"N\"][0]\n nb = selAt.molecule.newBond ( atC, atN )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n elif selAt.name == \"O3'\" :\n print \" - todo - add to O3'\"\n\n else :\n print \" - replace %s -> %s\" % ( selAt.residue.type, rtype )\n\n\n\n\ndef AddProtRes ( rtype, selAt, inMap, selReg ) :\n\n nmol = chimera.PDBio().readPDBfile ( \"/Users/greg/Dropbox/_mol/Segger/_param/%s.pdb\" % rtype.lower() )[0]\n print \" - read %s - %d atoms\" % ( nmol.name, len(nmol.atoms) )\n\n addRes = nmol.residues[0]\n toMol = selAt.molecule\n toChain = selAt.residue.id.chainId\n rmap = {}\n for r in toMol.residues :\n if r.id.chainId == toChain :\n rmap[r.id.position] = r\n\n if selAt.name == \"C\" :\n\n rid = selAt.residue.id.position + 1\n if rid in rmap :\n umsg ( \"Residue at position %d already exists\" % rid )\n return\n\n pC = selAt.coord() #.residue.atomsMap[\"C\"][0].coord()\n pCA = selAt.residue.atomsMap[\"CA\"][0].coord()\n pO = 
selAt.residue.atomsMap[\"O\"][0].coord()\n pN_ = nmol.residues[0].atomsMap[\"N\"][0].coord()\n pCA_ = nmol.residues[0].atomsMap[\"CA\"][0].coord()\n xf = ConnectXfP ( pCA, pC, pO, pN_, pCA_ ) # pCA, pC, pO, pN_, pCA_\n\n nres = AddResToMol ( addRes, toMol, toChain, xf, withoutAtoms=['OXT'], rid=rid )\n atC = selAt.residue.atomsMap[\"C\"][0]\n atN = nres.atomsMap[\"N\"][0]\n nb = selAt.molecule.newBond ( atC, atN )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n #OptDihedral ( pN, atC1.coord(), nres.atoms, inMap, selReg )\n\n\n if 0 and rid + 1 in rmap :\n toRes = rmap[rid + 1]\n print \"- connect to %s.%d\" % (toRes.type, toRes.id.position)\n\n atC = nres.atomsMap[\"C\"][0]\n atN = toRes.atomsMap[\"N\"][0]\n nb = selAt.molecule.newBond ( atC, atN )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n\n\n\n\n\ndef SetDihedral ( p1, p2, p3, p4, toAngDeg, atoms ) :\n\n b1, b2, b3 = p2 - p1, p3 - p2, p4 - p3\n\n n1 = chimera.cross ( b1, b2 ); n1.normalize()\n n2 = chimera.cross ( b2, b3 ); n2.normalize()\n m1 = chimera.cross ( n1, b2 ); m1.normalize()\n\n x, y = n1 * n2, m1 * n2\n\n A = -1.0 * numpy.arctan2 (y, x) * 180.0 / numpy.pi\n\n print \" - dih: %.3f -> %.3f\" % (A, toAngDeg)\n\n V = p3.toVector()\n b2.normalize()\n xf = chimera.Xform.translation ( V * -1.0 )\n xf.premultiply ( chimera.Xform.rotation(b2, toAngDeg-A) )\n xf.premultiply ( chimera.Xform.translation ( V ) )\n\n for at in atoms :\n at.setCoord ( xf.apply(at.coord()) )\n\n\n\n\n\ndef ConnectXf ( P0, P1, P2, bondLength, angleDeg, M0, M1 ) :\n\n v1 = P1 - P0; v1.normalize()\n v2 = P2 - P0; v2.normalize()\n vA = chimera.cross ( v2, v1 ); vA.normalize()\n\n vP = chimera.Xform.rotation (vA, angleDeg) .apply ( v1*-1.0 )\n vP.normalize()\n pP = P1 + vP * bondLength\n\n vM = M1 - M0; vM.normalize()\n\n # align vM to vP, put M1 to pP\n\n rax = chimera.cross ( vM, vP ); rax.normalize()\n ang = numpy.arccos ( vM * vP ) * 180.0 / numpy.pi\n\n xf = chimera.Xform.translation ( M1.toVector() * -1.0 )\n xf.premultiply ( chimera.Xform.rotation(rax, ang) )\n xf.premultiply ( chimera.Xform.translation ( pP.toVector() ) )\n\n return xf\n\n\n\ndef ConnectXfP ( pCA, pC, pO, pN_, pCA_ ) :\n\n v1 = pCA - pC; v1.normalize()\n v2 = pO - pC; v2.normalize()\n vA = chimera.cross ( v2, v1 ); vA.normalize()\n\n vN = chimera.Xform.rotation (vA, 114.017) .apply ( v1 )\n vN.normalize()\n pN = pC + vN * 1.302\n\n vCA = chimera.Xform.rotation (vA*-1.0, 116.766) .apply ( vN * -1.0 )\n vCA.normalize()\n #pCA = pN + vCA * 1.373\n\n vCA_ = pCA_ - pN_; vCA_.normalize()\n\n # align vCA_ to vCA, put pN_ to pN\n\n rax = chimera.cross ( vCA_, vCA );\n if rax.length < 1e-4 :\n xf = chimera.Xform.translation ( pN_.toVector() * -1.0 )\n xf.premultiply ( chimera.Xform.translation ( pN.toVector() ) )\n return xf\n\n else :\n rax.normalize()\n ang = numpy.arccos ( vCA_ * vCA ) * 180.0 / numpy.pi\n xf = chimera.Xform.translation ( pN_.toVector() * -1.0 )\n xf.premultiply ( chimera.Xform.rotation(rax, ang) )\n xf.premultiply ( chimera.Xform.translation ( pN.toVector() ) )\n return xf\n\n\n\ndef ConnectXfR ( pO5p, pP, pC5p, pO3p_, pC3p_ ) :\n\n v1 = pP - pO5p; v1.normalize()\n v2 = pC5p - pO5p; v2.normalize()\n vA = chimera.cross ( v2, v1 ); vA.normalize()\n\n vN = chimera.Xform.rotation (vA, 104.0) .apply ( v1*-1.0 )\n vN.normalize()\n pO3p = pP + vN * 1.608\n\n vA = chimera.cross ( vN, v1 ); vA.normalize()\n #print vA\n v = chimera.Xform.rotation (vA*-1.0, 119.7) .apply ( vN*-1.0 )\n v.normalize()\n #print v\n\n v_ = pC3p_ - pO3p_; v_.normalize()\n\n # align v_ to v, put 
pO3p_ to pO3p\n rax = chimera.cross ( v_, v );\n if rax.length < 1e-3 :\n xf = chimera.Xform.translation ( pO3p_.toVector() * -1.0 )\n xf.premultiply ( chimera.Xform.translation ( pO3p.toVector() ) )\n return xf\n\n else :\n ang = numpy.arccos ( v * v_ ) * 180.0 / numpy.pi\n #print \"ang0: %.3f\" % ang\n rax.normalize()\n print ang\n xf = chimera.Xform.translation ( pO3p_.toVector() * -1.0 )\n xf.premultiply ( chimera.Xform.rotation(rax, ang) )\n xf.premultiply ( chimera.Xform.translation ( pO3p.toVector() ) )\n\n pO3p_ = xf.apply ( pO3p_ ); print \"O3p: \", pO3p_\n pC3p_ = xf.apply ( pC3p_ ); print \"C3p: \", pC3p_\n\n v_ = pC3p_ - pO3p_; v_.normalize()\n v = pP - pO3p_; v.normalize()\n rax = chimera.cross ( v, v_ );\n ang = numpy.arccos ( v * v_ ) * 180.0 / numpy.pi\n print \"- new ang:\", ang\n\n return xf\n\n\n\ndef AddResToMol ( res, toMol, toChain, xf, withoutAtoms, rid=None, asType=None ) :\n\n if rid == None :\n rid = 0\n for r in toMol.residues :\n if r.id.chainId == toChain :\n if r.id.position > rid :\n rid = r.id.position\n rid += 1\n\n if asType == None :\n asType = res.type\n\n aMap = {}\n nres = toMol.newResidue ( asType, chimera.MolResId(toChain, rid))\n\n if hasattr ( res, 'isHelix' ) :\n nres.isHelix = res.isHelix\n if hasattr ( res, 'isSheet' ) :\n nres.isSheet = res.isSheet\n\n for at in res.atoms :\n if at.element.name == \"H\" :\n continue\n elif 1 and at.name in withoutAtoms :\n continue\n\n nat = toMol.newAtom (at.name, chimera.Element(at.element.number))\n aMap[at] = nat\n nres.addAtom( nat )\n nat.drawMode = nat.EndCap\n nat.setCoord ( xf.apply ( at.coord()) )\n nat.display = True\n if nat.element.name.upper() in atomColors : nat.color = atomColors[nat.element.name.upper()]\n\n for bond in res.molecule.bonds :\n if bond.atoms[0] in aMap and bond.atoms[1] in aMap :\n nb = toMol.newBond ( aMap[bond.atoms[0]], aMap[bond.atoms[1]] )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n\n return nres\n\n\n\ndef OptDihedral ( P, P2, forAtoms, inMap, selReg ) :\n\n V = P2 - P\n\n dmap, rdata, rmat = None, None, None\n\n if selReg != None :\n dmap = selReg.segmentation.seg_map\n print \" - seg map:\", dmap.name\n zoneR = dmap.data.step[0]/2.0\n rpoints = numpy.concatenate ( [selReg.map_points() for r in [selReg]], axis=0 ).astype ( numpy.float32 )\n rdata = VolumeData.zone_masked_grid_data ( segMap.data, rpoints, zoneR )\n rmat = rdata.matrix()\n\n elif inMap != None :\n dmap = inMap\n print \" - in map:\", dmap.name\n rdata = dmap.data\n rmat = inMap.full_matrix()\n\n ##gdata = VolumeData.Array_Grid_Data ( ndata.full_matrix(), segMap.data.origin, segMap.data.step, segMap.data.cell_angles, name = \"atom masked\" )\n #nv = VolumeViewer.volume.volume_from_grid_data ( rdata )\n #nv.name = \"helix mask 2\"\n\n maxAng, maxD, angD = 0, -1e9, 1.0\n for ang in range ( 0, int(round(360.0/angD)), 1 ) :\n\n xf = chimera.Xform.translation ( P.toVector() * -1.0 )\n xf.premultiply ( chimera.Xform.rotation(V, angD) )\n xf.premultiply ( chimera.Xform.translation ( P.toVector() ) )\n\n for at in forAtoms :\n at.setCoord ( xf.apply(at.coord()) )\n\n if dmap != None :\n points = _multiscale.get_atom_coordinates ( forAtoms, transformed = True )\n _contour.affine_transform_vertices ( points, Matrix.xform_matrix(dmap.openState.xform.inverse()) )\n values, outside = VolumeData.interpolate_volume_data ( points, rdata.xyz_to_ijk_transform, rmat )\n #values = nv.interpolated_values ( points, selAt.molecule.openState.xform )\n #olap, corr, other = overlap_and_correlation ( rpoint_weights, rmap_values 
)\n avgD = numpy.average ( values )\n #print \"%.1f\\t%.4f\" % (ang, avgD)\n\n if avgD > maxD :\n maxD = avgD\n maxAng = round(float(ang)/angD)\n\n\n print \"Max ang: %.3f\" % maxAng\n xf = chimera.Xform.translation ( P.toVector() * -1.0 )\n xf.premultiply ( chimera.Xform.rotation(V, maxAng) )\n xf.premultiply ( chimera.Xform.translation ( P.toVector() ) )\n\n for at in forAtoms :\n at.setCoord ( xf.apply(at.coord()) )\n\n\n\n\ndef AddResN ( rtype, atRes ) :\n\n\n global mrPar\n\n\n if len(rtype) == 1 :\n from chimera.resCode import protein1to3\n try :\n rtype = protein1to3[rtype]\n except :\n print \"Adding - %s - unknown\"\n\n ats = mrAtoms[rtype]\n\n addAts = []\n for at in ats :\n rtype, atomId, el, charge, x, y, z = at\n if el == \"H\" :\n continue\n addAts.append (at)\n\n mol = atRes.molecule\n chainId = atRes.id.chainId\n atPos = atRes.id.position\n\n print \"Adding - %s - %d atoms, to %s, chaind %s, ri %d\" % (rtype, len(addAts), mol.name, chainId, atPos)\n\n ress = []\n for r in mol.residues :\n if r.id.chainId == chainId :\n ress.append ( [r.id.position, r] )\n\n ress.sort()\n ress.reverse()\n\n print \" - %d res in chain, first %d, last %d\" % (len(ress), ress[0][1].id.position, ress[-1][1].id.position)\n\n\n aMap = {}\n\n bonds = []\n for bond in mol.bonds :\n bonds.append ( [bond.atoms[0], bond.atoms[1]] )\n\n for i, riRes in enumerate(ress) :\n\n ri, res = riRes\n\n if res.id.position >= atPos :\n ri = ri + 1\n else :\n break\n\n print \" - at %d -> %d\" % (res.id.position, ri)\n\n nres = mol.newResidue (res.type, chimera.MolResId(chainId, ri))\n for at in res.atoms :\n nat = mol.newAtom (at.name, chimera.Element(at.element.number))\n aMap[at] = nat\n nres.addAtom( nat )\n nat.setCoord ( at.coord() )\n nat.drawMode = nat.EndCap\n nat.display = True\n\n nres.isHelix = res.isHelix\n nres.isHet = res.isHet\n nres.isSheet = res.isSheet\n nres.isStrand = res.isStrand\n nres.ribbonDisplay = True\n nres.ribbonDrawMode = 2\n\n mol.deleteResidue ( res )\n\n\n nbondsAdded = 0\n for a1, a2 in bonds :\n if not a1 in aMap :\n continue\n if not a2 in aMap :\n continue\n nat1, nat2 = aMap[a1], aMap[a2]\n if abs(nat1.residue.id.position - nat2.residue.id.position) <= 1 :\n nb = mol.newBond ( aMap[a1], aMap[a2] )\n nb.display = nb.Smart\n nb.drawMode = nb.Stick\n nbondsAdded += 1\n\n print \" - added %d bonds\" % nbondsAdded\n\n\n\n\n\n\n\n\n\ndef SetBBAts ( mol ) :\n\n #print \" - setting bbAts in %s\" % mol.name\n for r in mol.residues :\n\n r.isProt = r.type in protein3to1\n r.isNA = r.type in nucleic3to1\n\n r.bbAtoms = []\n r.scAtoms = []\n\n if r.isProt :\n for a in r.atoms :\n if a.element.name == \"H\" :\n a.isBB, a.isSC = False, False\n continue\n n = a.name\n a.isBB = n==\"C\" or n==\"CA\" or n==\"O\" or n==\"N\" or n==\"OT1\" or n==\"OT2\"\n a.isSC = not a.isBB\n if a.isBB :\n r.bbAtoms.append ( a )\n else :\n r.scAtoms.append ( a )\n\n a.isSugar, a.isBase = False, False\n\n elif r.isNA :\n for a in r.atoms :\n if a.element.name == \"H\" :\n a.isBB, a.isSC = False, False\n continue\n n = a.name\n\n a.isBB = n==\"P\" or n==\"O1P\" or n==\"O2P\" or n==\"OP1\" or n==\"OP2\" or n==\"O5'\" or n==\"C5'\" or n==\"O3'\" or n==\"C3'\" or n==\"C4'\"\n a.isSugar = n==\"C1'\" or n==\"C2'\" or n==\"O4'\" or n==\"O2'\" or n==\"C3'\" or n==\"C4'\"\n\n #a.isBB = a.isBB or a.isSugar\n a.isBase = not a.isBB\n a.isSC = a.isBase\n\n if a.isBB or a.isSugar :\n r.bbAtoms.append ( a )\n else :\n r.scAtoms.append ( a )\n\n\n else :\n for a in r.atoms :\n a.isBB, a.isSC, a.isSugar, a.isBase = False, False, 
False, False\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# end\n", "id": "5573995", "language": "Python", "matching_score": 6.760110855102539, "max_stars_count": 6, "path": "Segger/molref.py" }, { "content": "\n\n\n\n\n# execfile ( \"Segger/mmcif.py\" ); r = ReadCif ( \"/Users/greg/Box Sync/_data/problems/emd_30342/7cec_s.cif\" )\n\ndef ReadCif ( fpath, log=False ) :\n\n print \"Reading %s\" % fpath\n\n from os import path\n\n if not path.isfile ( fpath ) :\n print \" - could not find\"\n return\n\n\n import time\n start = time.time()\n\n cif_name = \"\"\n cif = []\n loops = {}\n\n fp = open ( fpath )\n\n li = 0\n getNext = True\n while 1 :\n\n if getNext :\n atLine, getNext = fp.readline(), True\n li += 1\n else :\n getNext = True\n\n if not atLine :\n print \" - done\"\n break\n\n ls = atLine.strip ()\n\n #print \"%d %s\" % (li, ls)\n\n if len(ls) == 0 :\n continue\n\n elif ls[0:4] == \"data\" :\n cif_name = ls\n print \" - cif name:\", cif_name\n cif.append ( atLine )\n\n elif ls[0] == \"#\" :\n cif.append ( atLine )\n\n elif ls[0] == \"_\" :\n #cif.append ( ls )\n li, name, data, lines = GetData1 ( fp, atLine, ls, li )\n cif.append ( lines )\n\n elif ls[0:5] == \"loop_\" :\n\n li, atLine, ls, getNext, name, labels, data = GetLoop ( fp, atLine, ls, li )\n cif.append ( [name, labels, data] )\n loops[name] = { 'labels':labels, 'data':data }\n #print \" - got loop: %s\" % name\n\n else :\n #cif.append ( atLine )\n print \" - ? %d -.- %s\" % (li, ls)\n\n fp.close()\n\n print ( \" - done CIF - %d lines -- %.1fs\" % (li, time.time()-start) )\n\n if log :\n print \"\"\n print \"Loops:\"\n for name, ld in loops.iteritems() :\n labels, data = ld['labels'], ld['data']\n print \" - %s [%d x %d]\" % ( name, len(labels), len(data) )\n\n #outf = path.splitext ( fpath )[0] + \"_w.cif\"\n #WriteCif ( cif, outf )\n\n return cif, loops\n\n\ndef GetLoop ( fp, atLine, ls, li ) :\n\n loopLabels, loopData, loopName = [], [], None\n\n getData = False\n getNext = True\n\n # get loop labels first, then data\n while 1 :\n\n atLine, getNext = fp.readline(), True\n if not atLine :\n print \" - %d - eof while getting loop\" % li\n break\n li += 1\n ls = atLine.strip()\n #print \" _ %d %s %d\" % (li, ls, len(ls))\n\n if len(ls) == 0 :\n #print \" - done loop %s\" % li\n #print \"[\" + ls + \"]\"\n getNext = False\n break\n\n elif ls[0] == \"_\" :\n if getData :\n print \" - done loop? / %d\" % li, ls\n print loopLabels\n #print loopData\n print loopName\n print \"\"\n getNext = False\n break\n else :\n # loop label\n name, label = ls.split(\".\")\n loopLabels.append ( label )\n loopName = name\n\n elif ls[0] == \"#\" :\n #print \" - done loop %s\" % li\n #print \"[\" + ls + \"]\"\n getNext = False\n break\n\n else :\n getData = True\n\n if len(loopLabels) == 0 :\n print \" - ? %d - no labels for loop\" % li\n return\n\n li, data = GetData ( fp, atLine, ls, li, loopLabels )\n\n if len(data) != len(loopLabels) :\n print \" - ? 
%d - labels/data different sizes %d/%d\" % (li, len(data), len(loopLabels))\n print data\n return\n\n else :\n mdata = {}\n for i, label in enumerate ( loopLabels ) :\n mdata[label] = data[i]\n #loopData.append ( [data, mdata] )\n loopData.append ( {'asArray':data, 'asMap':mdata} )\n\n if 0 :\n print \"[%d]\" % li\n for label, data in mdata.iteritems () :\n if type(data) is list:\n print \" - %s:%s\" % (label, data[0])\n else :\n print \" - %s:%s\" % (label, data)\n #print pdict\n\n\n #print \" - returning from getloop - \", ls\n return li, atLine, ls, getNext, loopName, loopLabels, loopData\n\n\ndef GetData1 ( fp, atLine, ls, li ) :\n # get data for a single label\n\n name, data, lines = \"\", [], \"\"\n\n tsi = splitm ( li, ls )\n\n if len(tsi) == 2 :\n lines += atLine\n name, data = tsi\n\n elif len(tsi) == 1 :\n name = tsi[0]\n lines += atLine\n liStart = li\n\n atLine, getNext = fp.readline(), True\n if not atLine :\n print \" - ? %d - eof while getting single value\" % li\n return li, name, data, lines\n\n lines += atLine\n li += 1\n ls = atLine.strip ()\n if ls[0] == ';' :\n data = ls[1:]\n\n # look for end ;\n while 1 :\n atLine, getNext = fp.readline(), True\n if not atLine :\n print \" - ? %d - eof while getting single value starting at %d\" % (li, liStart)\n return li, name, data, lines\n\n li += 1\n lines += atLine\n ls = atLine.strip ()\n if ls[0] == ';' :\n # done\n break\n else :\n data += \" \" + ls\n else :\n data = ls\n\n else :\n print \" - ? getdata1 line %d - \" % li, ls\n print \" - %d tokens\" % len(tsi)\n print tsi\n print ls.split()\n print \"\"\n\n return li, name, data, lines\n\n\ndef GetData ( fp, atLine, ls, li, labels ) :\n # get (multiple) data for loop\n\n data = []\n liStart = li\n\n while 1 :\n if ls[0] == ';' :\n block = atLine\n t = ls[1:]\n while 1 :\n atLine, getNext = fp.readline(), True\n li += 1\n if not atLine :\n print \" - ? %d - reached eof while scanning block\" % liStart\n break\n block += atLine\n ls = atLine.strip()\n #print \" ; %d %s\" % (li, ls)\n if len(ls) == 0 :\n continue\n if ls[0] == ';' :\n break\n else :\n t += \" \" + ls\n #data.append ( [t, block] )\n data.append ( {'string':t, 'lines':block} )\n\n else :\n tsi = splitm ( li, ls )\n data.extend ( tsi )\n\n if len(data) >= len(labels) :\n # done\n break\n else :\n # keep going...\n atLine, getNext = fp.readline(), True\n li += 1\n if not atLine :\n print \" - ? %d - reached eof while getting data\" % liStart\n break\n ls = atLine.strip()\n\n\n return li, data\n\n\ndef splitm ( li, l ) :\n ts = l.split()\n tsr = []\n addTo = None\n for t in ts :\n #print \" - %s, %d, %s\" % (t, len(t), addTo)\n if addTo != None :\n addTo += \" \" + t\n if t[-1] == \"'\" :\n tsr.append ( addTo[0:-1] )\n addTo = None\n elif t[0] == \"'\" :\n if len(t)>1 and t[-1] == \"'\" :\n tsr.append ( t[1:-1] )\n else :\n addTo = t[1:]\n else :\n tsr.append ( t )\n if addTo :\n print \" - ? 
%d - unmatched '\" % li\n tsr += addTo\n\n #t2 = []\n for i,t in enumerate(tsr) :\n if len(t) == 0 :\n #print \" - length 0 on line %d\" % li\n tsr[i] = t[1:-1]\n continue\n if t[0] == '\"' and t[-1] == '\"' :\n tsr[i] = t[1:-1]\n\n return tsr\n\n\n\ndef WriteCif ( cif, fout ) :\n\n print \"\"\n print \" - writing to %s\" % fout\n\n fp = open ( fout, \"w\" )\n\n for ls in cif :\n if type(ls) == list :\n name, labels, data = ls\n fp.write ( \"loop_\\n\" )\n for label in labels :\n fp.write ( \"%s.%s\\n\" % (name, label) )\n\n cws = [0] * len(labels)\n for d in data :\n adata, mdata = d['asArray'], d['asMap']\n\n for i, ds in enumerate(adata) :\n dd = \"'%s'\" % ds if ' ' in ds else ds\n cws[i] = max ( cws[i], len(dd) )\n\n for d in data :\n adata, mdata = d['asArray'], d['asMap']\n first, lastWasLines = True, False\n for di, dd in enumerate(adata) :\n if type(dd) == dict :\n # write original block starting with ; on start and end lines\n if not first :\n fp.write ( \"\\n\" )\n fp.write ( dd['lines'] )\n first = True\n lastWasLines = True\n else :\n # write in columns\n dd = \"'%s'\" % dd if ' ' in dd else dd\n #dd = dd if first else (\"\\t\" + dd)\n padn = cws[di] - len(dd) + 1\n fp.write ( \"%s%s\" % (dd, \" \"*padn) )\n first = False\n lastWasLines = False\n if not lastWasLines :\n fp.write (\"\\n\")\n else :\n fp.write ( ls )\n\n fp.close()\n\n\n\n\ntry :\n import chimera\n import numpy\n\n from chimera.resCode import nucleic3to1\n from chimera.resCode import protein3to1, protein1to3\n protein3to1['HSD'] = protein3to1['HIS']\n protein3to1['HSE'] = protein3to1['HIS']\n\n nucleic1to3 = { 'T':'THY', 'C':'CYT', 'G':'GUA', 'A':'ADE', 'U':'URA'}\n nucleic3to1['GDP'] = nucleic3to1['GUA']\n\n atomColors = {'C' : chimera.MaterialColor (0.565,0.565,0.565),\n 'Cbb' : chimera.MaterialColor (0.2,0.6,0.2),\n 'S' : chimera.MaterialColor (1.000,1.000,0.188),\n 'O' : chimera.MaterialColor (1.000,0.051,0.051),\n 'N' : chimera.MaterialColor (0.188,0.314,0.973),\n 'P' : chimera.MaterialColor (1.0, 0.502, 0.0),\n 'H' : chimera.MaterialColor (0.9,.9,.9),\n ' ' : chimera.MaterialColor (0.2,1,.2),\n \"MG\" : chimera.MaterialColor (0,1,0),\n \"NA\" : chimera.MaterialColor (.6,.3,.6),\n \"CL\" : chimera.MaterialColor (.2,.6,.2),\n \"CA\" : chimera.MaterialColor (.4,.4,.6),\n \"ZN\" : chimera.MaterialColor (.2,.8,.2),\n \"MN\" : chimera.MaterialColor (.4,.4,.6),\n \"FE\" : chimera.MaterialColor (.4,.4,.6),\n \"CO\" : chimera.MaterialColor (.4,.4,.6),\n \"NI\" : chimera.MaterialColor (.4,.4,.6)\n }\n\n\nexcept :\n pass\n\n\n\ndef LoadMol ( fpath, log=False ) :\n\n mol = ReadMol ( fpath, log )\n\n chimera.openModels.add ( [mol] )\n #return mol\n\n print \" - got %s\" % mol.name\n\n for at in mol.atoms :\n at.display = True\n at.drawMode = at.Sphere\n at.color = mol.chainColors[at.residue.id.chainId]\n\n for res in mol.residues :\n res.ribbonDisplay = False # drawRib\n res.ribbonDrawMode = 2\n res.ribbonColor = mol.chainColors[at.residue.id.chainId]\n\n #if hasattr ( mol, 'chainDescr' ) :\n # for cid, descr in mol.chainDescr.iteritems() :\n # print \" - %s - %s\" % (cid, \", \".join(descr))\n\n return mol\n\n\ndef ParamPathPdb ( rtype ) :\n\n #ppath = \"/Users/greg/Dropbox/_mol/Segger/_param/\"\n\n from os import path\n dir_path = path.dirname ( path.realpath(__file__) )\n inDir = path.split(dir_path)[0]\n #print \" -- working dir:\", inDir\n #mapQPPath = os.path.join ( inDir, 'Segger' )\n ppath = path.join ( dir_path, '_param' )\n #print \" -- path to param:\", ppath\n #fname = ppath + \"%s.pdb\" % rtype\n fname = 
path.join ( ppath, \"%s.pdb\" % rtype )\n return fname\n\n\ndef GetResMol ( rtype ) :\n\n from os import path\n dir_path = path.dirname ( path.realpath(__file__) )\n inDir = path.split(dir_path)[0]\n ppath = path.join ( dir_path, '_param' )\n fname = path.join ( ppath, \"%s.pdb\" % rtype )\n\n if not path.isfile(fname) :\n print \" - did not find %s\" % fname\n\n phPath = \"/Users/greg/_mol/phenix-1.18.2-3874/build/bin/phenix.elbow\"\n if not path.isfile ( phPath ) :\n print \" - %s - no phenix.elbow path...\" % rtype\n return None\n\n #args = [\"/Users/greg/_mol/phenix-1.19.2-4158/build/bin/phenix.elbow\", \"--chemical_component\", rtype]\n args = [phPath, \"--chemical_component\", rtype]\n\n print \"Running elbow:\"\n print args\n\n fname_log = path.join ( ppath, \"%s.log\" % rtype )\n fname_err = path.join ( ppath, \"%s_err.log\" % rtype )\n\n fout = open ( fname_log, \"w\" )\n foute = open ( fname_err, \"w\" )\n import subprocess\n p = subprocess.Popen(args, stdout=fout, stderr=foute, cwd=ppath)\n\n print \" - waiting...\"\n p.wait()\n fout.close()\n foute.close()\n\n if not path.isfile(fname) :\n print \" - elbow file not found %s\" % fname\n return None\n\n import chimera\n nmol = chimera.PDBio().readPDBfile ( fname )[0]\n #print \" - read %s - %d atoms - %d res\" % ( fname, len(nmol.atoms), len(nmol.residues) )\n #addRes = nmol.residues[0]\n return nmol\n\n\ndef LoadMol2 ( fpath, log=False ) :\n\n if 0 :\n from chimera import tasks, CancelOperation\n task = tasks.Task('...', modal = True)\n\n try :\n mol = LoadMol2_ ( fpath, log, task )\n #mol = LoadMolCh_ ( fpath, log, task )\n\n except Exception, err:\n #umsg ( \"Something went wrong...\" )\n print Exception, err\n traceback.print_exc()\n return\n\n finally :\n task.finished()\n\n return mol\n else :\n mol = LoadMol2_ ( fpath, log, None )\n #mol = LoadMolCh_ ( fpath, log, None )\n return mol\n\n\n\n\ndef LoadMol2_ ( fpath, log=False, task=None ) :\n\n mol = ReadMol ( fpath, log=False )\n\n crmap = {}\n rmolmap = {}\n\n print \" - %d residues\" % len(mol.residues)\n\n import time\n start = time.time()\n\n for r in mol.residues :\n if not r.id.chainId in crmap :\n crmap[r.id.chainId] = { r.id.position : r }\n else :\n crmap[r.id.chainId][r.id.position] = r\n\n bonds = []\n for ri, r in enumerate ( mol.residues ) :\n if task :\n if ri % 100 == 0 :\n task.updateStatus( \"%s - residue %d/%d\" % ( mol.name, ri, len(mol.residues) ) )\n\n rmol = None\n\n rtype = r.type.upper()\n #if rtype in nucleic1to3 :\n # rtype = nucleic1to3[rtype]\n # #print r.type, \"->\", rtype\n\n #print \"%d %d.%s %s\" % (ri, r.id.position, r.id.chainId, r.type)\n\n if rtype.lower() in rmolmap :\n rmol = rmolmap[ rtype.lower() ]\n else :\n rmol = GetResMol ( rtype.lower() )\n #if rmol != None :\n rmolmap[rtype.lower()] = rmol\n\n if 1 and rmol != None :\n for b in rmol.bonds :\n a1n, a2n = b.atoms[0].name, b.atoms[1].name\n if a1n in r.atomsMap and a2n in r.atomsMap :\n for a1 in r.atomsMap[a1n] :\n for a2 in r.atomsMap[a2n] :\n #print \"%s - %s\" % ( At(a1), At(a2) )\n #nb = mol.newBond ( a1, a2 )\n if a1.altLoc == a2.altLoc :\n bonds.append ( [a1,a2] )\n\n if 1 :\n if r.type.upper() in protein3to1 :\n if r.id.position-1 in crmap[r.id.chainId] :\n pres = crmap[r.id.chainId][r.id.position-1]\n if pres.type.upper() in protein3to1 :\n #GetSS ( pres, r )\n if \"C\" in pres.atomsMap and \"N\" in r.atomsMap :\n for a1 in pres.atomsMap[\"C\"] :\n for a2 in r.atomsMap[\"N\"] :\n #print a1.name, pres.id.position, a2.name, r.id.position\n #nb = mol.newBond ( a1, a2 )\n if 
a1.altLoc == a2.altLoc :\n bonds.append ( [a1,a2] )\n\n if r.type.upper() in nucleic1to3 or r.type.upper() in nucleic3to1 :\n if r.id.position-1 in crmap[r.id.chainId] :\n pres = crmap[r.id.chainId][r.id.position-1]\n if pres.type.upper() in nucleic1to3 or pres.type.upper() in nucleic3to1 :\n if \"O3'\" in pres.atomsMap and \"P\" in r.atomsMap :\n for a1 in pres.atomsMap[\"O3'\"] :\n for a2 in r.atomsMap[\"P\"] :\n #print a1.name, pres.id.position, a2.name, r.id.position\n #nb = mol.newBond ( a1, a2 )\n if a1.altLoc == a2.altLoc :\n bonds.append ( [a1,a2] )\n\n print ( \" - %d bonds, %.1fs\" % (len(bonds), time.time()-start) )\n\n start = time.time()\n for a1, a2 in bonds :\n nb = mol.newBond ( a1, a2 )\n\n print ( \" - added bonds in %.1fs\" % (time.time()-start) )\n\n start = time.time()\n chimera.openModels.add ( [mol] )\n print \" - added mol, %.1fs\" % (time.time()-start)\n\n return mol\n\n\ndef ColorMol ( mol ) :\n\n if not hasattr ( mol, 'chainColors' ) :\n from random import random\n mol.chainColors = {}\n for r in mol.residues :\n if not r.id.chainId in mol.chainColors :\n clr = chimera.MaterialColor ( random(), random(), random(), 1.0 )\n mol.chainColors[r.id.chainId] = clr\n\n for r in mol.residues :\n rt = r.type.upper()\n if rt in protein3to1 or rt in nucleic3to1 or rt in nucleic1to3 :\n r.ribbonDisplay = True\n r.ribbonDrawMode = 2\n r.ribbonColor = mol.chainColors[r.id.chainId]\n for at in r.atoms :\n at.display = False\n at.drawMode = at.EndCap\n if at.element.name.upper() in atomColors :\n at.color = atomColors[at.element.name.upper()]\n else :\n at.color = mol.chainColors[r.id.chainId]\n else :\n for at in r.atoms :\n at.display = True\n at.drawMode = at.EndCap\n if at.element.name.upper() in atomColors :\n at.color = atomColors[at.element.name.upper()]\n else :\n at.color = mol.chainColors[r.id.chainId]\n\n for b in mol.bonds :\n b.drawMode = b.Stick\n b.display = b.Smart\n\n\ndef At ( at ) :\n return \"%d.%s(%s)_%s\" % (at.residue.id.position, at.residue.id.chainId, at.residue.type, at.name)\n\n\n\n# this makes one molecule for each chains\n# probably useless, but was useful to see why adding bonds was crazy slow\ndef LoadMolCh_ ( fpath, log=False, task=None ) :\n\n mol = ReadMol ( fpath, log=False )\n\n from chimera.resCode import nucleic3to1\n from chimera.resCode import protein3to1, protein1to3\n protein3to1['HSD'] = protein3to1['HIS']\n protein3to1['HSE'] = protein3to1['HIS']\n\n nucleic1to3 = { 'T':'THY', 'C':'CYT', 'G':'GUA', 'A':'ADE', 'U':'URA'}\n nucleic3to1['GDP'] = nucleic3to1['GUA']\n\n crmap = {}\n rmolmap = {}\n\n print \" - adding bonds\"\n\n import time\n start = time.time()\n\n for r in mol.residues :\n if not r.id.chainId in crmap :\n crmap[r.id.chainId] = { r.id.position : r }\n else :\n crmap[r.id.chainId][r.id.position] = r\n\n chains = mol.chainColors.keys()\n\n print \"%d residues - %d chains\" % ( len(mol.residues), len(chains) )\n from os.path import splitext\n\n chMols = {}\n for ch in chains :\n\n start = time.time()\n\n chMol = chimera.Molecule()\n chMol.name = splitext ( mol.name )[0] + \"_\" + ch\n rmap0 = crmap[ch]\n chMols[ch] = chMol\n print \" - %s - %d residues\" % (ch, len(rmap0)),\n\n rmap = {}\n for ri, res in rmap0.iteritems() :\n nr = chMol.newResidue ( res.type, chimera.MolResId(ch, res.id.position) )\n rmap[nr.id.position] = nr\n for at in res.atoms :\n nat = chMol.newAtom ( at.name, at.element )\n nr.addAtom ( nat )\n nat.setCoord ( at.coord() )\n\n print \", %d atoms\" % len(chMol.atoms),\n\n for ri, r in rmap.iteritems() :\n 
#if ri % 100 == 0 :\n # print \"%d/%d\" % ( ri, len(mol.residues) )\n # if task :\n # task.updateStatus( \"%d/%d\" % ( ri, len(mol.residues) ) )\n\n rmol = None\n rtype = r.type.upper()\n if rtype.lower() in rmolmap :\n rmol = rmolmap[ rtype.lower() ]\n else :\n rmol = GetResMol ( rtype.lower() )\n if rmol != None :\n rmolmap[rtype.lower()] = rmol\n\n if rmol != None :\n for b in rmol.bonds :\n a1n, a2n = b.atoms[0].name, b.atoms[1].name\n if a1n in r.atomsMap and a2n in r.atomsMap :\n for a1 in r.atomsMap[a1n] :\n for a2 in r.atomsMap[a2n] :\n #print \"%s - %s\" % ( At(a1), At(a2) )\n nb = chMol.newBond ( a1, a2 )\n pass\n else :\n print \" - rmol %s not found\" % rtype\n\n if 1 :\n if r.type.upper() in protein3to1 :\n if r.id.position-1 in rmap :\n pres = rmap[r.id.position-1]\n if pres.type.upper() in protein3to1 :\n #GetSS ( pres, r )\n if \"C\" in pres.atomsMap and \"N\" in r.atomsMap :\n for a1 in pres.atomsMap[\"C\"] :\n for a2 in r.atomsMap[\"N\"] :\n #print a1.name, pres.id.position, a2.name, r.id.position\n nb = chMol.newBond ( a1, a2 )\n pass\n\n if r.type.upper() in nucleic1to3 or r.type.upper() in nucleic3to1 :\n if r.id.position-1 in rmap :\n pres = rmap[r.id.position-1]\n if pres.type.upper() in nucleic1to3 or pres.type.upper() in nucleic3to1 :\n if \"O3'\" in pres.atomsMap and \"P\" in r.atomsMap :\n for a1 in pres.atomsMap[\"O3'\"] :\n for a2 in r.atomsMap[\"P\"] :\n #print a1.name, pres.id.position, a2.name, r.id.position\n nb = chMol.newBond ( a1, a2 )\n pass\n\n print ( \", %d bonds, %.1fs\" % (len(chMol.bonds), time.time()-start) )\n\n #start = time.time()\n #chimera.openModels.add ( [chMol] )\n #print \" - added mol %ss, %.1fs\" % (chMol.name, time.time()-start)\n\n start = time.time()\n for r in chMol.residues :\n rt = r.type.upper()\n if rt in protein3to1 or rt in nucleic3to1 or rt in nucleic1to3 :\n r.ribbonDisplay = True\n r.ribbonDrawMode = 2\n r.ribbonColor = mol.chainColors[r.id.chainId]\n for at in r.atoms :\n at.display = False\n at.drawMode = at.EndCap\n if at.element.name.upper() in atomColors :\n at.color = atomColors[at.element.name.upper()]\n else :\n at.color = mol.chainColors[r.id.chainId]\n else :\n for at in r.atoms :\n at.display = True\n at.drawMode = at.EndCap\n if at.element.name.upper() in atomColors :\n at.color = atomColors[at.element.name.upper()]\n else :\n at.color = mol.chainColors[r.id.chainId]\n\n for b in mol.bonds :\n b.drawMode = b.Stick\n b.display = b.Smart\n\n #print \" - changed mol disp %s - %.1fs\" % (chMol.name, time.time()-start)\n\n return mol\n\n\n\ndef GetSS ( pres, r ) :\n\n if not \"N\" in r.atomsMap or not \"CA\" in r.atomsMap or not \"C\" in r.atomsMap :\n return\n\n n2 = r.atomsMap[\"N\"][0]\n ca2 = r.atomsMap[\"CA\"][0]\n c2 = r.atomsMap[\"C\"][0]\n\n if not \"N\" in pres.atomsMap or not \"CA\" in pres.atomsMap or not \"C\" in pres.atomsMap :\n return\n\n n1 = pres.atomsMap[\"N\"][0]\n ca1 = pres.atomsMap[\"CA\"][0]\n c1 = pres.atomsMap[\"C\"][0]\n #o1 = pres.atomsMap[\"O\"][0]\n\n phi = diha ( n1, ca1, c1, n2 )\n om = diha ( ca1, c1, n2, ca2 )\n psi = diha ( c1, n2, ca2, c2 )\n #oo = diha ( n1, ca1, c1, o1 )\n\n r.isHelix = phi > -90.0 and phi < -30.0 and psi > -80.0 and psi < -20.0\n r.isHet = False\n r.isSheet = False\n r.isStrand = phi > -150.0 and phi < -90.0 and psi > 90 and psi < -150.0\n\n\n\ndef diha ( a1, a2, a3, a4 ) :\n #n1 = vnorm ( a1.coord(), a2.coord(), a3.coord() )\n #n2 = vnorm ( a2.coord(), a3.coord(), a4.coord() )\n #return numpy.arccos ( n2 * n1 * -1.0 ) * 180.0 / numpy.pi\n\n # 
http://math.stackexchange.com/questions/47059/how-do-i-calculate-a-dihedral-angle-given-cartesian-coordinates\n b1 = a2.coord() - a1.coord()\n b2 = a3.coord() - a2.coord()\n b3 = a4.coord() - a3.coord()\n\n n1 = chimera.cross ( b1, b2 ); n1.normalize()\n n2 = chimera.cross ( b2, b3 ); n2.normalize()\n m1 = chimera.cross ( n1, b2 ); m1.normalize()\n\n x = n1 * n2\n y = m1 * n2\n\n return -1.0 * numpy.arctan2 ( y, x) * 180.0 / numpy.pi\n\n\n\n\ndef ReadMol ( fpath, log=False ) :\n\n from random import random\n\n cif, loops = ReadCif ( fpath, log )\n\n # descriptions by chain id:\n descrByEntityId = GetEntityDescr ( cif, loops )\n\n try :\n atoms = loops['_atom_site']['data']\n print \" - %d atom records\" % len(atoms)\n except :\n print \" - no atoms in cif?\"\n return None\n\n labels = loops['_atom_site']['labels']\n if 0 :\n print \"Labels:\"\n for l in labels :\n print \" : \", l\n\n import time\n start = time.time()\n\n rmap = {}\n\n nmol = chimera.Molecule()\n from os import path\n #nmol.name = path.splitext ( path.split (fpath)[1] )[0]\n nmol.name = path.split (fpath) [1]\n nmol.openedAs = [ fpath, [] ]\n nmol.cif = cif\n nmol.cifLoops = loops\n\n nmol.chainColors = {}\n nmol.chainDescr = {}\n\n numQ = 0\n first = True\n for at in atoms :\n mp = at['asMap']\n\n if log and first :\n #for label, val in mp.iteritems () :\n for li, label in enumerate ( labels ) :\n print \" %d : %s : %s\" % (li+1, label, mp[label])\n\n first = False\n\n atType = mp['type_symbol']\n atName = mp['label_atom_id']\n rtype = mp['label_comp_id']\n chainId = mp['label_asym_id']\n chainEId = mp['label_entity_id']\n px = mp['Cartn_x']\n py = mp['Cartn_y']\n pz = mp['Cartn_z']\n occ = mp['occupancy']\n bfactor = mp['B_iso_or_equiv']\n altLoc = mp['label_alt_id']\n if altLoc == \".\" : altLoc = ''\n\n if chainEId in descrByEntityId :\n nmol.chainDescr [chainId] = descrByEntityId [chainEId]\n\n resId = ResId ( mp )\n if resId == None :\n continue\n\n ris = \"%s%d\" % (chainId, resId)\n res = None\n if not ris in rmap :\n res = nmol.newResidue ( rtype, chimera.MolResId(chainId, resId) )\n rmap[ris] = res\n else :\n res = rmap[ris]\n\n clr = None\n if not chainId in nmol.chainColors :\n clr = chimera.MaterialColor ( random(), random(), random(), 1.0 )\n nmol.chainColors[chainId] = clr\n if 0 and log :\n print \" - chain %s\" % chainId\n else :\n clr = nmol.chainColors [chainId]\n\n nat = nmol.newAtom ( atName, chimera.Element(atType) )\n\n drawRib = rtype in protein3to1 or rtype in nucleic3to1\n\n #aMap[at] = nat\n res.addAtom( nat )\n nat.setCoord ( chimera.Point( float(px), float(py), float(pz) ) )\n nat.altLoc = altLoc\n nat.occupancy = float(occ)\n nat.bfactor = float(bfactor)\n\n if 'Q-score' in mp :\n try :\n Q = float ( mp['Q-score'] )\n nat.Q = Q\n numQ += 1\n except :\n #print \" - q score is\", mp['Q-score']\n pass\n\n end = time.time()\n print \" - created %d atoms, %.1fs, %d q-scores\" % ( len(nmol.atoms), end-start, numQ )\n\n return nmol\n\n\ndef DataStr ( data ) :\n if type(data) == dict :\n return data['string']\n return data\n\ndef GetEntityDescr ( cif, loops ) :\n\n descrByEntityId = {}\n if '_entity' in loops :\n elabels, entities = loops['_entity']['labels'], loops['_entity']['data']\n print \" - found _entity - %d labels, %d records\" % ( len(elabels), len(entities) )\n for ent in entities :\n entMap = ent['asMap']\n\n descr = []\n #if 'type' in entMap :\n # #descr.append ( \"Type: \" + entMap['type'] )\n # descr.append ( DataStr(entMap['type']) )\n\n if 'pdbx_description' in entMap :\n 
#descr.append ( \"Descr: \" + entMap['pdbx_description'] )\n descr.append ( DataStr(entMap['pdbx_description']) )\n\n if 'id' in entMap :\n descrByEntityId[ entMap['id'] ] = descr\n\n if '_entity_name_com' in loops :\n entities = loops['_entity_name_com']['data']\n print \" - found _entity_name_com - %d records\" % ( len(entities) )\n for ent in entities :\n entMap = ent['asMap']\n\n descr = []\n if 'entity_id' in entMap :\n eid = entMap['entity_id']\n if eid in descrByEntityId :\n descr = descrByEntityId[eid]\n else :\n descrByEntityId[eid] = descr\n\n if 'name' in entMap :\n #descr.append ( \"Name: \" + entMap['name'] )\n descr.append ( DataStr(entMap['name']) )\n\n return descrByEntityId\n\n\ndef ResId ( mp ) :\n\n resId = mp['label_seq_id']\n try :\n resId = int(resId)\n except :\n resId = None\n\n if resId == None :\n try :\n resId = int( mp['auth_seq_id'] )\n except :\n print \" - atom resId not numeric: %s/%s\" % ( mp['label_seq_id'], mp['auth_seq_id'] )\n resId = None\n\n return resId\n\n\ndef ConnectAtoms () :\n\n from chimera.resCode import nucleic3to1\n from chimera.resCode import protein3to1\n protein3to1['HSD'] = protein3to1['HIS']\n\n\n\ndef UpdateAtoms ( cif, mol ) :\n\n print \"Updating atoms in cif - %s\" % mol.name\n\n addQ = False\n amap = {}\n for r in mol.residues :\n for at in r.atoms :\n atId = \"%d.%s.%s.%s\" % (r.id.position,r.id.chainId,at.name,at.altLoc)\n amap[atId] = at\n if not addQ and hasattr ( at, 'Q' ) :\n addQ = True\n\n #print \" - %d items in cif\" % len(cif)\n\n for ls in cif :\n if type(ls) == list :\n name, labels, data = ls\n\n if name == \"_atom_site\" :\n #print \" - found atoms - %d\" % len(data)\n\n ilabels = {}\n for i, l in enumerate ( labels ) :\n ilabels [ l ] = i\n #print \" -- %s - %d\" % (l, i)\n\n addQatI = None\n if addQ and not 'Q-score' in ilabels :\n if 'B_iso_or_equiv' in ilabels :\n addQatI = ilabels['B_iso_or_equiv'] + 1\n else :\n addQatI = len (labels)\n labels.insert ( addQatI, \"Q-score\" )\n print \" - added Q-score column %d\" % addQatI\n\n ilabels = {}\n for i, l in enumerate ( labels ) :\n ilabels [ l ] = i\n #print \" -- %s - %d\" % (l, i)\n\n for d in data :\n adata, mp = d['asArray'], d['asMap']\n\n resId = ResId ( mp )\n if resId == None : continue\n atName = mp['label_atom_id']\n chainId = mp['label_asym_id']\n altLoc = mp['label_alt_id']\n if altLoc == \".\" : altLoc = ''\n\n datId = \"%d.%s.%s.%s\" % (resId,chainId,atName,altLoc)\n if datId in amap :\n at = amap[datId]\n #adata[ ilabels['B_iso_or_equiv'] ] = \"%.3f\" % at.bfactor\n qs = (\"%.3f\" % at.Q) if hasattr ( at, 'Q' ) else \"?\"\n if addQatI :\n adata.insert ( addQatI, qs )\n else :\n adata[ ilabels['Q-score'] ] = qs\n else :\n print \" - atom %s in cif - not found in mol\" % datId\n\n\n\ndef WriteMol ( mol, fout ) :\n\n if not hasattr (mol, 'cif') :\n print \" - cif not found in %s\" % mol.name\n return\n\n UpdateAtoms ( mol.cif, mol )\n WriteCif ( mol.cif, fout )\n\n\n#\n", "id": "7497416", "language": "Python", "matching_score": 1.7781989574432373, "max_stars_count": 6, "path": "Segger/mmcif.py" }, { "content": "\n\n\nfrom os import listdir\nfrom os.path import isfile, join, splitext\nimport shutil\nimport sys\n\nprint sys.argv[1]\nfromPath = sys.argv[1]\n\npath = \".\"\nprint \"\"\n\ncopied, notCopied = [], []\nprint \"Copying...\"\nprint \"-----------\"\nfor fname in listdir(fromPath) :\n if '__init__' in fname :\n notCopied.append (fname)\n continue\n if \".pyc\" == splitext(fname)[1] :\n continue\n fpathTo = join(path, fname)\n fpathFrom = 
join(fromPath, fname)\n #print \" -< \", fromPath,\n if isfile( fpathTo ) and isfile ( fpathFrom ) :\n shutil.copyfile ( fpathFrom, fpathTo )\n #print fname\n copied.append ( fname )\n else :\n notCopied.append (fname)\n\nprint \"\"\n\n\nprint \"Copied:\"\nprint \"-----------\"\ncopied.sort()\nfor f in copied :\n print f\nprint \"\"\n\nprint \"Not copied:\"\nprint \"-----------\"\nnotCopied.sort()\nfor f in notCopied :\n if splitext(f)[1] != \".pyc\" :\n print f\nprint \"\"\n\nprint \"Param:\"\nprint \"-----------\"\nfor f in listdir ( fromPath + \"/_param\" ) :\n\n fname, fext = splitext (f)\n if fext == \".pdb\" :\n print f,\n shutil.copy2 ( fromPath + \"/_param/\" + f, \"./_param/\" + f )\n\nprint \"\"\nprint \"\"\n", "id": "11767881", "language": "Python", "matching_score": 2.591433525085449, "max_stars_count": 6, "path": "Segger/from.py" }, { "content": "\n\n\nimport os\nimport shutil\nimport sys\n\nprint sys.argv[1]\nfromPath = sys.argv[1]\n\nfiles = [\"mapq.py\", \"qscores.py\", \"mmcif.py\"]\n\nfor f in files :\n print f,\n\n try :\n shutil.copy2 ( fromPath + \"/\" + f, \"./\" + f )\n print \" - ok\"\n except :\n print \"?\"\n\n\nfor f in os.listdir ( fromPath + \"/_param\" ) :\n\n fname, fext = os.path.splitext (f)\n if fext == \".pdb\" :\n print f,\n shutil.copy2 ( fromPath + \"/_param/\" + f, \"./_param/\" + f )\n\nprint \"\"\n", "id": "2465012", "language": "Python", "matching_score": 0.06416475027799606, "max_stars_count": 6, "path": "mapq/from.py" }, { "content": "# -----------------------------------------------------------------------------\n# Read and write segmentation data in hdf5 format.\n#\n# Example layout:\n#\n# format = \"segger\"\n# format_version = 1\n#\n# name = \"somedata segmentation\"\n# mask = <3-d array of region indices>\n# region_ids = <array or region ids numbers, length N>\n# region_colors = <array of rgba, N by 4>\n# ref_points = <array of region reference points, N by 3>\n# parent_ids = <array each regions parent (0 = no parent), length N>\n# smoothing_levels = <array of float, length N>\n#\n# map_path = \"/Users/smith/somedata.mrc\"\n# map_size = (512, 512, 200)\n# map_level = 1.245\n# ijk_to_xyz_transform = <3x4 matrix>\n#\n# Region attributes are each written in a separate group named to match\n# the attribute name with a type int, float, string appened to the name.\n# An array named \"attributes\" contains names of these group nodes.\n#\n# attributes = <array of node names>, e.g. 
[\"curvature float\", ...]\n#\n# /curvature float\n# attribute_name = \"curvature\"\n# ids = <array of region indices, length M>\n# values = <array of values, length M>\n#\n# skeleton = 'string encoding chimera marker file'\n#\n# The file is saved with the Python PyTables modules which includes\n# additional attributes \"VERSION\", \"CLASS\", \"TITLE\", \"PYTABLES_FORMAT_VERSION\".\n#\n# Tests with alternate data storage with every region being a separate HDF\n# node and every contact being a separate HDF node gave extremely slow\n# read/write speed.\n#\ndef write_segmentation(seg, path = None):\n\n if path is None:\n show_save_dialog(seg)\n return\n\n import tables\n h5file = tables.openFile(path, mode = 'w')\n\n try:\n\n root = h5file.root\n a = root._v_attrs\n a.format = 'segger'\n a.format_version = 2\n a.name = seg.name\n\n m = seg.mask\n atom = tables.Atom.from_dtype(m.dtype)\n filters = tables.Filters(complevel=5, complib='zlib')\n ma = h5file.createCArray(root, 'mask', atom, m.shape, filters = filters)\n ma[:] = m\n\n print \" - updating region colors...\"\n seg.region_colors ()\n\n from numpy import array, int32, float32\n rlist = seg.id_to_region.values()\n rlist.sort(lambda r1,r2: cmp(r1.rid,r2.rid))\n\n rids = array([r.rid for r in rlist], int32)\n h5file.createArray(root, 'region_ids', rids)\n\n rcolors = array([r.color for r in rlist], float32)\n h5file.createArray(root, 'region_colors', rcolors)\n\n refpts = array([r.max_point for r in rlist], float32)\n h5file.createArray(root, 'ref_points', refpts)\n\n slev = array([r.smoothing_level for r in rlist], float32)\n h5file.createArray(root, 'smoothing_levels', slev)\n\n pids = array([(r.preg.rid if r.preg else 0) for r in rlist], int32)\n h5file.createArray(root, 'parent_ids', pids)\n\n map = seg.volume_data()\n if map:\n d = map.data\n a.map_path = d.path\n print \" - map path: \", d.path\n a.map_size = array(d.size, int32)\n\n if not seg.map_level is None:\n a.map_level = seg.map_level\n\n t = seg.point_transform()\n if t is not None:\n from numpy import array, float32\n a.ijk_to_xyz_transform = array(t, float32)\n\n write_attributes(h5file, seg)\n\n if seg.adj_graph:\n write_skeleton(h5file, seg.adj_graph)\n\n finally:\n\n h5file.close()\n\n seg.path = path\n\n# -----------------------------------------------------------------------------\n#\ndef show_save_dialog(seg, saved_cb = None):\n\n def save ( okay, dialog, saved_cb = saved_cb ):\n if okay:\n paths = dialog.getPaths ( )\n if paths:\n write_segmentation ( seg, paths[0] )\n if saved_cb:\n saved_cb(seg)\n\n if hasattr(seg, 'path'):\n import os.path\n idir, ifile = os.path.split(seg.path)\n else:\n idir = None\n ifile = seg.name\n\n from OpenSave import SaveModeless\n SaveModeless ( title = 'Save Segmentation %s' % seg.name,\n filters = [('Segmentation', '*.seg', '.seg')],\n initialdir = idir, initialfile = ifile, command = save )\n\n# -----------------------------------------------------------------------------\n#\ndef write_attributes(h5file, seg):\n\n aa = {}\n for r in seg.all_regions():\n for a,v in r.attributes().items():\n ta = (a, attribute_value_type(v))\n if ta in aa:\n aa[ta].append((r.rid, v))\n else:\n aa[ta] = [(r.rid, v)]\n\n if len(aa) == 0:\n return # HDF5 doesn't handle 0 length arrays.\n\n gnames = []\n from numpy import array, uint32, int32, float64\n for (a,t), vals in aa.items():\n gname = a.replace('/','_') + ' ' + t\n gnames.append(gname)\n g = h5file.createGroup(\"/\", gname, 'region attribute')\n g._v_attrs.attribute_name = a\n rid = array([i for 
i,v in vals], uint32)\n h5file.createArray(g, 'ids', rid, 'region id numbers')\n if t == 'int':\n va = array([v for i,v in vals], int32)\n elif t == 'float':\n va = array([v for i,v in vals], float64)\n elif t == 'string':\n va = [v for i,v in vals]\n elif t == 'image':\n va = [image_to_string(v) for i,v in vals]\n h5file.createArray(g, 'values', va, 'attribute values')\n if t == 'image':\n g._v_attrs.value_type = 'PNG image'\n\n h5file.createArray(h5file.root, 'attributes', gnames)\n\n\n# -----------------------------------------------------------------------------\n#\ndef read_attributes(h5file, seg):\n\n r = h5file.root\n if not hasattr(r, 'attributes'):\n return\n\n id2r = seg.id_to_region\n for gname in r.attributes:\n g = getattr(r, gname)\n a = g._v_attrs.attribute_name\n ids = g.ids\n values = g.values\n img = (hasattr(g._v_attrs, 'value_type') and\n g._v_attrs.value_type == 'PNG image')\n for id,v in zip(ids,values):\n if id in id2r:\n if img:\n v = string_to_image(v)\n id2r[id].set_attribute(a, v)\n\n# -----------------------------------------------------------------------------\n#\nimport numpy\nint_types = (int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\n numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64)\nfloat_types = (float, numpy.float32, numpy.float64)\ndef attribute_value_type(v):\n\n from PIL.Image import Image\n if isinstance(v, int_types):\n return 'int'\n elif isinstance(v, float_types):\n return 'float'\n elif isinstance(v, basestring):\n return 'string'\n elif isinstance(v, Image):\n return 'image'\n\n raise TypeError, \"Can't save value type %s\" % str(type(v))\n\n# -----------------------------------------------------------------------------\n#\ndef image_to_string(image):\n\n from cStringIO import StringIO\n s = StringIO()\n image.save(s, 'PNG')\n return s.getvalue()\n\n# -----------------------------------------------------------------------------\n#\ndef string_to_image(string):\n\n from cStringIO import StringIO\n f = StringIO(string)\n from PIL import Image\n i = Image.open(f)\n return i\n\n# -----------------------------------------------------------------------------\n#\ndef write_skeleton(h5file, mset):\n\n import StringIO\n s = StringIO.StringIO()\n mset.save_as_xml(s)\n import numpy\n skel = numpy.char.array(str(s.getvalue()), itemsize = 1)\n s.close()\n h5file.createArray(h5file.root, 'skeleton', skel)\n\n# -----------------------------------------------------------------------------\n#\ndef read_segmentation(path, open = True, task = None):\n\n import tables\n f = tables.openFile(path)\n\n try :\n\n r = f.root\n a = r._v_attrs\n for n in ('format', 'format_version', 'name'):\n if not n in a:\n raise ValueError, 'Segmentation file does not have \"%s\" attribute' % n\n if a.format != 'segger':\n raise ValueError, 'Segmentation file format is not \"segger\"'\n if a.format_version != 2:\n raise ValueError, 'Segmentation file format is not 2'\n\n import os.path\n fname = os.path.basename(path)\n\n from regions import Segmentation\n s = Segmentation(fname, open = open)\n\n if 'map_path' in a:\n s.map_path = a.map_path\n print \" - map path: \" + s.map_path\n if 'map_level' in a:\n s.map_level = a.map_level\n if 'map_name' in a:\n s.map_name = a.map_name\n print \" - map name: \" + s.map_name\n\n if 'ijk_to_xyz_transform' in a:\n s.ijk_to_xyz_transform = a.ijk_to_xyz_transform\n\n s.mask = r.mask.read()\n #print \"mask:\"pl\n #print s.mask\n rids = r.region_ids.read()\n #print \"rids:\"\n #print rids\n rcolors = r.region_colors.read()\n refpts = 
r.ref_points.read()\n slevels = r.smoothing_levels.read() if hasattr(r, 'smoothing_levels') else None\n pids = r.parent_ids.read()\n #print \"pids:\"\n #print pids\n\n create_regions(s, rids, rcolors, refpts, slevels, pids, task)\n\n print \" - created regions\"\n\n read_attributes(f, s)\n\n read_skeleton(f, s)\n\n read_patches (f, s)\n\n finally:\n\n f.close()\n\n s.path = path\n\n print \" - done reading seg file: \" + path\n return s\n\n\n# -----------------------------------------------------------------------------\n#\ndef create_regions(s, rids, rcolors, refpts, slevels, pids, task):\n\n if task:\n task.updateStatus('Making ID table')\n id_to_index = dict([(id,i) for i,id in enumerate(rids)])\n\n if task:\n task.updateStatus('Collecting child region IDs')\n id_to_child_ids = {}\n n = len(rids)\n for i in range(n):\n pid = pids[i]\n if pid > 0:\n if pid in id_to_child_ids:\n id_to_child_ids[pid].append(rids[i])\n else:\n id_to_child_ids[pid] = [rids[i]]\n\n if task:\n task.updateStatus('Ordering IDs')\n from regions import Region\n ids = depth_order(rids, id_to_child_ids, set())\n rlist = []\n for c,rid in enumerate(ids):\n if rid in id_to_child_ids:\n children = [s.id_to_region[cid] for cid in id_to_child_ids[rid]]\n else:\n children = []\n i = id_to_index[rid]\n r = Region(s, rid, refpts[i], children)\n # TODO: Get wrappy error setting surface piece color to numpy array.\n r.color = tuple(rcolors[i])\n if not slevels is None:\n r.smoothing_level = slevels[i]\n rlist.append(r)\n if task and c % 1000 == 0:\n task.updateStatus('Created %d of %d regions' % (c,n))\n\n if not slevels is None:\n s.smoothing_level = max(slevels)\n\n return rlist\n\n# -----------------------------------------------------------------------------\n#\ndef depth_order(rids, id_to_child_ids, used):\n\n idlist = []\n for rid in rids:\n if not rid in used:\n used.add(rid)\n if rid in id_to_child_ids:\n cids = id_to_child_ids[rid]\n idlist.extend(depth_order(cids, id_to_child_ids, used))\n idlist.append(rid)\n return idlist\n\n# -----------------------------------------------------------------------------\n#\ndef show_open_dialog(dir, callback):\n\n def open ( okay, dialog ):\n if okay:\n paths_types = dialog.getPathsAndTypes ( )\n if paths_types:\n callback ( paths_types )\n\n from OpenSave import OpenModeless\n OpenModeless ( title = 'Open Segmentation',\n initialdir = dir,\n filters = [('Segmentation', ['*.seg']),\n ('Old regions file', ['*_regions'])],\n defaultFilter = 'Segmentation',\n command = open )\n\n# -----------------------------------------------------------------------------\n#\ndef read_skeleton(f, s):\n\n a = f.root\n if not 'skeleton' in a :\n return\n\n sks = a.skeleton.read().tostring()\n import StringIO\n xml = StringIO.StringIO(sks)\n from VolumePath import markerset\n marker_sets = markerset.load_marker_set_xml(xml, model_id = s.id)\n skel = marker_sets[0]\n skel.show_model(True)\n\n # Map markers to regions\n id2r = dict([(r.rid, r) for r in s.all_regions()])\n for m in skel.markers():\n rid = int(m.extra_attributes['region_id'])\n m.region = id2r.get(rid, None)\n if m.region is None:\n print 'missing skeleton region', rid\n\n s.adj_graph = skel\n\n\n# -----------------------------------------------------------------------------\n#\ndef read_patches(f, s):\n\n a = f.root\n if not 'patches' in a :\n return\n\n print \" - reading patches:\"\n\n patches = list( a.patches )\n print patches\n\n import chimera\n import Mesh\n reload ( Mesh )\n mesh = None\n\n for rg in patches:\n\n rgPath = 
rg._v_pathname\n print \" - path: \", rgPath\n rid = rgPath [ len(\"/patches/\"): ]\n print \" - patch for region \", rid, \" - type: \", rg._v_attrs[\"type\"]\n\n if not 'verts' in rg or not 'tris' in rg :\n print \" - tris or verts not found\"\n continue\n\n #print \" - region ids:\"\n #print s.id_to_region\n\n try :\n reg = s.id_to_region[int(rid)]\n except :\n print \" - did not find region for id\"\n continue\n\n\n verts = rg.verts.read()\n tris = rg.tris.read()\n print \" - %d verts, %d tris\" % (len(verts), len(tris))\n #print verts\n #print tris\n\n if mesh == None :\n mesh = Mesh.MeshFromVertsTris (verts, tris, color=reg.color, m=mesh)\n mesh.name = \"patches\"\n chimera.openModels.add([mesh])\n else :\n mesh = Mesh.MeshFromVertsTris (verts, tris, color=reg.color, m=mesh)\n", "id": "7076320", "language": "Python", "matching_score": 3.1405227184295654, "max_stars_count": 6, "path": "Segger/segfile.py" }, { "content": "\ndef create_graph ( smod, links ) :\n\n print \"\\nCreating graph for %s - %s\" % (smod.name, links)\n\n if hasattr(smod, 'adj_graph') and smod.adj_graph :\n smod.adj_graph.close()\n smod.adj_graph = None\n\n from VolumePath import Marker_Set, Marker, Link\n\n gname = smod.name + \" graph(%s)\" % links\n g = Marker_Set ( gname )\n smod.adj_graph = g\n smod.graph_links = links\n aMap = dict()\n\n regions = smod.regions\n marker_radius = 0.1 * sum([r.enclosed_volume() ** (1./3) for r in regions]) / len(regions)\n# ijk_to_xyz_transform = smod.point_transform()\n from Matrix import apply_matrix\n for reg in regions :\n# xyz = apply_matrix(ijk_to_xyz_transform, reg.max_point)\n c = reg.center_of_points()\n m = Marker(g, reg.rid, c, reg.color, marker_radius)\n aMap[reg] = m\n m.region = reg\n m.extra_attributes = { 'region_id' : str(reg.rid),\n 'region_size': str(reg.point_count()) }\n\n link_color = ( .5, .5, .5, 1 )\n link_radius = 0.5 * marker_radius\n\n from regions import group_contacts\n cons = group_contacts(smod.region_contacts())\n\n Ns, min_N, max_N = [], None, None\n avgds, min_avgd, max_avgd = [], None, None\n maxds, min_maxd, max_maxd = [], None, None\n\n # first run through contacts to list contacts and properties\n if links == \"avgd\" or links == \"maxd\" or links == \"N\" :\n for r1 in cons.keys() :\n for r2 in cons[r1].keys() :\n if r2 > r1 :\n\n con = cons[r1][r2]\n\n #print \"link %d -> %d -- N:%.1f, \" % (r1.rid, r2.rid, con.N),\n\n if con.N < 0.1 :\n #print \"*hmm*\"\n continue\n else :\n Ns.append ( con.N )\n\n if con.D :\n #print \"D:%.3f, \" % con.D,\n avgd = float(con.D) / (2.0 * float(con.N))\n avgds.append ( avgd )\n #else : print \"D:*\",\n\n if con.maximum_density :\n #print \"MaxD:%.3f, \" % con.maximum_density\n maxds.append ( con.maximum_density )\n #else : print \"MaxD:*\"\n\n\n min_N, max_N = min(Ns), max(Ns)\n #min_maxd, max_maxd = min(maxds), max(maxds)\n #min_avgd, max_avgd = min(avgds), max(avgds)\n\n #print \"Avg densities: %.5f -> %.5f\" % (min_avgd, max_avgd)\n print \"N: %.1f -> %.1f\" % (min_N, max_N)\n #print \"Maximum densities %.5f -> %.5f\" % (min_maxd, max_maxd)\n\n\n min_rad = marker_radius * 0.1\n max_rad = marker_radius * 0.75 - min_rad\n\n for r1 in cons.keys() :\n for r2 in cons[r1].keys() :\n if r2 > r1 :\n\n con = cons[r1][r2]\n\n if links == \"maxd\" and con.maximum_density :\n # radius proportional to max density at boundary\n if con.N > 0.1 :\n maxd = con.maximum_density\n link_radius_var = min_rad + max_rad * (maxd - min_maxd)/(max_maxd-min_maxd)\n Link ( aMap[r1], aMap[r2], link_color, link_radius_var )\n\n elif 
links == \"N\" and con.N:\n # radius of link proportional to area of contact\n # - where area of contact ~ #voxels between regions\n con = cons[r1][r2]\n link_radius_var = min_rad + max_rad * (con.N - min_N)/(max_N-min_N)\n Link ( aMap[r1], aMap[r2], link_color, link_radius_var )\n\n elif links == \"avgd\" and con.D:\n # radius of link proportional to average density\n # - at boundary\n if con.N > 0.1 :\n avgd = float(con.D) / (2.0 * float(con.N))\n link_radius_var = min_rad + max_rad * (avgd - min_avgd)/(max_avgd-min_avgd)\n Link ( aMap[r1], aMap[r2], link_color, link_radius_var )\n else :\n # same link radius for all links\n Link ( aMap[r1], aMap[r2], link_color, link_radius )\n\n\n g.show_model ( True )\n smod.display = True\n smod.regions_scale = 0.5\n smod.display_regions ('Voxel_Surfaces', None, None, True)\n\n\ndef break_selected_links():\n\n from VolumePath import markerset\n for l in markerset.selected_links():\n l.delete()\n\ndef link_selected():\n\n from VolumePath import selected_markers, Link\n msel = selected_markers()\n if len(msel) == 2:\n m0, m1 = msel\n if m0.marker_set == m1.marker_set:\n link_color = ( .5, .5, .5, 1)\n link_radius = 0.5 * m0.radius\n Link(m0, m1, link_color, link_radius)\n\ndef open_skeleton ( smod ) :\n\n if smod.adj_graph and hasattr(smod.adj_graph, 'path'):\n import os.path\n initdir, initfile = os.path.split(smod.adj_graph.path)\n else:\n initdir, initfile = smod.path, smod.name + ' skeleton'\n def open(o, d, smod = smod):\n open_skeleton_file(o, d, smod)\n import OpenSave\n d = OpenSave.OpenModeless(title = 'Open skeleton',\n initialdir = initdir,\n initialfile = initfile,\n filters = [('Chimera Markers', '*.cmm', '')],\n multiple = False,\n command = open)\n\ndef open_skeleton_file ( open, dialog, smod ) :\n\n if not open:\n return\n\n paths = dialog.getPaths()\n path = paths[0]\n import VolumePath\n g = VolumePath.open_marker_set(path)\n\n mlist = g.markers()\n for m in mlist:\n m.region = None\n rid = int(m.extra_attributes['region_id'])\n if rid in smod.id_to_region:\n r = smod.id_to_region[rid]\n rsize = int(m.extra_attributes['region_size'])\n if r.point_count() == rsize:\n m.region = r\n\n#TODO: The regions file format renumbers the regions consecutively. So\n# matching based on region id won't work. 
Should change file format to\n# hdf5 and include region ids, colors, and any other useful per-region info.\n\n nomatch = [m for m in mlist if m.region is None]\n if nomatch:\n umsg('%d of %d skeleton nodes did not match a region' %\n (len(nomatch), len(mlist)))\n\n if smod.adj_graph:\n smod.adj_graph.close()\n smod.adj_graph = g\n\n\ndef save_skeleton ( smod ) :\n\n if smod.adj_graph is None:\n umsg('No skeleton for %s' % smod.name)\n return\n\n if hasattr(smod.adj_graph, 'path'):\n import os.path\n initdir, initfile = os.path.split(smod.adj_graph.path)\n else:\n initdir, initfile = smod.path, smod.name + ' skeleton'\n import OpenSave\n d = OpenSave.SaveModal(title = 'Save skeleton',\n initialdir = initdir,\n initialfile = initfile,\n filters = [('Chimera Markers', '*.cmm', '')])\n from chimera.tkgui import app\n paths_and_types = d.run(app)\n if paths_and_types:\n path = paths_and_types[0][0]\n out = open(path, 'w')\n from VolumePath import markerset\n markerset.save_marker_sets([smod.adj_graph], out)\n out.close()\n\n\ndef close ( smod ) :\n\n g = smod.adj_graph\n if g:\n g.close()\n smod.adj_graph = None\n smod.regions_scale = 1.0\n smod.display_regions ('Voxel_Surfaces', None, None, True)\n\n\ndef group_by_skeleton ( smod ) :\n\n g = smod.adj_graph\n if g is None:\n return\n\n # Find connected groups of markers.\n msets = connected_markers(g)\n\n # Find connected regions.\n rgroups = [[m.region for m in mset] for mset in msets]\n\n # Exclude region groups that are already grouped correctly.\n rgroups = [rgroup for rgroup in rgroups if not is_region_group(rgroup)]\n\n # Find current most common color for each group.\n colors = [most_common_region_color(rgroup) for rgroup in rgroups]\n\n # Make split regions have different colors.\n csize = {}\n for rg, c in zip(rgroups, colors):\n if c in csize:\n csize[c] = max(csize[c], len(rg))\n else:\n csize[c] = len(rg)\n from regions import random_color\n for i, c in enumerate(colors):\n if len(rgroups[i]) < csize[c]:\n colors[i] = random_color()\n\n # Ungroup regions that need regrouping\n remove_parents(concatenate(rgroups), smod)\n\n # Group connected associated regions\n for rgroup, c in zip(rgroups, colors):\n r = smod.join_regions ( rgroup )\n r.color = c\n\n\ndef remove_parents(regions, smod):\n\n while True:\n from regions import TopParentRegions\n parents = TopParentRegions ( [r for r in regions if r.preg] )\n if parents:\n smod.ungroup_regions ( parents )\n else:\n break\n\ndef is_region_group(regions):\n\n r0 = regions[0]\n if r0.preg is None:\n return False\n p = r0.preg\n if p.preg:\n return False\n return same_list_elements(p.cregs, regions)\n\ndef most_common_region_color(regions):\n\n ct = {}\n for r in regions:\n c = tuple(r.top_parent().color)\n if c in ct:\n ct[c] += 1\n else:\n ct[c] = 1\n count, color = max([(count, c) for c, count in ct.items()])\n return color\n\ndef connected_markers(mset):\n\n cm = {}\n for l in mset.links():\n m1, m2 = l.marker1, l.marker2\n if m1.region is None or m2.region is None:\n continue\n cm1 = cm.setdefault(m1, set([m1]))\n cm2 = cm.setdefault(m2, set([m2]))\n if not cm2 is cm1:\n cm1.update(cm2)\n for m in cm2:\n cm[m] = cm1\n msets = dict([(id(ms), ms) for ms in cm.values()]).values()\n\n # Add lone markers.\n for m in mset.markers():\n if m.region and len(m.links()) == 0:\n msets.append(set([m]))\n\n return msets\n\ndef concatenate(lists):\n\n c = []\n for e in lists:\n c.extend(e)\n return c\n\ndef same_list_elements(list1, list2):\n\n if len(list1) != len(list2):\n return False\n set2 = 
set(list2)\n for e in list1:\n if not e in set2:\n return False\n return True\n", "id": "2268881", "language": "Python", "matching_score": 0.5339290499687195, "max_stars_count": 6, "path": "Segger/graph.py" }, { "content": "dev_menus = False # Include under-development menus.\nshowDevTools = False # some new also under-development tools\ntiming = False # Report execution times for optimizing code.\nseggerVersion = '2.8.2'\nmapqVersion = '1.8.2'\n\nfrom regions import Segmentation, Region, SelectedRegions\n", "id": "184255", "language": "Python", "matching_score": 3.4924416542053223, "max_stars_count": 6, "path": "Segger/__init__.py" }, { "content": "showDevTools = False # some new also under-development tools\nmapqVersion = '1.8.1'\n", "id": "6018499", "language": "Python", "matching_score": 0.14660313725471497, "max_stars_count": 6, "path": "mapq/__init__.py" }, { "content": "from chimera.extension import EMO, manager\n\n# -----------------------------------------------------------------------------\n#\nclass Segger_EMO ( EMO ):\n\n def name(self):\n return 'Segger'\n def description(self):\n return self.categoryDescriptions()['Volume Data']\n def categories(self):\n return self.categoryDescriptions().keys()\n def categoryDescriptions(self):\n # since we want to use specialized descriptions for certain categories...\n return {\n 'Volume Data': 'Segment volume data to identify components',\n }\n def icon(self):\n return self.path('volseg.png')\n def activate(self):\n # self.module('volumedialog').show_volume_dialog()\n d = self.module('segment_dialog').show_volume_segmentation_dialog()\n return None\n\n# -----------------------------------------------------------------------------\n#\nclass SegFit_EMO ( EMO ):\n\n def name(self):\n return 'SegFit'\n def description(self):\n return self.categoryDescriptions()['Volume Data']\n def categories(self):\n return self.categoryDescriptions().keys()\n def categoryDescriptions(self):\n # since we want to use specialized descriptions for certain categories...\n return {\n 'Volume Data': 'Fit structures into segmented regions',\n }\n def icon(self):\n return self.path('fitseg.png')\n def activate(self):\n # self.module('volumedialog').show_volume_dialog()\n d = self.module('fit_dialog').show_fit_segments_dialog()\n return None\n\n# -----------------------------------------------------------------------------\n#\nclass MapQ_EMO ( EMO ):\n\n def name(self):\n return 'MapQ from Segger'\n def description(self):\n return self.categoryDescriptions()['Volume Data']\n def categories(self):\n return self.categoryDescriptions().keys()\n def categoryDescriptions(self):\n # since we want to use specialized descriptions for certain categories...\n return {\n 'Volume Data': 'Evaluate map & model',\n }\n def icon(self):\n return self.path('mapq.png')\n def activate(self):\n # self.module('volumedialog').show_volume_dialog()\n d = self.module('mapq').show_dialog()\n return None\n\n\n# -----------------------------------------------------------------------------\n# Register dialogs and menu entry.\n#\nmanager.registerExtension ( SegFit_EMO ( __file__ ) )\nmanager.registerExtension ( Segger_EMO ( __file__ ) )\n#manager.registerExtension ( MapQ_EMO ( __file__ ) )\n\n# -----------------------------------------------------------------------------\n# Register segmentation file reader.\n#\ndef open_seg(path):\n from Segger import segment_dialog as sd\n d = sd.show_volume_segmentation_dialog()\n d.OpenSegFiles([(path, 'Segmentation')])\n return []\n\nimport chimera\nfi = 
chimera.fileInfo\nfi.register('Segger segmentation', open_seg, ['.seg'], ['segger'],\n category = fi.GENERIC3D)\n\n# -----------------------------------------------------------------------------\n# Register segment command.\n#\ndef segment(cmdname, args):\n from Segger import segcmd\n segcmd.segment_command(cmdname, args)\n\nimport Midas.midas_text\nMidas.midas_text.addCommand('segment', segment, None, help = True)\n", "id": "12231243", "language": "Python", "matching_score": 4.605138778686523, "max_stars_count": 6, "path": "Segger/ChimeraExtension.py" }, { "content": "from chimera.extension import EMO, manager\n\n# -----------------------------------------------------------------------------\n#\nclass MapQ_Dialog_EMO ( EMO ):\n\n def name(self):\n return 'MapQ'\n def description(self):\n return self.categoryDescriptions()['Volume Data']\n def categories(self):\n return self.categoryDescriptions().keys()\n def categoryDescriptions(self):\n # since we want to use specialized descriptions for certain categories...\n return {\n 'Volume Data': 'Evaluate map & model',\n }\n def icon(self):\n return self.path('mapq.png')\n def activate(self):\n # self.module('volumedialog').show_volume_dialog()\n d = self.module('mapq').show_dialog()\n return None\n\n# -----------------------------------------------------------------------------\n# Register dialogs and menu entry.\n#\nmanager.registerExtension ( MapQ_Dialog_EMO ( __file__ ) )\n", "id": "12814466", "language": "Python", "matching_score": 3.5089454650878906, "max_stars_count": 6, "path": "mapq/ChimeraExtension.py" }, { "content": "from chimera.extension import EMO, manager\n\n# -----------------------------------------------------------------------------\n#\nclass BioMovie_EMO ( EMO ):\n\n def name(self):\n return 'BioMovie'\n def description(self):\n return self.categoryDescriptions()['Utilities']\n def categories(self):\n return self.categoryDescriptions().keys()\n def categoryDescriptions(self):\n # since we want to use specialized descriptions for certain categories...\n return {\n 'Utilities': 'Run movie script',\n }\n def icon(self):\n return None #self.path('volseg.png')\n def activate(self):\n # self.module('volumedialog').show_volume_dialog()\n d = self.module('biomovie').show_dialog()\n return None\n\n# -----------------------------------------------------------------------------\n# Register dialogs and menu entry.\n#\nmanager.registerExtension ( BioMovie_EMO ( __file__ ) )\n", "id": "8082323", "language": "Python", "matching_score": 3.899113655090332, "max_stars_count": 1, "path": "biomovie/ChimeraExtension.py" } ]
3.508945
UTNkar
[ { "content": "from django.apps import AppConfig\n\n\nclass Tailwind_themeConfig(AppConfig):\n name = 'tailwind_theme'\n", "id": "11804899", "language": "Python", "matching_score": 0.14917898178100586, "max_stars_count": 0, "path": "src/tailwind_theme/apps.py" }, { "content": "# Generated by Django 3.2.2 on 2021-05-11 11:26\n\nfrom django.db import migrations\n\n\ndef add_default_group(apps, schema_editor):\n Group = apps.get_model('event_calendar', 'Group')\n Group.objects.create(\n name_sv='UTN',\n name_en='UTN'\n )\n\n\ndef remove_default_group(apps, schema_editor):\n Group = apps.get_model('event_calendar', 'Group')\n Group.objects.filter(name_sv='UTN').delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('event_calendar', '0001_initial'),\n ]\n\n operations = [\n migrations.RunPython(\n add_default_group,\n reverse_code=remove_default_group\n )\n ]\n", "id": "2953066", "language": "Python", "matching_score": 0.5054783225059509, "max_stars_count": 0, "path": "src/event_calendar/migrations/0002_add_default_group.py" }, { "content": "from django.views.generic import ListView\nfrom event_calendar.models import Event\n\n\nclass EventListView(ListView):\n \"\"\"The view that lists all events.\"\"\"\n\n model = Event\n template_name = \"event_list.html\"\n", "id": "4183348", "language": "Python", "matching_score": 0.4251128137111664, "max_stars_count": 0, "path": "src/event_calendar/views.py" }, { "content": "from .base import *\nfrom decouple import config\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('DJANGO_SECRET')\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': config('DJANGO_DB_NAME', default='event_calendar'),\n 'USER': config('DJANGO_DB_USER', default='event_calendar'),\n 'PASSWORD': config('DJANGO_DB_PASS', default=''),\n 'HOST': config('DJANGO_DB_HOST', default='127.0.0.1'),\n 'PORT': config('DJANGO_DB_PORT', default='5432'),\n }\n}\n\nALLOWED_HOSTS = ['.utn.se']\n\nCSRF_COOKIE_SECURE = True\n\nSESSION_COOKIE_DOMAIN = '.utn.se'\n\nSESSION_COOKIE_SECURE = True\n", "id": "1435884", "language": "Python", "matching_score": 3.499642848968506, "max_stars_count": 0, "path": "src/event_calendar/settings/production.py" }, { "content": "from .base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'django-insecure-+gw(r$=3=@js0!6qd^611*c4wpbfwsd&q8b=!=<KEY>'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'dev.sqlite3'),\n }\n }", "id": "8735473", "language": "Python", "matching_score": 1.0723676681518555, "max_stars_count": 0, "path": "src/event_calendar/settings/dev.py" }, { "content": "\"\"\"\nDjango settings for event_calendar project.\n\nGenerated by 'django-admin startproject' using Django 3.1.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nfrom pathlib import Path\nfrom decouple import config\nimport os\nfrom django.utils.translation import gettext_lazy as _\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n 
'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'event_calendar',\n 'tailwind',\n 'tailwind_theme'\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'event_calendar.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'event_calendar.wsgi.application'\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = 'en'\n\nLANGUAGES = [\n ('sv', _('Swedish')),\n ('en', _('English')),\n]\n\nTIME_ZONE = 'Europe/Stockholm'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\n\nMEDIA_URL = '/media/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n\nAUTH_USER_MODEL = \"event_calendar.User\"\n\nTAILWIND_APP_NAME = 'tailwind_theme'\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n", "id": "9810359", "language": "Python", "matching_score": 2.7462220191955566, "max_stars_count": 0, "path": "src/event_calendar/settings/base.py" }, { "content": "from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.auth.forms import ReadOnlyPasswordHashField\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom event_calendar.models import User\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass UserCreationForm(forms.ModelForm):\n \"\"\"A form for creating administrators in the admin pages.\"\"\"\n\n password1 = forms.CharField(label='Password', widget=forms.PasswordInput)\n password2 = forms.CharField(\n label='Password confirmation',\n widget=forms.PasswordInput\n )\n\n class Meta:\n model = User\n fields = ('email', 'group', 'is_superuser')\n\n def clean_password2(self):\n \"\"\"Check that the two passwords are the same.\"\"\"\n # Check that the two password entries match\n password1 = self.cleaned_data.get(\"password1\")\n password2 = self.cleaned_data.get(\"password2\")\n if password1 and password2 and password1 != password2:\n raise ValidationError(_(\"Passwords don't match\"))\n return password2\n\n 
def save(self, commit=True): # noqa\n user = super().save(commit=False)\n user.set_password(self.cleaned_data[\"<PASSWORD>\"])\n user.save(commit=commit)\n return user\n\n\nclass UserChangeForm(forms.ModelForm):\n \"\"\"\n A form for updating users.\n\n Includes all the fields on the user, but replaces the password field with\n admin's password hash display field.\n \"\"\"\n\n password = ReadOnlyPasswordHashField(\n help_text=_(\n \"<a href=\\\"../password/\\\">Change the user's password here</a>.\"\n )\n )\n\n class Meta:\n model = User\n fields = (\n 'email', 'password', 'group', 'is_superuser'\n )\n\n def clean_password(self): # noqa\n # Regardless of what the user provides, return the initial value.\n # This is done here, rather than on the field, because the\n # field does not have access to the initial value\n return self.initial[\"password\"]\n\n\nclass UserAdmin(BaseUserAdmin):\n \"\"\"A Custom UserAdmin class for our Admin model.\"\"\"\n\n # The forms to add and change user instances\n form = UserChangeForm\n add_form = UserCreationForm\n\n # The fields to be used in displaying the User model.\n # These override the definitions on the base UserAdmin\n # that reference specific fields on auth.User.\n list_display = ('email', 'group', 'is_superuser')\n list_filter = ()\n fieldsets = (\n (None, {'fields': ('email', 'password', 'group', 'is_superuser')}),\n )\n # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin\n # overrides get_fieldsets to use this attribute when creating a user.\n add_fieldsets = (\n (None, {\n 'fields': (\n 'email', 'group', 'password1', 'password2', 'is_superuser'\n ),\n }),\n )\n search_fields = ('email', 'group__name_sv', 'group__name_en')\n ordering = ('group', 'email',)\n filter_horizontal = ()\n", "id": "3549359", "language": "Python", "matching_score": 2.0628507137298584, "max_stars_count": 0, "path": "src/event_calendar/admin/user.py" }, { "content": "# Generated by Django 3.2.2 on 2021-05-12 13:41\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name_sv', models.CharField(max_length=128, verbose_name='Swedish name')),\n ('name_en', models.CharField(max_length=128, verbose_name='English name')),\n ],\n ),\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title_sv', models.CharField(max_length=256, verbose_name='Swedish event title')),\n ('title_en', models.CharField(max_length=256, verbose_name='English event title')),\n ('cover_photo', models.ImageField(upload_to='')),\n ('description_en', models.TextField(verbose_name='English event description')),\n ('description_sv', models.TextField(verbose_name='Swedish event description')),\n ('created', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),\n ('modified', models.DateTimeField(auto_now=True, verbose_name='Last modified')),\n ('date_start', models.DateTimeField(help_text='Date and time when the event will start.', verbose_name='Event start date')),\n ('date_end', models.DateTimeField(blank=True, help_text='Leave empty if the event has no specific end time.', null=True, verbose_name='Event end date')),\n ('published', models.BooleanField(default=False)),\n 
('membership_required', models.BooleanField(help_text='If the event requires a section or UTN membership.', verbose_name='Membership required')),\n ('contact', models.CharField(max_length=512)),\n ('location', models.CharField(blank=True, max_length=256)),\n ('price', models.CharField(blank=True, max_length=256)),\n ('link', models.URLField(blank=True, help_text='If your event has an important URL, such as a Zoom-link or Facebook page, enter it here.', verbose_name='URL')),\n ('categories', models.ManyToManyField(related_name='categories', to='event_calendar.Category', verbose_name='Event categories')),\n ],\n ),\n migrations.CreateModel(\n name='Group',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name_en', models.CharField(max_length=128, verbose_name='English name')),\n ('name_sv', models.CharField(max_length=128, verbose_name='Swedish name')),\n ('description_en', models.TextField(blank=True, verbose_name='English description')),\n ('description_sv', models.TextField(blank=True, verbose_name='Swedish description')),\n ],\n ),\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('content', models.TextField(verbose_name='Post contents')),\n ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Time of posting')),\n ('has_been_edited', models.BooleanField(default=False, verbose_name='Has been edited')),\n ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='event_calendar.group', verbose_name='Post creator')),\n ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='event_calendar.event', verbose_name='Related event')),\n ],\n ),\n migrations.CreateModel(\n name='EventCoHost',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('status', models.CharField(choices=[('invited', 'Invited'), ('accepted', 'Accepted')], default='invited', max_length=32)),\n ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='event_calendar.event')),\n ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='event_calendar.group')),\n ],\n ),\n migrations.AddField(\n model_name='event',\n name='cohosts',\n field=models.ManyToManyField(related_name='cohosted_events', through='event_calendar.EventCoHost', to='event_calendar.Group', verbose_name='Event co-hosts'),\n ),\n migrations.AddField(\n model_name='event',\n name='host',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='event_calendar.group', verbose_name='Event host'),\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('password', models.CharField(max_length=128, verbose_name='password')),\n ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),\n ('email', models.EmailField(max_length=254, unique=True)),\n ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),\n ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='event_calendar.group')),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n", "id": "3673124", "language": "Python", "matching_score": 6.90122652053833, 
"max_stars_count": 0, "path": "src/event_calendar/migrations/0001_initial.py" }, { "content": "from django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager\n\n\nclass UserManager(BaseUserManager):\n \"\"\"The user manager for custom user model.\"\"\"\n\n def create_user(self, email, password, group):\n \"\"\"Create a new user.\"\"\"\n group_instance = Group.objects.get(pk=group)\n user = self.model(\n email=email,\n group=group_instance\n )\n\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, email, password, group):\n \"\"\"Create a user and make it a superuser.\"\"\"\n user = self.create_user(email, password, group)\n\n user.is_superuser = True\n user.save(using=self._db)\n return user\n\n\nclass User(AbstractBaseUser):\n \"\"\"The user.\"\"\"\n\n email = models.EmailField(unique=True)\n\n group = models.ForeignKey(\n 'Group',\n on_delete=models.CASCADE\n )\n\n is_superuser = models.BooleanField(\n _('superuser status'),\n default=False,\n help_text=_(\n 'Designates that this user has all permissions without '\n 'explicitly assigning them.'\n ),\n )\n\n objects = UserManager()\n\n is_active = True\n is_staff = True\n\n USERNAME_FIELD = 'email'\n EMAIL_FIELD = 'email'\n REQUIRED_FIELDS = ['group']\n\n def has_perm(self, perm, obj=None): # noqa\n return True\n\n def has_module_perms(self, app_label): # noqa\n return True\n\n\nclass Category(models.Model):\n \"\"\"The categories an event can have.\"\"\"\n\n name_sv = models.CharField(\n max_length=128,\n verbose_name=_(\"Swedish name\")\n )\n name_en = models.CharField(\n max_length=128,\n verbose_name=_(\"English name\")\n )\n\n def __str__(self): # noqa\n return self.name_en\n\n\nclass Post(models.Model):\n \"\"\"\n Text posts where the event hosts can write updates.\n\n Text posts written by the event hosts that will be displayed on the\n event main page. Can contain information, updates, release links etcetera.\n If has_been_edited is true, the post will be displayed as 'edited'.\n \"\"\"\n\n event = models.ForeignKey(\n 'Event',\n on_delete=models.CASCADE,\n verbose_name=_(\"Related event\"),\n related_name='posts'\n )\n content = models.TextField(\n verbose_name=_(\"Post contents\"),\n )\n created_by = models.ForeignKey(\n 'Group',\n on_delete=models.CASCADE,\n verbose_name=_(\"Post creator\"),\n )\n created_at = models.DateTimeField(\n auto_now_add=True,\n verbose_name=_(\"Time of posting\"),\n )\n has_been_edited = models.BooleanField(\n default=False,\n verbose_name=_(\"Has been edited\"),\n )\n\n def __str__(self): # noqa\n return \"In {} posted by {} at {}.\".format(\n self.event,\n self.created_by,\n self.created_at\n )\n\n def save(self, *args, **kwargs):\n \"\"\"If post has been edited, update has_been_edited.\"\"\"\n if self.id:\n self.has_been_edited = True\n return super(Post, self).save(*args, **kwargs)\n\n\nclass Group(models.Model):\n \"\"\"The group that events belong to, e.g. 
Forskå or E-sektionen.\"\"\"\n\n name_en = models.CharField(\n max_length=128,\n verbose_name=_(\"English name\"),\n )\n name_sv = models.CharField(\n max_length=128,\n verbose_name=_(\"Swedish name\"),\n )\n description_en = models.TextField(\n verbose_name=_(\"English description\"),\n blank=True\n )\n description_sv = models.TextField(\n verbose_name=_(\"Swedish description\"),\n blank=True\n )\n\n def __str__(self): # noqa\n return self.name_en\n\n\nclass EventCoHost(models.Model):\n \"\"\"A co host to an event.\"\"\"\n\n event = models.ForeignKey(\n 'Event',\n on_delete=models.CASCADE\n )\n\n group = models.ForeignKey(\n 'Group',\n on_delete=models.CASCADE\n )\n\n status = models.CharField(\n max_length=32,\n choices=(\n (\"invited\", _(\"Invited\")),\n (\"accepted\", _(\"Accepted\"))\n ),\n default=\"invited\"\n )\n\n\nclass Event(models.Model):\n \"\"\"The event model itself.\"\"\"\n\n title_sv = models.CharField(\n max_length=256,\n verbose_name=_(\"Swedish event title\"),\n )\n title_en = models.CharField(\n max_length=256,\n verbose_name=_(\"English event title\")\n )\n host = models.ForeignKey(\n 'Group',\n on_delete=models.CASCADE,\n verbose_name=_(\"Event host\"),\n )\n cohosts = models.ManyToManyField(\n 'Group',\n verbose_name=_(\"Event co-hosts\"),\n related_name=\"cohosted_events\",\n through=\"EventCoHost\"\n )\n categories = models.ManyToManyField(\n 'Category',\n verbose_name=_(\"Event categories\"),\n related_name=\"categories\"\n )\n # TODO: Remove image from system automatically when event is deleted\n cover_photo = models.ImageField()\n description_en = models.TextField(\n verbose_name=_(\"English event description\"),\n )\n description_sv = models.TextField(\n verbose_name=_(\"Swedish event description\"),\n )\n created = models.DateTimeField(\n auto_now_add=True,\n verbose_name=_(\"Created at\"),\n )\n modified = models.DateTimeField(\n auto_now=True,\n verbose_name=_(\"Last modified\"),\n )\n date_start = models.DateTimeField(\n verbose_name=_(\"Event start date\"),\n help_text=_(\"Date and time when the event will start.\"),\n )\n date_end = models.DateTimeField(\n blank=True,\n null=True,\n verbose_name=_(\"Event end date\"),\n help_text=_(\"Leave empty if the event has no specific end time.\")\n )\n published = models.BooleanField(\n default=False\n )\n membership_required = models.BooleanField(\n verbose_name=_(\"Membership required\"),\n help_text=_(\"If the event requires a section or UTN membership.\"),\n )\n contact = models.CharField(\n max_length=512\n )\n location = models.CharField(\n max_length=256,\n blank=True\n )\n price = models.CharField(\n max_length=256,\n blank=True\n )\n link = models.URLField(\n blank=True,\n verbose_name=_(\"URL\"),\n help_text=_(\n \"If your event has an important URL, \"\n \"such as a Zoom-link or Facebook page, enter it here.\"\n ),\n )\n\n def __str__(self): # noqa\n return self.title_en + \" by \" + self.host.name_en\n", "id": "6320312", "language": "Python", "matching_score": 0.8509933948516846, "max_stars_count": 0, "path": "src/event_calendar/models.py" }, { "content": "from django.contrib import admin\nfrom django.contrib.auth.models import Group as DjangoGroup\nfrom .user import UserAdmin\nfrom ..models import (\n User, Event, Group, EventCoHost, Post, Category\n)\nfrom .event import EventAdmin\n\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(Group, admin.ModelAdmin)\nadmin.site.register(Post, admin.ModelAdmin)\nadmin.site.register(Category, 
admin.ModelAdmin)\nadmin.site.register(EventCoHost, admin.ModelAdmin)\n\nadmin.site.unregister(DjangoGroup)\n", "id": "6149032", "language": "Python", "matching_score": 2.506154775619507, "max_stars_count": 0, "path": "src/event_calendar/admin/__init__.py" }, { "content": "from django.contrib import admin\nfrom event_calendar.models import EventCoHost\n\n\nclass CoHostInline(admin.TabularInline): # noqa\n model = EventCoHost\n extra = 1\n\n\nclass EventAdmin(admin.ModelAdmin): # noqa\n inlines = (CoHostInline,)\n", "id": "5813558", "language": "Python", "matching_score": 1.903469443321228, "max_stars_count": 0, "path": "src/event_calendar/admin/event.py" } ]
1.903469
tobiagru
[ { "content": "#import core\n\n#import public\nfrom flask import render_template, request, Flask, redirect, url_for\n\n#import privat\nfrom . import main\nimport src\n\n#Session\n\[email protected]('/')\ndef index():\n return render_template('main/quizapp.html')\n\[email protected]('/terms')\ndef terms():\n return render_template('main/tos.html')\n\[email protected]('/privacy')\ndef tos():\n return render_template('main/privacy.html')\n\n#returns: questions\[email protected]('/getquestion', methods=['GET', 'POST'])\ndef getquestion():\n\tif request.method == 'POST':\n\t\treturn src.build_questions(\n\t\t\tlanguage = request.get_json()['locale'][-2:]\n\t\t)\n\tif request.method == 'GET':\n\t\treturn src.build_questions()\n\n#recieve: answers, asnwer_meta\n#return: usertype\[email protected]('/postanswer', methods=['GET', 'POST'])\ndef postanswer():\n\tsrc.save_answers(request.get_json(force=True))\n\treturn src.getusertype()\n\n#\tif request.is_json:\n#\t \tsrc.save_answers(request.get_json(force=True))\n#\t \treturn src.getusertype()\n#\telse:\n#\t\ttry:\n#\t\t\tsrc.save_answers(json.loads(request.get_data()))\n#\t\texcept:\n#\t\t\tprint(\"cannot handle this type of data\")\n#\t\t\tprint(request.get_data())\n#\t \treturn src.getusertype()\n\n#return: ...\[email protected]('/analytics')\ndef analytics():\n\treturn 200\n", "id": "6890277", "language": "Python", "matching_score": 1.2483603954315186, "max_stars_count": 0, "path": "app/main/views.py" }, { "content": "from flask import url_for\nimport os\n\ndef register_template_utils(app):\n \"\"\"Register Jinja 2 helpers (called from __init__.py).\"\"\"\n\n @app.template_test()\n def equalto(value, other):\n return value == other\n\n @app.template_global()\n def is_hidden_field(field):\n from wtforms.fields import HiddenField\n return isinstance(field, HiddenField)\n\n app.add_template_global(index_for_role)\n\n\ndef index_for_role(role):\n return url_for(role.index)\n\n\ndef process_url_for(app):\n @app.context_processor\n def override_url_for():\n return dict(url_for=dated_url_for)\n\n def dated_url_for(endpoint, **values):\n if endpoint == 'static':\n filename = values.get('filename', None)\n if filename:\n file_path = os.path.join(app.root_path,\n endpoint, filename)\n values['q'] = int(os.stat(file_path).st_mtime)\n return url_for(endpoint, **values)\n", "id": "3718939", "language": "Python", "matching_score": 0.10078401863574982, "max_stars_count": 0, "path": "app/utils.py" }, { "content": "#!/usr/bin/env python3\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport dash_table\nimport plotly.graph_objs as go\nimport pandas as pd\nimport numpy as np\nimport os\nimport math\nfrom scipy.stats import mannwhitneyu, ttest_ind\nfrom nutris import nutris\n\nBASEPATH = \"/data\"\n\napp = dash.Dash(__name__)\napp.config['suppress_callback_exceptions']=True\n\n\ndef combine_all_data():\n print(\"getting new data\")\n survey_df = pd.DataFrame()\n #tracking_files = {}\n machineLayouts = pd.DataFrame()\n timings = pd.DataFrame()\n\n for filename in os.listdir(BASEPATH):\n if \".csv\" in filename:\n if \"machineLayout\" in filename:\n user_id = filename.split(\"_\")[0]\n task = filename.split(\"_\")[1]\n #the machinelayout is the same for all tasks no need to store it multiple times\n #extract the machine layout\n machinelayout_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), sep=';')\n machinelayout_df_tmp[\"user_id\"] = user_id\n 
machinelayout_df_tmp[\"task\"] = task\n machineLayouts = machineLayouts.append(machinelayout_df_tmp, ignore_index=True)\n if \"_trackings_\" in filename:\n user_id = filename.split(\"_\")[0]\n task = filename.split(\"_\")[1]\n timings_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), sep=',')\n timings = timings.append({\"user_id\":user_id, \"task\":task, \"time\":timings_df_tmp.iloc[-1][\"timestamp\"] / 1000}, ignore_index=True)\n\n for filename in os.listdir(BASEPATH):\n if \".csv\" in filename and not \"BAK\" in filename:\n if \"_evaluation_\" in filename:\n survey_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), index_col=\"user_id\", sep=';')\n survey_df = survey_df_tmp.combine_first(survey_df)\n elif \"_basic_\" in filename:\n survey_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), index_col=\"user_id\", sep=';')\n survey_df = survey_df_tmp.combine_first(survey_df)\n elif \"_guess_\" in filename:\n survey_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), index_col=\"user_id\", sep=';')\n survey_df = survey_df_tmp.combine_first(survey_df)\n elif \"_task_\" in filename:\n #extract the nutriscore & label from machine layout if available\n survey_df_tmp = pd.read_csv(os.path.join(BASEPATH, filename), index_col=\"user_id\", sep=';')\n user_id = str(survey_df_tmp.index[0])\n #assuming there is only one row in the survey_task.csv, which is fine if data from typeform\n for taskNr in range(1,5):\n try:\n product = machineLayouts[ (machineLayouts[\"user_id\"] == user_id) & \\\n (machineLayouts[\"BoxNr\"] == int(survey_df_tmp[\"t_{}\".format(taskNr)].iloc[0]))\n ].iloc[0]\n survey_df_tmp[\"nutri_label_{}\".format(taskNr)] = product[\"ProductNutriLabel\"]\n survey_df_tmp[\"nutri_score_{}\".format(taskNr)] = product[\"ProductNutriScore\"]\n survey_df_tmp[\"energy_{}\".format(taskNr)] = nutris[product[\"ProductId\"]][\"energy\"]\n survey_df_tmp[\"sugar_{}\".format(taskNr)] = nutris[product[\"ProductId\"]][\"sugar\"]\n survey_df_tmp[\"sat_fat_{}\".format(taskNr)] = nutris[product[\"ProductId\"]][\"sat_fat\"]\n survey_df_tmp[\"natrium_{}\".format(taskNr)] = nutris[product[\"ProductId\"]][\"natrium\"]\n survey_df_tmp[\"protein_{}\".format(taskNr)] = nutris[product[\"ProductId\"]][\"protein\"]\n survey_df_tmp[\"fiber_{}\".format(taskNr)]= nutris[product[\"ProductId\"]][\"fiber\"]\n survey_df_tmp[\"health_percentage_{}\".format(taskNr)] = nutris[product[\"ProductId\"]][\"health_percentage\"]\n survey_df_tmp[\"time_{}\".format(taskNr)] = timings.loc[(timings[\"user_id\"]==user_id) & (timings[\"task\"]==str(taskNr)),\"time\"].iloc[0]\n except:\n survey_df_tmp[\"nutri_label_{}\".format(taskNr)] = None\n survey_df_tmp[\"nutri_score_{}\".format(taskNr)] = None\n survey_df_tmp[\"energy_{}\".format(taskNr)] = None\n survey_df_tmp[\"sugar_{}\".format(taskNr)] = None\n survey_df_tmp[\"sat_fat_{}\".format(taskNr)] = None\n survey_df_tmp[\"natrium_{}\".format(taskNr)] = None\n survey_df_tmp[\"protein_{}\".format(taskNr)] = None\n survey_df_tmp[\"fiber_{}\".format(taskNr)]= None\n survey_df_tmp[\"health_percentage_{}\".format(taskNr)] = None\n survey_df_tmp[\"time_{}\".format(taskNr)] = None \n survey_df = survey_df_tmp.combine_first(survey_df)\n\n age_classes = {\n 0: \"0.) < 19yrs\",\n 1: \"1.) 20 - 29 yrs\",\n 2: \"2.) 30 - 49 yrs\",\n 3: \"2.) 30 - 49 yrs\",\n 4: \"3.) 50 - 65 yrs\",\n 5: \"4.) > 65 yrs\",\n 6: \"4.) 
> 65 yrs\"}\n\n survey_df[\"age_class\"] = survey_df[\"age\"].apply(lambda x: safe_dict(x, age_classes))\n\n ages = {\n 0: 18,\n 1: 25,\n 2: 35,\n 2: 45,\n 3: 57,\n 4: 72,\n 5: 85\n }\n survey_df[\"age\"] = survey_df[\"age\"].apply(lambda x: safe_dict(x, ages))\n\n weights = {\n \"39-\": 35,\n \"40-49\": 45,\n \"50-59\": 55,\n \"60-69\": 65,\n \"70-79\": 75,\n \"80-89\": 85,\n \"90-99\": 95,\n \"100-109\": 105,\n \"110-119\": 115,\n \"120-129\": 125,\n \"130-139\": 135,\n \"140-149\": 145,\n \"150+\": 155\n }\n survey_df[\"weight\"] = survey_df[\"weight\"].apply(lambda x: safe_dict(x, weights, False))\n\n heights = {\n \"139-\": 1.35,\n \"140-149\": 1.45,\n \"150-159\": 1.55,\n \"160-169\": 1.65,\n \"170-179\": 1.75,\n \"180-189\": 1.85,\n \"190-199\": 1.95,\n \"200-209\": 2.05,\n \"210+\": 2.15\n }\n\n survey_df[\"height\"] = survey_df[\"height\"].apply(lambda x: safe_dict(x, heights, False))\n\n genders = {\n \"male\": \"0.) Male\",\n \"female\": \"1.)Female\"\n }\n\n survey_df[\"gender\"] = survey_df[\"gender\"].apply(lambda x: safe_dict(x, genders, False))\n\n survey_df[\"bmi\"] = survey_df[\"weight\"] / (survey_df[\"height\"] * survey_df[\"height\"])\n\n survey_df[\"bmi_class\"] = survey_df[\"bmi\"].apply(bmi_class)\n\n diets = {\n \"No I don't follow a certain diet\": \"None\",\n \"Nein, ich folge keiner bestimmten Diät\": \"None\",\n \"I avoid certain foods because of an allergy or food intolerance\": \"Allergy / Intolerance\",\n \"Ich vermeide bestimmte Lebensmittel wegen Allergie oder Unverträglichkeit\": \"Allergy / Intolerance\",\n \"I eat vegetarian\": \"Vegiatrian / Vegan\",\n \"Ich esse vegetarisch (ovo-lacto-vegetarisch, lacto-vegetarisch)\": \"Vegiatrian / Vegan\",\n \"I eat vegan\": \"Vegiatrian / Vegan\",\n \"Ich esse vegan\": \"Vegiatrian / Vegan\",\n \"I avoid certain foods for ethical/cultural/religious reasons\": \"Cultural / Ethnical\",\n \"Ich vermeide bestimmte Lebensmittel aus ethischen, kulturellen oder religiösen Gründen\": \"Cultural / Ethnical\",\n \"I follow a high carbohydrate diet\": \"High Carb\",\n \"Ich esse kohlenhydratreich\": \"High Carb\",\n \"I follow a diet low in carbohydrates\": \"Low Carb\",\n \"Ich esse kohlenhydrat-arm\": \"Low Carb\",\n \"I follow a low fat or cholosterol diet\": \"Low Fat\",\n \"Ich esse fettarm oder cholesterin-arm\": \"Low Fat\",\n \"I follow a diet with reduced salt consumption\": \"Low Salt\",\n \"Ich esse salz-reduziert\": \"Low Salt\",\n \"I follow a diet low in protein\": \"Low Protein\",\n \"Ich esse protein-arm\": \"Low Protein\",\n \"I follow a diet rich in protein\": \"High Protein\",\n \"Ich esse protein-reich\": \"High Protein\",\n \"I follow an environmentally friendly / sustainable diet\": \"Sustainable\",\n \"Ich ernähre mich umweltreundlich und nachhaltig\": \"Sustainable\",\n }\n\n survey_df[\"diet\"] = survey_df[\"diet\"].apply(lambda x: safe_dict(x, diets, False))\n\n educations = {\n \"Manditory School\": \"0:) primary education\",\n \"Middle school\": \"0:) primary education\",\n \"High school\": \"1.) secondary education\",\n \"Vocational school\": \"1.) secondary education\",\n \"master's diploma\": \"2.) tertiary education\",\n \"College / University\": \"2.) tertiary education\",\n \"Obligatorische Schule\": \"0:) primary education\",\n \"Weiterführende Schule\": \"0:) primary education\",\n \"Matura\": \"1.) secondary education\",\n \"Berufsschule\": \"1.) secondary education\",\n \"Meister- / eidg. Diplom\": \"2.) tertiary education\",\n \"Studium\": \"2.) 
tertiary education\",\n }\n\n survey_df[\"education\"] = survey_df[\"education\"].apply(lambda x: safe_dict(x, educations, False))\n\n snack_frequencies = {\n \"sehr selten bis nie\": \"0.) never\",\n \"never\":\"0.) never\",\n \"once or twice per year\":\"0.) never\",\n \"ca. monatlich\":\"1.) monthly\",\n \"monthly\":\"1.) monthly\",\n \"ca. wöchentlich\":\"2.) weekly\",\n \"weekly\":\"2.) weekly\",\n \"ca. 2-3 mal pro Woche\":\"2.) weekly\",\n \"ca. 4-5 mal pro Woche\":\"3.) almost daily\",\n \"daily\":\"3.) almost daily\",\n \"ca. täglich\":\"3.) almost daily\",\n }\n\n snack_frequencies_int = {\n \"sehr selten bis nie\": 0,\n \"never\":0,\n \"once or twice per year\":0,\n \"ca. monatlich\":1,\n \"monthly\":1,\n \"ca. wöchentlich\":4,\n \"weekly\":4,\n \"ca. 2-3 mal pro Woche\":10,\n \"ca. 4-5 mal pro Woche\":20,\n \"daily\":31,\n \"ca. täglich\":31,\n }\n\n survey_df[\"snack_frequency_int\"] = survey_df[\"snack_frequency\"].apply(lambda x: safe_dict(x, snack_frequencies_int, False))\n survey_df[\"snack_frequency\"] = survey_df[\"snack_frequency\"].apply(lambda x: safe_dict(x, snack_frequencies, False))\n\n ar_frequencies = {\n \"Never used\":\"0.) Never\",\n \"Noch nie benutz\":\"0.) Never\",\n \"Tried once or twice\":\"1.) Few Times\",\n \"Schon ein oder zwei Mal benutzt\":\"1.) Few Times\",\n \"I use it sometimes\":\"2.) Sometimes\",\n \"Ich benutze es hin und wieder privat\":\"2.) Sometimes\",\n \"I worked with it on a project\":\"3.) Regularly\",\n \"Ich habe an einem Projekt damit gearbeitet\":\"3.) Regularly\",\n \"I use it regularly for private purpose\":\"3.) Regularly\",\n \"Ich benutze es regelmäßig privat\":\"3.) Regularly\",\n \"It is part of my job on a regular basis\":\"3.) Regularly\",\n \"Ich komme auf der Arbeit regelmäßig damit in Kontakt\":\"3.) Regularly\",\n \"I am an expert / developer in the field\":\"4.) Expert\",\n \"Ich bin ein Experte / Entwickler auf dem Feld\":\"4.) 
Expert\",\n }\n\n ar_frequencies_int = {\n \"Never used\":0,\n \"Noch nie benutz\":0,\n \"Tried once or twice\":1,\n \"Schon ein oder zwei Mal benutzt\":1,\n \"I use it sometimes\":2,\n \"Ich benutze es hin und wieder privat\":2,\n \"I worked with it on a project\":3,\n \"Ich habe an einem Projekt damit gearbeitet\":3,\n \"I use it regularly for private purpose\":3,\n \"Ich benutze es regelmäßig privat\":3,\n \"It is part of my job on a regular basis\":3,\n \"Ich komme auf der Arbeit regelmäßig damit in Kontakt\":3,\n \"I am an expert / developer in the field\":4,\n \"Ich bin ein Experte / Entwickler auf dem Feld\":4,\n }\n\n survey_df[\"ar_frequency_int\"] = survey_df[\"ar_frequency\"].apply(lambda x: safe_dict(x, ar_frequencies_int, False))\n survey_df[\"ar_frequency\"] = survey_df[\"ar_frequency\"].apply(lambda x: safe_dict(x, ar_frequencies, False))\n\n survey_df[\"BI_avg\"] = survey_df[[\"BI1\", \"BI2\",\"BI3\"]].mean(axis=1, numeric_only=True)\n survey_df[\"EE_avg\"] = survey_df[[\"EE1\", \"EE2\",\"EE3\"]].mean(axis=1, numeric_only=True)\n survey_df[\"FL_avg\"] = survey_df[[\"FL2\",\"FL3\"]].mean(axis=1, numeric_only=True)\n survey_df[\"HM_avg\"] = survey_df[[\"HM1\", \"HM2\"]].mean(axis=1, numeric_only=True)\n survey_df[\"IE_avg\"] = survey_df[[\"IE1\", \"IE2\"]].mean(axis=1, numeric_only=True)\n survey_df[\"PE_avg\"] = survey_df[[\"PE1\", \"PE2\",\"PE3\"]].mean(axis=1, numeric_only=True)\n survey_df[\"PI_avg\"] = survey_df[[\"PI1\", \"PI2\",\"PI3\"]].mean(axis=1, numeric_only=True)\n survey_df[\"SI_avg\"] = survey_df[[\"SI1\", \"SI2\",\"SI3\"]].mean(axis=1, numeric_only=True)\n \n survey_df.fillna(value=pd.np.nan, inplace=True)\n\n return survey_df\n\n\ndef render_box_per_col(col, survey_df):\n is_test = survey_df[\"group\"] == \"Test\"\n is_control = survey_df[\"group\"] == \"Control\"\n data = []\n data.append(go.Box(\n x = survey_df[col][is_test],\n name=\"test\",\n marker = dict(\n color = 'rgb(7,40,89)'),\n line = dict(\n color = 'rgb(7,40,89)')\n ))\n data.append(go.Box(\n x = survey_df[col][is_control],\n name=\"control\",\n marker = dict(\n color = 'rgb(107,174,214)'),\n line = dict(\n color = 'rgb(107,174,214)')\n ))\n\n graph = dcc.Graph(\n figure = go.Figure( \n data = data,\n layout = go.Layout(\n showlegend=True,\n legend=go.layout.Legend(\n x=0,\n y=1.0\n ),\n margin=go.layout.Margin(l=40, r=0, t=40, b=30)\n )\n ),\n style={'height': 150},\n id='box_{}'.format(col)\n )\n\n graph_div = html.Div([graph], \n style={'padding-top': '20',\n 'padding-bottom': '20'})\n\n return graph_div\n\ndef data_per_col(col, survey_df):\n is_test = survey_df[\"group\"] == \"Test\"\n is_control = survey_df[\"group\"] == \"Control\"\n\n data = [\n go.Histogram(\n x = survey_df[col][is_test].sort_values(),\n name=\"test\",\n opacity=0.75,\n marker = dict(\n color = 'rgb(7,40,89)'),\n ),\n go.Histogram(\n x = survey_df[col][is_control].sort_values(),\n name=\"control\",\n opacity=0.75,\n marker = dict(\n color = 'rgb(107,174,214)'),\n )\n ]\n return data\n\ndef render_hist_per_col(col, survey_df):\n data = data_per_col(col, survey_df)\n\n graph = dcc.Graph(\n figure = go.Figure( \n data = data,\n layout = go.Layout(\n showlegend=True,\n legend=go.layout.Legend(\n x=0,\n y=1.0\n ),\n margin=go.layout.Margin(l=40, r=0, t=40, b=30)\n )\n ),\n style={'height': 300}\n )\n\n graph_div = html.Div([graph], \n style={'padding-top': '20',\n 'padding-bottom': '20'})\n\n return graph_div\n\ndef render_table(survey_df):\n table = dash_table.DataTable(\n id='table',\n columns=[{\"name\": i, \"id\": i} for 
i in survey_df.columns],\n data=survey_df.to_dict(\"rows\"),\n )\n return table \n\n#def load_user_tracking(user_id, task_id):\n# filename = tracking_files[user_id][task_id]\n\ndef calc_p_whitney(col, s, ns):\n Rg = col.rank()\n \n nt = col[s].count()\n nc = col[ns].count()\n\n if (Rg == Rg.iloc[0]).all():\n return Rg[s].sum() - nt * (nt + 1) / 2, 0.5, nt, nc\n\n u, p = stats.mannwhitneyu(Rg[s], Rg[ns])\n return p\n\n# def calc_p_whitney(colname, survey_df):\n# col = survey_df[colname]\n# istest = survey_df[\"group\"]==\"Test\"\n# iscontrol = survey_df[\"group\"]==\"Control\"\n# Rg = col.rank()\n#\n# nt = col[istest].count()\n# nc = col[iscontrol].count()\n#\n# if (Rg == Rg.iloc[0]).all():\n# return Rg[istest].sum() - nt * (nt + 1) / 2, 0.5, nt, nc\n#\n# u, p = mannwhitneyu(Rg[istest], Rg[iscontrol])\n# return u, p, nt, nc\n\ndef calc_p_t(colname, survey_df):\n col = survey_df[colname]\n istest = survey_df[\"group\"]==\"Test\"\n iscontrol = survey_df[\"group\"]==\"Control\"\n\n t, p = ttest_ind(col[istest].values, col[iscontrol].values, axis=0, nan_policy='omit')\n return t, p\n\ndef table_group(task_nr, survey_df, header):\n istest = survey_df[\"group\"] == \"Test\"\n iscontrol = survey_df[\"group\"] == \"Control\"\n\n isoverweight = survey_df[\"bmi\"] > 25\n isnormal = survey_df[\"bmi\"] <= 25\n \n iseducated = survey_df[\"bmi\"] > 25\n\n isliterate = survey_df[\"FL_avg\"] > 4.5\n isilliterate = survey_df[\"FL_avg\"] <= 4.5\n\n cols = [\"nutri_score\",\n \"energy\",\n \"sat_fat\",\n \"sugar\",\n \"natrium\",\n \"protein\",\n \"fiber\",\n \"health_percentage\",\n \"time\"]\n\n data = pd.DataFrame()\n for col in cols:\n col_name = \"{}_{}\".format(col, task_nr)\n data.loc[col, \"N Total\"] = \"[{}]\".format(int(data.loc[col, \"N Test\"] + data.loc[col, \"N Control\"]))\n data.loc[col, \"mean Total\"] = \"{:.2f}\".format(survey_df[col_name].mean())\n data.loc[col, \"SD Total\"] = \"({:.2f})\".format(survey_df[col_name].std())\n \n p = calc_p_whitney(survey_df[\"group\"], istest, iscontrol)\n data.loc[col, \"p group\"] = \"{:.4f}\".format(p)\n data.loc[col, \"N Test\"] = \"[{}]\".format(int(len(survey_df[istest])))\n data.loc[col, \"mean Test\"] = \"{:.2f}\".format(survey_df[col_name][istest].mean())\n data.loc[col, \"SD Test\"] = \"({:.2f})\".format(survey_df[col_name][istest].std())\n data.loc[col, \"N Control\"] = \"[{}]\".format(int(len(survey_df[iscontrol])))\n data.loc[col, \"mean Control\"] = \"{:.2f}\".format(survey_df[col_name][iscontrol].mean())\n data.loc[col, \"SD Control\"] = \"({:.2f})\".format(survey_df[col_name][iscontrol].std())\n \n p = calc_p_whitney(survey_df[\"FL_avg\"], isliterate, isilliterate)\n data.loc[col, \"p FL\"] = \"{:.4f}\".format(p)\n data.loc[col, \"N FL>4.5\"] = \"[{}]\".format(int(len(survey_df[isliterate])))\n data.loc[col, \"mean FL>4.5\"] = \"{:.2f}\".format(survey_df[col_name][isliterate].mean())\n data.loc[col, \"SD FL>4.5\"] = \"({:.2f})\".format(survey_df[col_name][isliterate].std())\n data.loc[col, \"N FL<=4.5\"] = \"[{}]\".format(int(len(survey_df[isilliterate])))\n data.loc[col, \"mean FL<=4.5\"] = \"{:.2f}\".format(survey_df[col_name][isilliterate].mean())\n data.loc[col, \"SD FL<=4.5\"] = \"({:.2f})\".format(survey_df[col_name][isilliterate].std())\n\n p = calc_p_whitney(survey_df[\"FL_avg\"], isliterate, isilliterate)\n data.loc[col, \"p FL\"] = \"{:.4f}\".format(p)\n data.loc[col, \"N FL>4.5\"] = \"[{}]\".format(int(len(survey_df[isliterate])))\n data.loc[col, \"mean FL>4.5\"] = \"{:.2f}\".format(survey_df[col_name][isliterate].mean())\n 
data.loc[col, \"SD FL>4.5\"] = \"({:.2f})\".format(survey_df[col_name][isliterate].std())\n data.loc[col, \"N FL<=4.5\"] = \"[{}]\".format(int(len(survey_df[isilliterate])))\n data.loc[col, \"mean FL<=4.5\"] = \"{:.2f}\".format(survey_df[col_name][isilliterate].mean())\n data.loc[col, \"SD FL<=4.5\"] = \"({:.2f})\".format(survey_df[col_name][isilliterate].std())\n\n\n data[\"index\"] = data.index\n data_dict = data.to_dict(\"rows\")\n\n table = dash_table.DataTable(\n id='table',\n columns=[ {\"name\": \"\", \"id\": \"index\"},\n {\"name\": \"u\", \"id\": \"u\"},\n {\"name\": \"p\", \"id\": \"p\"},\n {\"name\": \"Total mean\", \"id\": \"mean Total\"},\n {\"name\": \"(SD)\", \"id\": \"SD Total\"},\n {\"name\": \"[N]\", \"id\": \"Total N\"},\n {\"name\": \"Test mean\", \"id\": \"mean Test\"},\n {\"name\": \"(SD)\", \"id\": \"SD Test\"},\n {\"name\": \"[N]\", \"id\": \"Test N\"},\n {\"name\": \"Control mean\", \"id\": \"mean Control\"},\n {\"name\": \"(SD)\", \"id\": \"SD Control\"},\n {\"name\": \"[N]\", \"id\": \"Control N\"}],\n data=data_dict,\n style_as_list_view=True,\n style_cell={'padding': '5px'},\n style_header={\n 'backgroundColor': 'white',\n 'fontWeight': 'bold'\n },\n style_cell_conditional=[\n {\n 'if': {'column_id': c},\n 'textAlign': 'left'\n } for c in ['index','SD Total', 'SD Test', 'SD Control', 'Total N', 'Test N', 'Control N']\n ],\n )\n\n ret_div = html.Div([\n html.H1(\"Task {}\".format(task_nr)),\n html.H2(header),\n html.Div( [table],\n style={ 'padding-top': '10',\n 'padding-bottom': '30',\n 'padding-left': '30',\n 'padding-right': '5'}),\n render_box_per_col(\"nutri_score_{}\".format(task_nr), survey_df),\n render_hist_per_col(\"nutri_label_{}\".format(task_nr), survey_df.sort_values(by=\"nutri_label_{}\".format(task_nr)))\n ])\n \n return ret_div\n\ndef creat_mean_desc(col, survey_df, header = None):\n data = pd.DataFrame()\n istest = survey_df[\"group\"] == \"Test\"\n iscontrol = survey_df[\"group\"] == \"Control\"\n if isinstance(header, str):\n title = html.H3(header)\n else:\n title = html.H3(col)\n\n ret_div = html.Div([title,\n html.P(\"Total mean (SD) \\t\\t {:.2f} ({:.2f})\".format(survey_df[col].mean(), survey_df[col].std())),\n html.P(\"Test mean (SD) \\t\\t {:.2f} ({:.2f})\".format(survey_df[col][istest].mean(), survey_df[col][istest].std())),\n html.P(\"Control mean (SD) \\t\\t {:.2f} ({:.2f})\".format(survey_df[col][iscontrol].mean(), survey_df[col][iscontrol].std())),\n render_box_per_col(col, survey_df)])\n\n return ret_div\n\ndef create_count_desc(col, survey_df, header=None):\n data = pd.DataFrame()\n istest = survey_df[\"group\"] == \"Test\"\n iscontrol = survey_df[\"group\"] == \"Control\"\n survey_df.loc[survey_df[col].isna(),col] = \"Missing\"\n data[\"count Total\"] = survey_df[col].value_counts()\n data[\"% Total\"] = (data[\"count Total\"] / data[\"count Total\"].sum() * 100).apply(lambda x : \"({:.1f}%)\".format(x))\n data.loc[\"Total\", \"count Total\"] = data[\"count Total\"].sum()\n data[\"count Test\"] = survey_df[col][istest].value_counts()\n data[\"% Test\"] = (data[\"count Test\"] / data[\"count Test\"].sum() * 100).apply(lambda x : \"({:.1f}%)\".format(x))\n data.loc[\"Total\", \"count Test\"] = data[\"count Test\"].sum()\n data[\"count Control\"] = survey_df[col][iscontrol].value_counts()\n data[\"% Control\"] = (data[\"count Control\"] / data[\"count Control\"].sum() * 100).apply(lambda x : \"({:.1f}%)\".format(x))\n data.loc[\"Total\", \"count Control\"] = data[\"count Control\"].sum()\n data.loc[\"Total\", [\"% Total\",\"% 
Test\",\"% Control\"]] = \"\"\n data[\"index\"] = data.index\n\n data = data.sort_index()\n\n data_dict = data.to_dict(\"rows\")\n\n table = dash_table.DataTable(\n id='table',\n columns=[ {\"name\": \"\", \"id\": \"index\"},\n {\"name\": \"Total N\", \"id\": \"count Total\"},\n {\"name\": \"(%)\", \"id\": \"% Total\"},\n {\"name\": \"Test N\", \"id\": \"count Test\"},\n {\"name\": \"(%)\", \"id\": \"% Test\"},\n {\"name\": \"Control N\", \"id\": \"count Control\"},\n {\"name\": \"(%)\", \"id\": \"% Control\"},],\n data=data_dict,\n style_as_list_view=True,\n style_cell={'padding': '5px'},\n style_header={\n 'backgroundColor': 'white',\n 'fontWeight': 'bold'\n },\n style_cell_conditional=[\n {\n 'if': {'column_id': c},\n 'textAlign': 'left'\n } for c in ['index', '% Total', '% Test', '% Control']\n ],\n )\n\n if isinstance(header, str):\n title = html.H3(header)\n else:\n title = html.H3(col)\n\n ret_div = html.Div([title,\n html.Div( [table],\n style={ 'padding-top': '10',\n 'padding-bottom': '30',\n 'padding-left': '30',\n 'padding-right': '5'}),\n render_hist_per_col(col, survey_df), \n ])\n\n return ret_div\n\ndef get_question_text_save(col, questions_df, question_ids):\n try:\n question_text = questions_df[\" question.text,\"][question_ids[col]]\n except:\n question_text = \"Error: Question wasn't found\"\n return question_text\n\ndef create_survey(cols, survey_df, header):\n questionsfile = os.path.join(BASEPATH, \"questionlayout-evaluation.csv\")\n questions_df = pd.read_csv(questionsfile, sep=\";\", index_col=\"question.id\")\n questions_df[\"time_1\"] = \"task 1\"\n questions_df[\"time_2\"] = \"task 2\"\n questions_df[\"time_3\"] = \"task 3\"\n questions_df[\"time_4\"] = \"task 4\"\n\n question_ids = {\n \"IE1\":\"jcruLQD1jtsb\",\n \"IE2\":\"eaTgLd8mTqIl\",\n \"PE1\":\"q0mA3PRRFjx7\",\n \"PE2\":\"sBItcnzLbeab\",\n \"PE3\":\"HNBvOMYBB0aG\",\n \"EE1\":\"MEMNKBeL1Yx1\",\n \"EE2\":\"erPaRi4mPyPG\",\n \"EE3\":\"QVMeswBQSWAi\",\n \"SI1\":\"xdCMMXgxnem1\",\n \"SI2\":\"wfA9uqPz8cRt\",\n \"SI3\":\"xUlfUW6JGEav\",\n \"HM1\":\"JYEh0RF8Fm8b\",\n \"HM2\":\"DuGG9VdyhxCd\",\n \"PI1\":\"Y4v77TAeZzKs\",\n \"PI2\":\"QVzNIkgWgGxB\",\n \"PI3\":\"BQXqCdJgdxle\",\n \"BI1\":\"b4YNQSqEHFaE\",\n \"BI2\":\"GfV0SwI2TmuK\",\n \"BI3\":\"PEWOeMEEayNA\",\n \"FL1\":\"Wiq2wP97n7RO\",\n \"FL2\":\"zDVqi1Ti9Nwq\",\n \"FL3\":\"WeELc4DWjE6P\",\n \"time_1\":\"time_1\",\n \"time_2\":\"time_2\",\n \"time_3\":\"time_3\",\n \"time_4\":\"time_4\",\n }\n \n question_texts = {col: get_question_text_save(col, questions_df, question_ids) for col in cols}\n question_texts[\"Average\"] = \"--- Average ---\"\n\n survey_df_tmp = survey_df.loc[:,cols]\n survey_df_tmp.loc[:,\"Average\"] = survey_df_tmp.mean(axis=1,numeric_only=True)\n survey_df_tmp.loc[:,\"group\"] = survey_df.loc[:,\"group\"]\n cols.append(\"Average\")\n\n data = pd.DataFrame()\n istest = survey_df[\"group\"] == \"Test\"\n iscontrol = survey_df[\"group\"] == \"Control\"\n data[\"mean Total\"] = survey_df_tmp[cols].mean().apply(lambda x : \"{:.2f}\".format(x))\n data[\"SD Total\"] = survey_df_tmp[cols].std().apply(lambda x : \"({:.2f})\".format(x))\n data[\"mean Test\"] = survey_df_tmp[cols][istest].mean().apply(lambda x : \"{:.2f}\".format(x))\n data[\"SD Test\"] = survey_df_tmp[cols][istest].std().apply(lambda x : \"({:.2f})\".format(x))\n data[\"mean Control\"] = survey_df_tmp[cols][iscontrol].mean().apply(lambda x : \"{:.2f}\".format(x))\n data[\"SD Control\"] = survey_df_tmp[cols][iscontrol].std().apply(lambda x : \"({:.2f})\".format(x))\n data[\"question\"] = 
pd.Series(question_texts)\n \n for col in cols:\n _, data.loc[col, \"p (rank)\"], _, _ = calc_p_whitney(col, survey_df_tmp)\n _, data.loc[col, \"p (t)\"] = calc_p_t(col, survey_df_tmp)\n\n data[\"p (rank)\"] = data[\"p (rank)\"].apply(lambda x : \"{:.4f}\".format(x))\n data[\"p (t)\"] = data[\"p (t)\"].apply(lambda x : \"{:.4f}\".format(x))\n\n data_dict = data.to_dict(\"rows\")\n\n table = dash_table.DataTable(\n id='table',\n columns=[ {\"name\": \"\", \"id\": \"question\"},\n {\"name\": \"Total mean\", \"id\": \"mean Total\"},\n {\"name\": \"(SD)\", \"id\": \"SD Total\"},\n {\"name\": \"Test mean\", \"id\": \"mean Test\"},\n {\"name\": \"(SD)\", \"id\": \"SD Test\"},\n {\"name\": \"Control mean\", \"id\": \"mean Control\"},\n {\"name\": \"(SD)\", \"id\": \"SD Control\"},\n {\"name\": \"p (rank)\", \"id\": \"p (rank)\"},\n {\"name\": \"p (t)\", \"id\": \"p (t)\"}],\n data=data_dict,\n style_as_list_view=True,\n style_cell={'padding': '5px'},\n style_header={\n 'backgroundColor': 'white',\n 'fontWeight': 'bold'\n },\n style_cell_conditional=[\n {\n 'if': {'column_id': c},\n 'textAlign': 'left'\n } for c in ['question', 'SD Total', 'SD Test', 'SD Control', \"u\"]\n ],\n )\n\n ret_div = html.Div([html.H3(header),\n html.Div( [table],\n style={ 'padding-top': '10',\n 'padding-bottom': '30',\n 'padding-left': '30',\n 'padding-right': '5'})])\n\n return ret_div\n\n\ndef bmi_class(bmi):\n if bmi < 18.5:\n return \"0:) Underweight (BMI < 18.5)\"\n elif bmi < 25:\n return \"1.) Normal (18.5 ≤ BMI < 25)\"\n elif bmi < 30:\n return \"2.) Overweight (25 ≤ BMI < 30)\"\n else:\n return \"3.) Obese (30 ≤ BMI\"\n\ndef safe_dict(_key, _dict, _int=True):\n try:\n if _int:\n val = _dict[int(_key)]\n else:\n val = _dict[_key]\n except:\n val = None\n return val\n \n\napp.layout = html.Div([\n html.Button(\"Refresh\", id=\"refresh\"),\n html.Div([], \n id=\"graphs\", \n style={'width':'70%',\n 'padding-top': '40',\n 'padding-bottom': '10',\n 'padding-left': '50',\n 'padding-right': '50'}),\n])\n\n\n\[email protected](Output(\"graphs\", \"children\"),\n [Input(\"refresh\", \"n_clicks\")])\ndef update_survey(_):\n survey_df = combine_all_data()\n\n print(\"printing new data\")\n return [creat_mean_desc(\"age\", survey_df, \"Age\"),\n create_count_desc(\"age_class\", survey_df, \"Age\"),\n creat_mean_desc(\"bmi\", survey_df, \"Body Mass Index\"),\n create_count_desc(\"bmi_class\", survey_df, \"Weight\"),\n create_count_desc(\"gender\", survey_df, \"Gender\"),\n create_count_desc(\"education\", survey_df, \"Education\"),\n create_count_desc(\"snack_frequency\", survey_df, \"Machine Usage Frequency\"),\n creat_mean_desc(\"snack_frequency_int\", survey_df, \"Machine Usage Frequency\"),\n create_count_desc(\"ar_frequency\", survey_df, \"AR Usage Frequency\"),\n create_survey([\"ar_frequency_int\"],\n survey_df,\n \"AR Frequency\"),\n html.Hr(),\n table_group(1, survey_df, \"Choose a snack of your choice\"),\n html.Hr(),\n table_group(2, survey_df,\"Choose a drink of your choice\"),\n html.Hr(),\n table_group(3, survey_df,\"Choose the healthiest snack\"),\n html.Hr(),\n table_group(4, survey_df,\"Choose the healthiest drink\"),\n html.Hr(),\n create_survey([\"time_1\", \"time_2\",\"time_3\",\"time_4\"],\n survey_df,\n \"Time Taken per Task\"),\n html.Hr(), \n create_survey([\"IE1\", \"IE2\"],\n survey_df,\n \"Intervention Effect\"),\n create_survey([\"PE1\", \"PE2\", \"PE3\"],\n survey_df,\n \"Performance Expectancy\"),\n create_survey([\"EE1\", \"EE2\", \"EE3\"],\n survey_df,\n \"Effort Expectancy\"),\n 
create_survey([\"SI2\", \"SI3\"],\n survey_df,\n \"Social Influence\"),\n create_survey([\"HM1\", \"HM2\"],\n survey_df,\n \"Hedonic Motivations\"),\n create_survey([\"PI1\", \"PI2\", \"PI3\"],\n survey_df,\n \"Personal Innovativeness\"),\n create_survey([\"BI1\", \"BI2\", \"BI3\"],\n survey_df,\n \"Behavioural Intention\"),\n create_survey([\"FL2\", \"FL3\"],\n survey_df,\n \"Food Literacy (ohne FL1)\"),\n create_survey([\"FL1\", \"FL2\", \"FL3\"],\n survey_df,\n \"Food Literacy\"),\n create_survey([\"SI1\"],\n survey_df,\n \"Observation Bias\"),\n #render_table(survey_df)\n ]\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, host=\"0.0.0.0\", port=80)", "id": "1695580", "language": "Python", "matching_score": 6.094690322875977, "max_stars_count": 0, "path": "app/dashboard.py" }, { "content": "import os\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, Schema\nfrom pydantic.dataclasses import dataclass\nfrom typing import List\nfrom datetime import datetime\nimport asyncio\nimport pandas as pd\nimport time\nimport redis\n\napp = FastAPI()\n\nBASE_DIR = \"/data\"\n\[email protected](\"/group/{user_id}\")\ndef get_group(user_id):\n basic_df = pd.DataFrame()\n for filename in os.listdir(BASE_DIR):\n if not \".csv\" in filename:\n continue\n elif filename.split(\"_\")[1] in [\"guess\", \"basic\", \"evaluation\", \"task\"]:\n print(filename)\n basic_df_tmp = pd.read_csv(os.path.join(BASE_DIR, filename), index_col=\"user_id\")\n basic_df = basic_df_tmp.combine_first(basic_df)\n \n new_gender = basic_df.loc[int(user_id), \"gender\"] == \"male\"\n new_age = basic_df.loc[int(user_id), \"age\"]\n new_weight = basic_df.loc[int(user_id), \"weight_guess\"]\n new_height = basic_df.loc[int(user_id), \"height_guess\"]\n\n exclude_index = basic_df.index.isin([int(user_id)])\n basic_df = basic_df[~exclude_index]\n\n c_cnt = basic_df[\"gender\"][basic_df[\"group\"]==\"Control\"].count()\n t_cnt = basic_df[\"gender\"][basic_df[\"group\"]==\"Test\"].count()\n\n c_gender = (basic_df[\"gender\"][basic_df[\"group\"]==\"Control\"]==\"male\").mean()\n t_gender = (basic_df[\"gender\"][basic_df[\"group\"]==\"Test\"]==\"male\").mean()\n \n c_age = basic_df[\"age\"][basic_df[\"group\"]==\"Control\"].mean()\n t_age = basic_df[\"age\"][basic_df[\"group\"]==\"Test\"].mean()\n \n c_weight = basic_df[\"weight_guess\"][basic_df[\"group\"]==\"Control\"].mean()\n t_weight = basic_df[\"weight_guess\"][basic_df[\"group\"]==\"Test\"].mean()\n \n c_height = basic_df[\"height_guess\"][basic_df[\"group\"]==\"Control\"].mean()\n t_height = basic_df[\"height_guess\"][basic_df[\"group\"]==\"Test\"].mean()\n \n cnt_div_t = abs(c_cnt - (t_cnt + 1))\n cnt_div_c = abs((c_cnt + 1) - t_cnt)\n\n gender_div_t = abs(c_gender - (t_gender * t_cnt + new_gender) / (t_cnt + 1))\n gender_div_c = abs((c_gender * c_cnt + new_gender) / (c_cnt + 1) - t_gender)\n\n age_div_t = abs(c_age - (t_age * t_cnt + new_age) / (t_cnt + 1))\n age_div_c = abs((c_age * c_cnt + new_age) / (c_cnt + 1) - t_age)\n\n weight_div_t = abs(c_weight - (t_weight * t_cnt + new_weight) / (t_cnt + 1))\n weight_div_c = abs((c_weight * c_cnt + new_weight) / (c_cnt + 1) - t_weight)\n\n height_div_t = abs(c_height - (t_height * t_cnt + new_height) / (t_cnt + 1))\n height_div_c = abs((c_height * c_cnt + new_height) / (c_cnt + 1) - t_height)\n\n div_t = cnt_div_t / 3 + gender_div_t * 7 + age_div_t + weight_div_t / 1.5 + height_div_t / 1.5\n div_c = cnt_div_c / 3 + gender_div_c * 7 + age_div_c + weight_div_c / 1.5 + height_div_c / 1.5\n\n if div_c < div_t:\n return 
{\"group\": \"control\"}\n else:\n return {\"group\": \"test\"}\n\n\nclass Tracking(BaseModel):\n userID: int\n task: int\n language: str\n group: str\n machineLayout: List[str]\n trackings: List[str]\n\n\nasync def join_async(string_base, string_list):\n string_return = string_base.join(string_list) + string_base\n return string_return\n\nasync def create_csv_async(filename, csv_rows):\n csv_content = await join_async(\"\\n\", csv_rows)\n with open(os.path.join(BASE_DIR, filename), \"w+\") as file:\n file.write(csv_content)\n\[email protected](\"/tracking\")\nasync def save_trackings(tracking: Tracking):\n #timestamp to mitigate file overwriting\n now = datetime.now()\n timestamp = \"-\".join([str(now.year), str(now.month), str(now.day), str(now.hour), str(now.minute), str(now.second)])\n\n print(\"Recieved new info at {}\\n--user: {}\\n--task: {}\\n--group:{}\\n--lang:{}\".format(timestamp,\n tracking.userID,\n tracking.task,\n tracking.group,\n tracking.language))\n\n #create csv file for the machine layout\n filename = \"{}_{}_{}_machineLayout_{}.csv\".format(tracking.userID, tracking.task, tracking.group, timestamp)\n task_machineLayout = asyncio.create_task(create_csv_async(filename, tracking.machineLayout))\n shielded_task_mL = asyncio.shield(task_machineLayout)\n\n #Create csv file for the trackings\n filename = \"{}_{}_{}_trackings_{}.csv\".format(tracking.userID, tracking.task, tracking.group, timestamp)\n task_trackings = asyncio.create_task(create_csv_async(filename, tracking.trackings))\n shielded_task_tr = asyncio.shield(task_trackings)\n\n #Todo create a video of the path that the user took\n await task_machineLayout\n if not task_machineLayout.exception() == None:\n print(task_machineLayout.exception())\n await task_trackings\n if not task_trackings.exception() == None:\n print(task_trackings.exception())\n return {\"response\": \"success\"}\n\nclass HiddenAdminValues(BaseModel):\n language: str = None\n user_id: str = None\n group: str = None\n\n#class FieldChoice(BaseModel):\n# id: str\n# label: str\n# ref: str\n\n#class FieldProperties(BaseModel):\n# choices: List[FieldChoice]\n# allow_multiple_selection: bool\n# allow_other_choice: bool\n\nclass Field(BaseModel):\n id: str\n title: str = None\n type: str\n# ref: str\n# properties: FieldProperties = None\n\nclass Definition(BaseModel):\n #id: str\n #title: str\n questions: List[Field] = Schema([], alias='fields')\n #hidden: List[str]\n\nclass Choice(BaseModel):\n label: str = \"\"\n\nclass Choices(BaseModel):\n labels: List[str] = None\n\nclass Answer(BaseModel):\n type: str\n choice: Choice = None\n choices: Choices = None\n text: str = None\n number: int = None\n question: Field = Schema(None, alias='field')\n\nclass FormResponse(BaseModel):\n #form_id: str\n #landed_at: str\n #submitted_at: str\n hidden: HiddenAdminValues = None\n definition: Definition\n answers: List[Answer]\n\nclass TypeformPayload(BaseModel):\n #event_id: str\n #event_type: str\n form_response: FormResponse\n\[email protected](\"/typeform/{step}\")\nasync def save_typeform_survey(step, payload: TypeformPayload):\n now = datetime.now()\n timestamp = \"-\".join([str(now.year), str(now.month), str(now.day), str(now.hour), str(now.minute), str(now.second)])\n\n header = []\n values = []\n\n q_id_dict = {\n \"fd2RZAQ92LwL\":\"user_id\",\n \"us6cowmLSpqC\":\"language\",\n \"N4BgdvOvMDd4\":\"weight_guess\",\n \"f4uxx985fXO3\":\"height_guess\",\n \"mvCGohGGtJJs\":\"gender\",\n \"ANEkf7up6lO4\":\"gender\",\n \"YqZit4V3JnNI\":\"age\",\n 
\"FfS6ByGviR2w\":\"age\",\n \"tYkANiSMQhHK\":\"education\",\n \"zYhZb7roQrYO\":\"education\",\n \"ApbFMPJforlD\":\"snack_frequency\",\n \"tnfl2FOpFBn8\":\"snack_frequency\",\n \"rm8EtBg5q8qx\":\"ar_frequency\",\n \"UBC7mOl9LPpO\":\"ar_frequency\",\n \"uUTub569gl8M\":\"user_id\",\n \"FppYTfu2qOUe\":\"group\",\n \"keyyc8BP77Ud\":\"language\",\n \"aWlNUCPyqAKS\":\"t_1\",\n \"bjSkpKUKgrzJ\":\"t_2\",\n \"hDxO5vsiABGK\":\"t_3\",\n \"jMo4userZWef\":\"t_4\",\n \"jcruLQD1jtsb\":\"IE1\",\n \"lQeuANlzabZO\":\"IE1\",\n \"xdCMMXgxnem1\":\"SI1\",\n \"cpFz6u4Miulb\":\"SI1\",\n \"eaTgLd8mTqIl\":\"IE2\",\n \"L90PEG9f4W3p\":\"IE2\",\n \"bNXJAnStwDfX\":\"EE1\",\n \"MEMNKBeL1Yx1\":\"EE1\",\n \"q0mA3PRRFjx7\":\"PE1\",\n \"VuQerOk1gweh\":\"PE1\",\n \"JYEh0RF8Fm8b\":\"HM1\",\n \"c9Dw9B0KswPW\":\"HM1\",\n \"WeELc4DWjE6P\":\"FL3\",\n \"V8lCzzTn6BRf\":\"FL3\",\n \"GfV0SwI2TmuK\":\"BI2\",\n \"lyboW2WkS3lJ\":\"BI2\",\n \"QVzNIkgWgGxB\":\"PI2\",\n \"Pdv12x3s88Uj\":\"PI2\",\n \"QVMeswBQSWAi\":\"EE3\",\n \"G4zCsxt5ujLA\":\"EE3\",\n \"HNBvOMYBB0aG\":\"PE3\",\n \"zpEpkexg0kg3\":\"PE3\",\n \"xUlfUW6JGEav\":\"SI3\",\n \"QnPCooEP9wLm\":\"SI3\",\n \"b4YNQSqEHFaE\":\"BI1\",\n \"Ek83Q8WpFJIK\":\"BI1\",\n \"Y4v77TAeZzKs\":\"PI1\",\n \"Vph6jKGJqd2H\":\"PI1\",\n \"erPaRi4mPyPG\":\"EE2\",\n \"MvYdT8yr5CJL\":\"EE2\",\n \"PEWOeMEEayNA\":\"BI3\",\n \"Knn4XXYAXDkh\":\"BI3\",\n \"DuGG9VdyhxCd\":\"HM2\",\n \"ztFU1N2uo1cV\":\"HM2\",\n \"Wiq2wP97n7RO\":\"FL1\",\n \"xtTPpLEKJ1a6\":\"FL1\",\n \"sBItcnzLbeab\":\"PE2\",\n \"WKGyYTm1SXGd\":\"PE2\",\n \"BQXqCdJgdxle\":\"PI3\",\n \"AedSxGtELKPw\":\"PI3\",\n \"wfA9uqPz8cRt\":\"SI2\",\n \"zfhxooYmpjqI\":\"SI2\",\n \"zDVqi1Ti9Nwq\":\"FL2\",\n \"ejbVOBOo8mor\":\"FL2\",\n \"sQO3OzQCqxV0\":\"colors1\",\n \"AOAvK7iBrh4V\":\"colors2\",\n \"eLaerlPJtb85\":\"colors1\",\n \"z53SflcCNkzP\":\"colors2\",\n \"DF0piBeuVzuj\":\"weight\",\n \"ylLRc5OsKsiw\":\"weight\",\n \"BIq0vwxQCtbT\":\"height\",\n \"HMuAhpqZqEYG\":\"height\",\n \"B6JXMrdqFECM\":\"diet\",\n \"IVU6MjgXFeCa\":\"diet\",\n }\n\n c_id_dict = {\n \"160cm\":\"0\",\n \"170cm\":\"1\",\n \"180cm\":\"2\",\n \"190cm\":\"3\",\n \"200cm\":\"4\",\n \"skinny\":\"0\",\n \"slim\":\"1\",\n \"normal\":\"2\",\n \"wide\":\"3\",\n \"fat\":\"4\",\n \"Male\": \"male\",\n \"Männlich\": \"male\",\n \"Female\": \"female\",\n \"Weiblich\": \"female\",\n \"Intersexual\": \"inter\",\n \"Intersexuell\": \"inter\",\n \"19 or younger\": \"0\", #<19\n \"19 oder jünger\":\"0\", #<19\n \"20 - 29\":\"1\", #20-29\n \"30 - 39\":\"2\", #30-39\n \"40 - 49\":\"3\", #40-49\n \"50 - 64\":\"4\", #50-64\n \"65 - 79\":\"5\", #65-79\n \"80 or older\":\"6\", #>80\n \"80 oder älter\":\"6\", #>80\n \"39 kg or less\":\"39-\", #<39\n \"39 kg oder weniger\":\"39-\", #<39\n \"40 - 49 kg\":\"40-49\", #40-49\n \"50 - 59 kg\":\"50-59\", #50-59\n \"60 - 69 kg\":\"60-69\", #60-69\n \"70 - 79 kg\":\"70-79\", #70-79\n \"80 - 89 kg\":\"80-89\", #80-89\n \"90 - 99 kg\":\"90-99\", #90-99\n \"100 - 109 kg\":\"100-109\", #100 - 109\n \"110 - 119 kg\":\"110-119\", #110 - 119\n \"120 - 129 kg\":\"120-129\", #120 - 129\n \"130 - 139 kg\":\"130-139\", #130 - 139\n \"140 - 149 kg\":\"140-149\", #140 - 149\n \"150 kg oder mehr\":\"150+\", #>150\n \"150 kg or more\":\"150+\", #>150\n \"139 cm or less\":\"139-\", #<139\n \"139 cm oder weniger\":\"139-\", #<139\n \"140 - 149 cm\":\"140-149\", #140-149\n \"150 - 159 cm\":\"150-159\", #150-159\n \"160 - 169 cm\":\"160-169\", #160-169\n \"170 - 179 cm\":\"170-179\", #170-179\n \"180 - 189 cm\":\"180-189\", #180-189\n \"190 - 199 cm\":\"190-199\", #190-199\n \"200 - 209 
cm\":\"200-209\", #190-199\n \"210cm or more\":\"210+\", #>200\n \"210cm oder mehr\":\"210+\", #>200\n }\n\n for answer in payload.form_response.answers:\n if answer.question.id in q_id_dict:\n header.append(q_id_dict[answer.question.id])\n else:\n print(\"unknown question {}\".format(answer.question.id))\n header.append(answer.question.id)\n \n if answer.type == \"text\":\n values.append(answer.text)\n elif answer.type == \"choice\":\n if header[-1] in [\"weight_guess\",\"height_guess\",\"gender\", \"age\", \"weight\", \"height\"]:\n if answer.choice.label in c_id_dict:\n values.append(c_id_dict[answer.choice.label])\n else:\n values.append(answer.choice.label)\n else:\n values.append(answer.choice.label)\n elif answer.type == \"choices\":\n values.append(\",\".join(answer.choices.labels))\n elif answer.type == \"number\":\n values.append(str(answer.number))\n \n \n\n if \"basic\" in step:\n user_id = payload.form_response.hidden.user_id\n header.append(\"user_id\")\n values.append(user_id)\n language = payload.form_response.hidden.language\n header.append(\"language\")\n values.append(language)\n elif \"evaluation\" in step:\n user_id = payload.form_response.hidden.user_id\n header.append(\"user_id\")\n values.append(user_id)\n language = payload.form_response.hidden.language\n header.append(\"language\")\n values.append(language)\n group = payload.form_response.hidden.group\n header.append(\"group\")\n values.append(group)\n elif \"task\" in step:\n language = values[header.index(\"language\")]\n user_id = values[header.index(\"user_id\")]\n elif \"guess\" in step:\n language = values[header.index(\"language\")]\n user_id = values[header.index(\"user_id\")]\n else:\n return {\"response\", \"unknown survey type\"}\n\n print(\"got new {} survey input for {} at {}\".format(step, user_id, timestamp))\n\n survey_csv = [\";\".join(header), \";\".join(values)]\n \n filename = \"{}_{}_{}.csv\".format(user_id, step, timestamp)\n task_survey = asyncio.create_task(create_csv_async(filename, survey_csv))\n shielded_task_s = asyncio.shield(task_survey)\n\n if any(st == step for st in [\"basic\", \"evaluation\"]):\n questions_csv = [\"question.id; question.text,; question.type\"]\n for question in payload.form_response.definition.questions:\n questions_csv.append(\"{}; {}; {}\".format(question.id, question.title, question.type))\n \n filename = \"{}_questionlayout-{}_{}.csv\".format(user_id, step, timestamp)\n task_questions = asyncio.create_task(create_csv_async(filename, questions_csv))\n shielded_task_q = asyncio.shield(task_questions)\n\n await task_survey\n if not task_survey.exception() == None:\n print(task_survey.exception())\n\n if any(st == step for st in [\"basic\", \"evaluation\"]):\n await task_questions\n if not task_questions.exception() == None:\n print(task_questions.exception())\n\n return {\"response\": \"success\"}\n", "id": "11667168", "language": "Python", "matching_score": 1.2609388828277588, "max_stars_count": 0, "path": "app/main.py" }, { "content": "#import core\nimport random\nimport json\n\n#import public\nfrom sqlalchemy.sql.expression import func\nfrom flask import jsonify\n\n#import privat\nfrom .. 
import db\nfrom ..models import Question, Answer, AnswerMeta, QuestionText, Result\n\nfail_questions = ({\"questions\":[\n\t\t\t\t\t\t\t[{\"questionId\": \"1\", \"questionText\": \"1\"},\n\t\t\t\t\t\t\t\t{\"questionId\": \"2\", \"questionText\": \"2\"}],\n\t\t\t\t\t\t\t[{\"questionId\": \"3\", \"questionText\": \"3\"},\n\t\t\t\t\t\t\t\t{\"questionId\": \"4\", \"questionText\": \"4\"}],\n\t\t\t\t\t\t\t[{\"questionId\": \"5\", \"questionText\": \"5\"},\n\t\t\t\t\t\t\t\t{\"questionId\": \"6\", \"questionText\": \"6\"}],\n\t\t\t\t\t\t\t[{\"questionId\": \"7\", \"questionText\": \"7\"},\n\t\t\t\t\t\t\t\t{\"questionId\": \"8\", \"questionText\": \"8\"}],\n\t\t\t\t\t\t\t[{\"questionId\": \"9\", \"questionText\": \"9\"},\n\t\t\t\t\t\t\t\t{\"questionId\": \"10\", \"questionText\": \"10\"}],\n\t\t\t\t\t\t\t[{\"questionId\": \"11\", \"questionText\": \"11\"},\n\t\t\t\t\t\t\t\t{\"questionId\": \"12\", \"questionText\": \"12\"}],\n\t\t\t\t\t\t\t[{\"questionId\": \"13\", \"questionText\": \"13\"},\n\t\t\t\t\t\t\t\t{\"questionId\": \"14\", \"questionText\": \"14\"}],\n\t\t\t\t\t\t\t[{\"questionId\": \"15\", \"questionText\": \"15\"},\n\t\t\t\t\t\t\t\t{\"questionId\": \"16\", \"questionText\": \"16\"}],\n\t\t\t\t\t\t\t[{\"questionId\": \"17\", \"questionText\": \"17\"},\n\t\t\t\t\t\t\t\t{\"questionId\": \"18\", \"questionText\": \"18\"}],\n\t\t\t\t\t\t\t[{\"questionId\": \"19\", \"questionText\": \"19\"},\n\t\t\t\t\t\t\t\t{\"questionId\": \"20\", \"questionText\": \"20\"}],\n\t\t\t\t\t\t\t]})\n\n\n# build a json object with 10 questions where every\n# questions has questionsID, option1 and option2\ndef build_questions(language='EN', owner=None):\n\ttry:\n\t\t#query 20 random questions\n\t\tquestionList = QuestionText.query\\\n\t\t\t\t\t\t.filter(QuestionText.language == language)\\\n\t\t\t\t\t\t.order_by(func.rand())\\\n\t\t\t\t\t\t.limit(10)\\\n\t\t\t\t\t\t.all()\n\n\t\tquestionList2 = QuestionText.query\\\n\t\t\t\t\t\t.filter(QuestionText.language == language)\\\n\t\t\t\t\t\t.order_by(func.rand())\\\n\t\t\t\t\t\t.limit(10)\\\n\t\t\t\t\t\t.all()\n\texcept:\n\t\tprint(\"Failed to load 20 questions from the database\")\n\t\treturn json.dumps(fail_questions)\n\n\ttry:\n\t\tquestions = {\"questions\":\n\t\t\t\t\t\t[\n\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t{\"questionId\": question.questionId,\n\t\t\t\t\t\t\t\t\t\"questionText\": question.text},\n\t\t\t\t\t\t\t\t{\"questionId\": question2.questionId,\n\t\t\t\t\t\t\t\t\t\"questionText\": question2.text}\n\t\t\t\t\t\t\t] for question, question2 in zip(questionList, questionList2)\n\t\t\t\t\t\t]\n\t\t\t\t\t}\n\texcept:\n\t\tprint(\"Failed to turn list of 20 questions into 10 tuples of 2 questions\")\n\t\treturn json.dumps(fail_questions)\n\n\ttry:\n\t\treturn json.dumps(questions)\n\texcept:\n\t\tprint(\"failed to convert dict of questions to json\")\n\t\treturn json.dumps(fail_questions)\n\n\ndef getusertype():\n\t#TODO change query language\n\tresult_tmp = Result.query\\\n\t\t\t\t\t.filter(QuestionText.language == \"EN\")\\\n\t\t\t\t\t.order_by(func.rand())\\\n\t\t\t\t\t.first()\n\tresult_dict = {\"imageId\": result_tmp.image_id,\n\t\t\t\t\t\"resultName\": result_tmp.item,\n\t\t\t\t\t\"resultText\": result_tmp.text}\n\tprint(\"the user is a {0}\".format(result_tmp.item))\n\treturn json.dumps(result_dict)\n\n\ndef save_answers(answers, owner=None):\n\tfor answer in answers[\"answers\"]:\n\t\t#create new answer\n\t\ttry:\n\t\t\tif answer[\"answer\"][\"answerValue\"] in [True, \"true\", \"True\", \"TRUE\", \"1\"]:\n\t\t\t\tansValue = 1\n\t\t\telse:\n\t\t\t\tansValue = 
0\n\n\t\t\tnew_answer = Answer(\n\t\t\t\t\t\tquestionId=int(answer[\"answer\"][\"questionId\"]),\n \taltQuestionId=int(answer[\"answer\"][\"altQuestionId\"]),\n\t\t\t\t\t\tanswerValue=ansValue,\n\t\t\t\t\t\tsource=owner\n\t\t\t\t\t)\n\t\t\tprint(\"created answer\")\n\t\texcept:\n\t\t\tprint(\"not able to build answer\")\n\t\t\tcontinue\n\n\t\ttry:\n\t\t\t#save answer to db\n\t\t\tdb.session.add(new_answer)\n\t\t\tdb.session.commit()\n\t\t\tprint(\"commited answer\")\n\n\t\texcept:\n\t\t\tprint(\"Failed to save answer in db for questionID {0}\".format(answer[\"answer\"][\"questionId\"]))\n\t\t\tcontinue\n\n\t\ttry:\n\t\t\tassert len(answers[\"metadata\"]) >= 1\n\t\texcept AssertionError:\n\t\t\tprint(\"no meta Data\")\n\t\t\tcontinue\n\n\t\tfor meta in answers[\"metadata\"]:\n\t\t\t#create new meta data\n\t\t\ttry:\n\t\t\t\tnew_metadata = AnswerMeta(\n\t\t\t\t\t\tanswerId=new_answer.id,\n\t\t\t\t\t\tkey=str(meta[\"key\"]),\n\t\t\t\t\t\tvalue=str(meta[\"value\"])\n\t\t\t\t\t)\n\t\t\t\tprint(\"created meta\")\n\t\t\texcept:\n\t\t\t\tprint(\"not able to build metadata\")\n\t\t\t\tcontinue\n\n\t\t\ttry:\n\t\t\t\t#save metadata\n\t\t\t\tdb.session.add(new_metadata)\n\t\t\t\tdb.session.commit()\n\t\t\t\t#save metadata\n\t\t\t\tprint(\"commited meta\")\n\t\t\texcept:\n\t\t\t\tprint(\"Failed to save metadata in dbfor questionID {0}\".format(answer[\"questionId\"]))\n\t\t\t\tcontinue\n", "id": "9322309", "language": "Python", "matching_score": 4.731703281402588, "max_stars_count": 0, "path": "app/main/src.py" }, { "content": "#!/usr/bin/env python\nimport os\nimport subprocess\nfrom config import Config\n\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager, Shell\nfrom redis import Redis\nfrom rq import Connection, Queue, Worker\n\nfrom app import create_app, db\nfrom app.models import Role, User, fill_the_db, Question, QuestionText, Answer, AnswerMeta, Result\n\nfrom app.main.src import build_questions\n\nfrom sqlalchemy import Float\nfrom sqlalchemy.sql.expression import func, case, literal_column, cast\nimport json\n\nimport pandas as pd\n\nif os.path.exists('config.env'):\n print('Importing environment from .env file')\n for line in open('config.env'):\n var = line.strip().split('=')\n if len(var) == 2:\n os.environ[var[0]] = var[1]\n\napp = create_app(os.getenv('FLASK_CONFIG') or 'default')\nmanager = Manager(app)\nmigrate = Migrate(app, db)\n\n\ndef make_shell_context():\n return dict(app=app, db=db, User=User, Role=Role)\n\n\nmanager.add_command('shell', Shell(make_context=make_shell_context))\nmanager.add_command('db', MigrateCommand)\n\n\[email protected]\ndef test():\n \"\"\"Run the unit tests.\"\"\"\n import unittest\n\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n\n\[email protected]\ndef recreate_db():\n \"\"\"\n Recreates a local database. 
You probably should not use this on\n production.\n \"\"\"\n db.drop_all()\n db.create_all()\n db.session.commit()\n\n\[email protected]\ndef fill_db():\n fill_the_db.fill_the_db()\n\n\[email protected]\ndef test_questions():\n questionList = QuestionText.query\\\n .filter(QuestionText.language == 'EN')\\\n .order_by(func.rand())\\\n .limit(10)\\\n .all()\n questionList2 = QuestionText.query\\\n .filter(QuestionText.language == 'EN')\\\n .order_by(func.rand())\\\n .limit(10)\\\n .all()\n\n questions_tmp = []\n for question, question2 in zip(questionList, questionList2):\n print(question.__dict__)\n print(question2.__dict__)\n question_tpl = [{\"questionId\": question.questionId,\n \"questionText\": question.text},\n {\"questionId\": question2.questionId,\n \"questionText\": question2.text}]\n questions_tmp.extend(question_tpl)\n\n questions = {\"questions\": questions_tmp}\n\n print(json.dumps(questions))\n\n\[email protected]\ndef test_answers():\n print(Answer.query.order_by(Answer.id.desc()).first())\n print(Answer.query.count())\n print(AnswerMeta.query.order_by(AnswerMeta.id.desc()).first())\n\[email protected]\ndef test_results():\n print(\"number of different results {0}\".format(Result.query.count()))\n\[email protected]\ndef test_POST_request():\n owner = None\n answers = {\"answers\":\n [\n {\"answer\":\n {\"questionId\":38,\n \"answerValue\": \"True\",\n \"altQuestionId\":34}\n },\n {\"answer\":\n {\"questionId\":34,\n \"answerValue\": \"false\",\n \"altQuestionId\":38}\n }\n ],\n \"metadata\":\n [\n {\"key\":\"lang\",\n \"value\":\"DE\"},\n {\"key\":\"country\",\n \"value\":\"Switzerland\"}\n ]\n }\n\n for answer in answers[\"answers\"]:\n #create new answer\n if answer[\"answer\"][\"answerValue\"] in [\"true\", \"True\", \"TRUE\", \"1\", 1]:\n ansValue = True\n else:\n ansValue = False\n\n new_answer = Answer(\n questionId=int(answer[\"answer\"][\"questionId\"]),\n altQuestionId=int(answer[\"answer\"][\"altQuestionId\"]),\n answerValue=answer[\"answer\"][\"answerValue\"],\n source=owner\n )\n print(\"created answer\")\n\n #save answer to db\n db.session.add(new_answer)\n db.session.commit()\n print(\"commited answer\")\n\n for meta in answers[\"metadata\"]:\n #create new meta data\n \n new_metadata = AnswerMeta(\n answerId=new_answer.id,\n key=meta[\"key\"],\n value=meta[\"value\"]\n )\n print(\"created meta\")\n\n #save metadata\n db.session.add(new_metadata)\n db.session.commit()\n #save metadata\n print(\"commited meta\")\n\[email protected]\ndef analytics():\n numAns = func.count(Answer.questionId)\n trueAns = func.count(case([((Answer.answerValue == True),Answer.questionId)],else_=literal_column(\"NULL\")))\n numMaleAns = func.count(case([((AnswerMeta.value == \"male\"),Answer.questionId)],else_=literal_column(\"NULL\")))\n trueMaleAns = func.count(case([((Answer.answerValue == True) & (AnswerMeta.value == \"male\"),Answer.questionId)],else_=literal_column(\"NULL\")))\n numFemaleAns = func.count(case([((AnswerMeta.value == \"female\"),Answer.questionId)],else_=literal_column(\"NULL\")))\n trueFemaleAns = func.count(case([((Answer.answerValue == True) & (AnswerMeta.value == \"female\"),Answer.questionId)],else_=literal_column(\"NULL\")))\n\n #percAns = cast(trueAns / numAns, Float(asdecimal=False))\n #percMaleAns = cast(trueMaleAns / numMaleAns, Float(asdecimal=False))\n #percFemaleAns = cast(trueFemaleAns / numFemaleAns, Float(asdecimal=False)) \n\n analytics_data = db.session.query(Answer.questionId.label(\"questionID\"),\n QuestionText.text.label(\"questionText\"),\n 
numAns.label(\"numAns\"),\n trueAns.label(\"trueAns\"),\n numMaleAns.label(\"numMaleAns\"),\n trueMaleAns.label(\"trueMaleAns\"),\n numFemaleAns.label(\"numFemaleAns\"),\n trueFemaleAns.label(\"trueFemaleAns\")\n )\\\n .join(QuestionText, Answer.questionId == QuestionText.id)\\\n .join(AnswerMeta, Answer.id == AnswerMeta.answerId)\\\n .filter(QuestionText.language == \"EN\")\\\n .group_by(Answer.questionId)\\\n .all()\n\n analytics_df = pd.DataFrame(analytics_data,\n columns = [\"questionId\", \"questionText\",\"numAns\",\"trueAns\",\"numMaleAns\",\"trueMaleAns\",\"numFemaleAns\",\"trueFemaleAns\"])\n\n analytics_df[\"percAns\"] = analytics_df[\"trueAns\"] / analytics_df[\"numAns\"]\n analytics_df[\"percMaleAns\"] = analytics_df[\"trueMaleAns\"] / analytics_df[\"numFemaleAns\"]\n analytics_df[\"percFemaleAns\"] = analytics_df[\"trueFemaleAns\"] / analytics_df[\"numFemaleAns\"]\n\n\n print(analytics_df.describe())\n print(analytics_df[[\"questionId\",\"questionText\",\"percAns\",\"percMaleAns\",\"percFemaleAns\"]].sort_values(\"percAns\",ascending=False))\n\n\[email protected](\n '-n',\n '--number-users',\n default=10,\n type=int,\n help='Number of each model type to create',\n dest='number_users')\ndef add_fake_data(number_users):\n \"\"\"\n Adds fake data to the database.\n \"\"\"\n User.generate_fake(count=number_users)\n\n\[email protected]\ndef setup_dev():\n \"\"\"Runs the set-up needed for local development.\"\"\"\n setup_general()\n\n\[email protected]\ndef setup_prod():\n \"\"\"Runs the set-up needed for production.\"\"\"\n setup_general()\n\n\ndef setup_general():\n \"\"\"Runs the set-up needed for both local development and production.\n Also sets up first admin user.\"\"\"\n Role.insert_roles()\n admin_query = Role.query.filter_by(name='Administrator')\n if admin_query.first() is not None:\n if User.query.filter_by(email=Config.ADMIN_EMAIL).first() is None:\n user = User(\n first_name='Admin',\n last_name='Account',\n password=Config.ADMIN_PASSWORD,\n confirmed=True,\n email=Config.ADMIN_EMAIL)\n db.session.add(user)\n db.session.commit()\n print('Added administrator {}'.format(user.full_name()))\n\n\[email protected]\ndef run_worker():\n \"\"\"Initializes a slim rq task queue.\"\"\"\n listen = ['default']\n conn = Redis(\n host=app.config['RQ_DEFAULT_HOST'],\n port=app.config['RQ_DEFAULT_PORT'],\n db=0,\n password=app.config['RQ_DEFAULT_PASSWORD'])\n\n with Connection(conn):\n worker = Worker(map(Queue, listen))\n worker.work()\n\n\[email protected]\ndef format():\n \"\"\"Runs the yapf and isort formatters over the project.\"\"\"\n isort = 'isort -rc *.py app/'\n yapf = 'yapf -r -i *.py app/'\n\n print('Running {}'.format(isort))\n subprocess.call(isort, shell=True)\n\n print('Running {}'.format(yapf))\n subprocess.call(yapf, shell=True)\n\n\nif __name__ == '__main__':\n manager.run()\n", "id": "12491343", "language": "Python", "matching_score": 2.369534730911255, "max_stars_count": 0, "path": "manage.py" }, { "content": "from flask import current_app\nfrom . import Question\nfrom sqlalchemy.sql import func\n\nfrom .. 
import db\n\n\nclass Answer(db.Model):\n __tablename__ = 'answer'\n id = db.Column(db.Integer, primary_key=True)\n questionId = db.Column(db.Integer, db.ForeignKey(Question.id))\n altQuestionId = db.Column(db.Integer, db.ForeignKey(Question.id))\n answerValue = db.Column(db.Boolean)\n timestamp = db.Column(db.DateTime, server_default=func.now())\n source = db.Column(db.Integer)\n\nclass AnswerMeta(db.Model):\n __tablename__ = 'answer_meta'\n id = db.Column(db.Integer, primary_key=True)\n answerId = db.Column(db.Integer, db.ForeignKey(Answer.id))\n key = db.Column(db.String(255))\n value = db.Column(db.String(255))\n", "id": "3492175", "language": "Python", "matching_score": 3.413670539855957, "max_stars_count": 0, "path": "app/models/answer.py" }, { "content": "from flask import current_app\n\nfrom .. import db\n\nclass Result(db.Model):\n __tablename__ = 'results'\n id = db.Column(db.Integer, primary_key=True)\n image_id = db.Column(db.Integer)\n language = db.Column(db.String(8),default='EN')\n item = db.Column(db.String(255))\n text = db.Column(db.Text)", "id": "4934983", "language": "Python", "matching_score": 3.0512349605560303, "max_stars_count": 0, "path": "app/models/result.py" }, { "content": "from flask import current_app\n\nfrom .. import db\n\n\nclass Question(db.Model):\n __tablename__ = 'questions'\n id = db.Column(db.Integer, primary_key=True)\n owner = db.Column(db.Integer)\n\nclass QuestionText(db.Model):\n __tablename__ = 'question_text'\n id = db.Column(db.Integer, primary_key=True)\n questionId = db.Column(db.Integer, db.ForeignKey(Question.id))\n language = db.Column(db.String(8),default='EN')\n text = db.Column(db.String(255))\n", "id": "12093288", "language": "Python", "matching_score": 0.5349512696266174, "max_stars_count": 0, "path": "app/models/question.py" }, { "content": "from .. 
import db \nfrom ..models import Question, QuestionText, Result\n\nadjectives = [\n\t\t\t\t#movement\n\t\t\t\t[\"fast movement\", \"schnelle Fortbewegung\", \"locomotion vite\"],\n\t\t\t\t[\"slow movement\", \"langsamme Fortbewegung\", \"locomotion doucement\"],\n\t\t\t\t[\"safe mobility\", \"sichere Mobilitaet\", \"mobilite assure\"],\n\t\t\t\t[\"quiet movement\", \"leise Fortbewegung\", \"locomotion bas\"],\n\t\t\t\t[\"noisy movement\", \"geraeuschvolle Fortbewegung\", \"locomotion bruyant\"],\n\t\t\t\t[\"autonomous movement\", \"Autopilot\", \"pilote automatique\"],\n\t\t\t\t#engine\n\t\t\t\t[\"diesel engine\", \"Diesel Motor\",\"moteur diesel\"],\n\t\t\t\t[\"gasoline engine\", \"Benzin Motor\", \"moteur benzine\"],\n\t\t\t\t[\"electric engine\", \"Elektromotor\", \"moteur electrique\"],\n\t\t\t\t[\"hydro engine\", \"Wasserstoff Motor\", \"moteur hydrogene\"],\n\t\t\t\t[\"hybrid engine\", \"hybrid Motor\", \"moteur hybride\"],\n\t\t\t\t#type\n\t\t\t\t[\"swimming vehicle\", \"schwimmendes Fahrzeug\", \"vehicule folttant\"],\n\t\t\t\t[\"flying vehicle\", \"fliegendes Fahrzeug\", \"vehicule volant\"],\n\t\t\t\t[\"road based vehicle\", \"Strassenfahrzeug\", \"vehicule pour la route\"],\n\t\t\t\t#size\n\t\t\t\t[\"small vehicle\", \"kleines Fahrzeug\", \"petit vehicule\"],\n\t\t\t\t[\"large vehicle\", \"grosses Fahrzeug\", \"grand vehicule\"],\n\t\t\t\t[\"wide vehicle\", \"breites Fahrzeug\", \"vehicule large\"],\n\t\t\t\t[\"slim vehicle\", \"duennes Fahrzeug\", \"vehicule fin\"],\n\t\t\t\t[\"high vehicle\", \"hohes Fahrzeug\", \"vehicule haut\"],\n\t\t\t\t[\"low vehicle\", \"niedriges Fahrzeug\", \"vehicule bas\"],\n\t\t\t\t#transformation\n\t\t\t\t[\"convertible\", \"Cabriolet\", \"cabriolet\"],\n\t\t\t\t[\"transformable vehicle\", \"transformierendes Fahrzeug\", \"vehicule transformable\"],\n\t\t\t\t#interieur\n\t\t\t\t[\"comfortable interieur\", \"gemuetliche Innenaustattung\", \"interieur confortable\"],\n\t\t\t\t[\"sporty interieur\", \"sportliche Innenaustattung\", \"interieur sportif\"],\n\t\t\t\t[\"elegant interieur\", \"elegante Innenaustattung\", \"interieur elegant\"],\n\t\t\t\t[\"sustainable interieur\", \"nachhaltige Innenaustattung\", \"interieur durable\"],\n\t\t\t\t[\"colorful interieur\", \"farbige Innenaustattung\", \"interieur colore\"],\n\t\t\t\t[\"expensive interieur\", \"teure Innenaustattung\", \"interieur cher\"],\n\t\t\t\t[\"retro interieur\", \"Retroinnenaustattung\", \"interieur retro\"],\n\t\t\t\t[\"futuristic interieur\", \"futuristische Innenaustattung\", \"interieur futuriste\"],\n\t\t\t\t#exterieur\n\t\t\t\t[\"protective car body\", \"behuetende Karosserie\", \"carrosserie protective\"],\n\t\t\t\t[\"sporty car body\", \"sportliche Karosserie\", \"carrosserie sportif\"],\n\t\t\t\t[\"elegant car body\", \"elegante Karosserie\", \"carrosserie elegant\"],\n\t\t\t\t[\"edged car body\", \"kantige Karosserie\", \"carosserie anguleux\"],\n\t\t\t\t[\"curved car body\", \"geschwungene Karosserie\", \"carosserie arque\"],\n\t\t\t\t[\"sustainable car body\", \"nachhaltige Karosserie\", \"carosserie durable\"],\n\t\t\t\t[\"expansive car body\", \"teure Karosserie\", \"carrosserie cher\"],\n\t\t\t\t[\"retro car body\", \"Retro-Karosserie\", \"carrosserie retro\"],\n\t\t\t\t[\"futuristic car body\", \"futuristische Karosserie\", \"carrosserie futuriste\"],\n\t\t\t\t[\"invisible car body\", \"durchsichtige Karosserie\", \"carosserie \"],\n\t\t\t\t#transmission\n\t\t\t\t[\"manual transmission\", \"Handschaltung\", \"changement de vitesse manuel\"],\n\t\t\t\t[\"automatic 
transmission\", \"Automatik Schaltung\", \"changement de vitesse automatique\"],\n\t\t\t\t#price\n\t\t\t\t[\"affordable base configuration\", \"guenstige Grundaustattung\", \"equipement de base bon pris\"],\n\t\t\t\t[\"all-inclusive base configuration\", \"allumfassende Grundaustattung\", \"equipement de base universel\"],\n\t\t\t\t[\"affordable add-ons\", \"guenstige Erweiterungen\", \"equipement extension bon pris\"],\n\t\t\t\t[\"special add-ons\", \"besondere Erweiterungen\", \"equipement extension privilegie\"],\n\t\t\t\t#other\n\t\t\t\t[\"large storage\", \"grosser Gepaeckraum\", \"grandes compartiment a bagages\"]\n\t\t\t]\n\nuser_results = [\n\t\t\t\t[\"1\", \"EN\", \"Bananacycle\",\"You need a bananacycle to explore your fruity side!\"],\n\t\t\t\t[\"2\", \"EN\", \"Volkswagen Bulli\",\"You need a VW Bulli to roam the world as a free spirit!\"],\n\t\t\t\t[\"3\", \"EN\", \"Bamboo Car\",\"You need this bamboo electric car to stop the climate warming.\"],\n\t\t\t\t[\"4\", \"EN\", \"Flintstones Car\",\"You still live in the stone-ages! That's how you get from A to B!\"],\n\t\t\t\t[\"5\", \"EN\", \"Hyper Car\",\"You need something fast to get away from your past haunting you.\"],\n\t\t\t\t[\"6\", \"En\", \"Batmobil\", \"You need a batmobil to accomodate all of this super power\"]\n\t\t\t\t]\n\ndef fill_the_db():\n\ttry:\n\t\tprint Question.query.first()\n\texcept:\n\t\tprint(\"no element in question\")\n\n\ttry:\n\t\tprint QuestionText.query.first()\n\texcept:\n\t\tprint(\"no element in questionText\")\n\n\tfor adjective in adjectives:\n\t\t\tnew_question = Question(owner = \"FB\")\n\t\t\tdb.session.add(new_question)\n\t\t\tdb.session.commit()\n\t\t\tnew_questionText = QuestionText(questionId = new_question.id,\n\t\t\t\t\t\t\t\t\tlanguage = 'EN',\n\t\t\t\t\t\t\t\t\ttext = adjective[0])\n\t\t\tdb.session.add(new_questionText)\n\t\t\tdb.session.commit()\n\t\t\tnew_questionText = QuestionText(questionId = new_question.id,\n\t\t\t\t\t\t\t\t\tlanguage = 'DE',\n\t\t\t\t\t\t\t\t\ttext = adjective[1])\n\t\t\tdb.session.add(new_questionText)\n\t\t\tdb.session.commit()\n\t\t\tnew_questionText = QuestionText(questionId = new_question.id,\n\t\t\t\t\t\t\t\t\tlanguage = 'FR',\n\t\t\t\t\t\t\t\t\ttext = adjective[2])\n\t\t\tdb.session.add(new_questionText)\n\t\t\tdb.session.commit()\n\t\n\tfor user_rslt in user_results:\n\t\tnew_result = Result(\n\t\t\t\t\t\t\t image_id = int(user_rslt[0]),\n\t\t\t\t\t\t\t language = user_rslt[1],\n\t\t\t\t\t\t\t item = user_rslt[2],\n\t\t\t\t\t\t\t text = user_rslt[3]\n\t\t\t\t\t\t\t)\n\t\tdb.session.add(new_result)\n\t\tdb.session.commit()", "id": "10329466", "language": "Python", "matching_score": 0.5183194279670715, "max_stars_count": 0, "path": "app/models/fill_the_db.py" }, { "content": "# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for resnet_v1_beta module.\"\"\"\n\nfrom __future__ import 
absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport google3\nimport numpy as np\nimport tensorflow as tf\n\nfrom deeplab.core import nas_genotypes\nfrom deeplab.core import nas_network\n\narg_scope = tf.contrib.framework.arg_scope\nslim = tf.contrib.slim\n\n\ndef create_test_input(batch, height, width, channels):\n \"\"\"Creates test input tensor.\"\"\"\n if None in [batch, height, width, channels]:\n return tf.placeholder(tf.float32, (batch, height, width, channels))\n else:\n return tf.to_float(\n np.tile(\n np.reshape(\n np.reshape(np.arange(height), [height, 1]) +\n np.reshape(np.arange(width), [1, width]),\n [1, height, width, 1]),\n [batch, 1, 1, channels]))\n\n\nclass NASNetworkTest(tf.test.TestCase):\n \"\"\"Tests with complete small NAS networks.\"\"\"\n\n def _pnasnet_small(self,\n images,\n num_classes,\n is_training=True,\n output_stride=16,\n final_endpoint=None):\n \"\"\"Build PNASNet model backbone.\"\"\"\n hparams = tf.contrib.training.HParams(\n filter_scaling_rate=2.0,\n num_conv_filters=10,\n drop_path_keep_prob=1.0,\n total_training_steps=200000,\n )\n if not is_training:\n hparams.set_hparam('drop_path_keep_prob', 1.0)\n\n backbone = [1, 2, 2]\n cell = nas_genotypes.PNASCell(hparams.num_conv_filters,\n hparams.drop_path_keep_prob,\n len(backbone),\n hparams.total_training_steps)\n with arg_scope([slim.dropout, slim.batch_norm], is_training=is_training):\n return nas_network._build_nas_base(\n images,\n cell=cell,\n backbone=backbone,\n num_classes=num_classes,\n hparams=hparams,\n reuse=tf.AUTO_REUSE,\n scope='pnasnet_small',\n final_endpoint=final_endpoint)\n\n def testFullyConvolutionalEndpointShapes(self):\n num_classes = 10\n inputs = create_test_input(2, 321, 321, 3)\n with slim.arg_scope(nas_network.nas_arg_scope()):\n _, end_points = self._pnasnet_small(inputs,\n num_classes)\n endpoint_to_shape = {\n 'Stem': [2, 81, 81, 128],\n 'Cell_0': [2, 41, 41, 100],\n 'Cell_1': [2, 21, 21, 200],\n 'Cell_2': [2, 21, 21, 200]}\n for endpoint, shape in endpoint_to_shape.iteritems():\n self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "id": "9991552", "language": "Python", "matching_score": 0.4657503068447113, "max_stars_count": 2, "path": "research/deeplab/core/nas_network_test.py" }, { "content": "from sklearn.metrics.pairwise import cosine_similarity\nfrom scipy import sparse\nimport numpy as np\nimport math\nimport logging\nlogger = logging.getLogger(__name__)\n\ndef predict_most_similar(visits, num_users, num_jobs, UserJobs, k=20, cut_off=300, log_discrete=True, epsilon=1e-9):\n \"\"\"\n Cosine Distance based\n \n creates a sparse matrix of the time a user spent on a job\n top cut off at cut_off sec (to overcome outliers). 
Time values\n are turned into log disrcete values between 0 and cut_off\n on a 0-255 basis (uint8) if log_discrete=True.\n \n Most of this is fairly slow due to the list structure which limits vectorization.\n \n args:\n visits: a list of objects with a user_id, job_id and duration value\n num_users: integer, number of users = max user_id\n num_jobs: integer, number of jobs = max job_id\n UserJobs: django or SQLAlechmy model where the similarities are saved\n k: integer, top k users to use for the prediction\n cut_off: integer, top cut off time in seconds\n log_discrete: boolean, if true converts to log discrete values\n epsilon: zero division shift\n \"\"\"\n \n tic = datetime.now()\n #we only operate on the user vectors\n #this expects integer ids as users if this isn't the case you might want\n # to have a dict for row & col keys\n M_t = sparse.csr_matrix((num_users, num_jobs), dtype=np.uint8)\n \n #TODO can you vectorize this?\n for visit in visits:\n def calc_time(val):\n if val > 300:\n val = 300\n if log_discrete:\n return int(math.log(val, cut_off) * 255)\n else:\n return int(val / cut_off * 255)\n \n M_t[visit.user_id, visit.job_id] = calc_time(visit.duration)\n logger.debug(\"M_t took {} ms\".format((datetime.now() - tic).microseconds))\n \n tic = datetime.now()\n M_s = cosine_similarity(M_t, M_t, dense_output=False)\n logger.debug(\"M_s took {} ms\".format((datetime.now() - tic).microseconds))\n \n #row by row to save memory\n #if it is guaranteed that there is a userjob per user and job then\n # this can be speed up by inversing it and iterating through the userjob query \n # --> less SQL querries\n # further you could switch to a batch update with save()\n tic = datetime.now()\n M_k = np.argsort(M_s.toarray(), axis=1)[:-k-1:-1]\n for user_id in range(num_users):\n top_k_users = np.argsort(M_s.getcol(user_id).toarray())[:-k-1:-1].squeeze()\n sim_sum = np.sum(np.abs(M_s[user_id, top_k_users]))\n if sim_sum != 0:\n pred = M_s[user_id, top_k_users].dot(M_t[top_k_users,:]) / sim_sum\n else:\n pred = np.zeros((1,num_jobs))\n for job_id in range(num_jobs):\n userjob = UserJobs.objects.filter(user_id=user_id, job_id=job_id).first()\n if userjob is None:\n UserJobs.create(user_id=user_id, job_id=job_id, similarity_Skill=None, similarity_CF=pred[0, job_id])\n else:\n userjob.similarity_CF = pred[0, job_id]\n \n logger.debug(\"Prediction took {} ms\".format((datetime.now() - tic).microseconds))\n", "id": "3038138", "language": "Python", "matching_score": 5.909060478210449, "max_stars_count": 0, "path": "collaborative_cosinedist.py" }, { "content": "from scipy import sparse\nimport numpy as np\nimport math\nimport implicit\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef predict_most_similar(visits, num_users, num_jobs, UserJobs, factors=50, cut_off=300, log_discrete=True):\n \"\"\"\n Matrix Factorization based\n \n Still Collaborative filtering but this time based on alternating\n least squares with an efficient implementation from implicit.\n \n Still not very fast as some of the list to matrix stuff still applies.\n But it should scale better. 
Maybe it is worth storing this in memory\n and requesting values when a user needs some\n \n args:\n visits: a list of objects with a user_id, job_id and duration value\n num_users: integer, number of users = max user_id\n num_jobs: integer, number of jobs = max job_id\n UserJobs: django or SQLAlechmy model where the similarities are saved\n cut_off: integer, top cut off time in seconds\n log_discrete: boolean, if true converts to log discrete values\n \"\"\"\n \n tic = datetime.now()\n #we only operate on the user vectors\n #this expects integer ids as users if this isn't the case you might want\n # to have a dict for row & col keys\n M_t = sparse.csr_matrix((num_jobs, num_users), dtype=np.uint8)\n \n #TODO can you vectorize this?\n for visit in visits:\n def calc_time(val):\n if val > 300:\n val = 300\n if log_discrete:\n return int(math.log(val, cut_off) * 255)\n else:\n return int(val / cut_off * 255)\n \n M_t[visit.job_id, visit.user_id] = calc_time(visit.duration)\n logger.debug(\"M_t took {} ms\".format((datetime.now() - tic).microseconds))\n \n tic = datetime.now()\n # initialize a model\n model = implicit.als.AlternatingLeastSquares(factors=factors)\n logger.debug(\"Loading model took {} ms\".format((datetime.now() - tic).microseconds))\n \n tic = datetime.now()\n # train the model on a sparse matrix of item/user/confidence weights\n model.fit(M_t)\n logger.debug(\"Fitting model took {} ms\".format((datetime.now() - tic).microseconds))\n \n tic = datetime.now()\n # recommend items for a user\n for user_id in range(num_users):\n preds = model.recommend(user_id, M_t.T)\n only saves the non-zero ones\n for pred in preds:\n userjob = UserJobs.objects.filter(user_id=user_id, job_id=pred[0]).first()\n if userjob is None:\n UserJobs.create(user_id=user_id, job_id=pred[0], similarity_Skill=None, similarity_CF=pred[1])\n else:\n userjob.similarity_CF = pred[1]\n logger.debug(\"Predicting took {} ms\".format((datetime.now() - tic).microseconds))\n", "id": "11672533", "language": "Python", "matching_score": 3.2163052558898926, "max_stars_count": 0, "path": "collaborative_ALS.py" }, { "content": "import random\nfrom datetime import datetime, timedelta\nfrom collaborative_cosinedist import predict_most_similar\n\nclass DBTable:\n def __init__(self, elems=[]):\n self.elems = elems\n \n def add(self, elem):\n self.elems.append(elem)\n \n def filter(self, **kwargs):\n return DBTable([elem for elem in self.elems if all(getattr(elem, key, False) == val for key, val in kwargs.items())])\n \n def all(self):\n if len(self.elems) == 0:\n return None\n return self.elems\n \n def first(self):\n if len(self.elems) == 0:\n return None\n return self.elems[0]\n\nclass Model:\n def __init__(self):\n self.elems = []\n self._elem_unsaved = None\n self.objects = DBTable([])\n \n class ElemBase:\n def __init__(self):\n pass\n \n def __str__(self):\n return \", \".join(self.__dict__)\n \n def create(self, **kwargs):\n self.objects.add(self.Elem(**kwargs))\n\nclass UserJob(Model):\n class Elem(Model.ElemBase):\n def __init__(self, user_id, job_id, similarity_Skill, similarity_CF):\n self.user_id = user_id\n self.job_id = job_id\n self.similarity_Skill = similarity_Skill\n self.similarity_CF = similarity_CF\n\nclass Visit(Model):\n class Elem(Model.ElemBase):\n def __init__(self, user_id, job_id, duration):\n self.user_id = user_id\n self.job_id = job_id\n self.duration = duration\n \n \nif __name__ == \"__main__\":\n date = datetime.now()\n\n num_users = 50\n num_jobs = 20\n \n visits = Visit()\n for idx in 
range(int(num_jobs * num_jobs / 2)):\n visits.create(user_id=random.randrange(num_users),\n job_id=random.randrange(num_jobs),\n duration=math.exp(random.randrange(57000)/10000))\n\n userjob = UserJob()\n predict_most_similar(visits.objects.all(), num_users, num_jobs, userjob)\n \n", "id": "1781096", "language": "Python", "matching_score": 3.0027716159820557, "max_stars_count": 0, "path": "test_collaborative_cosinedist.py" }, { "content": "import random\nfrom datetime import datetime, timedelta\nfrom skill_based import update_users_jobs_similarity\n\nclass DBTable:\n def __init__(self, elems=[]):\n self.elems = elems\n \n def add(self, elem):\n self.elems.append(elem)\n \n def filter(self, **kwargs):\n return DBTable([elem for elem in self.elems if all(getattr(elem, key, False) == val for key, val in kwargs.items())])\n \n def all(self):\n if len(self.elems) == 0:\n return None\n return self.elems\n \n def first(self):\n if len(self.elems) == 0:\n return None\n return self.elems[0]\n\nclass Model:\n def __init__(self):\n self.elems = []\n self._elem_unsaved = None\n self.objects = DBTable([])\n \n class Elem:\n def __init__(self):\n pass\n \n def __str__(self):\n return \", \".join(self.__dict__)\n \n def create(self, **kwargs):\n self.objects.add(self.Elem(**kwargs))\n \n\nclass Job(Model):\n class Elem:\n def __init__(self, id, skills, date_updated):\n self.id = id\n self.skills = skills\n self.date_updated = date_updated\n \n def __str__(self):\n return \", \".join(self.__dict__)\n \n \nclass User(Model):\n class Elem:\n def __init__(self, id, skills, date_updated):\n self.id = id\n self.skills = skills\n self.date_updated = date_updated\n \n def __str__(self):\n return \", \".join(self.__dict__)\n\nclass UserJob(Model):\n class Elem:\n def __init__(self, user_id, job_id, similarity):\n self.user_id = user_id\n self.job_id = job_id\n self.similarity = similarity\n \n def __str__(self):\n return \", \".join(self.__dict__)\n\nskills = [\"Python\", \"C++\", \"Javascript\", \"HTML\", \"CSS\", \"Golang\", \"Perl\", \"Ruby\", \"C#\", \"SQL\", \"Swift\", \"XCode\", \"Java\", \"Kotlin\", \"CAD\", \"CAM\", \"FEM\", \"Scrum\", \"Software Development\", \"Data Science\", \"Web Development\", \"Android Development\", \"iOS Development\", \"Market Analysis\", \"Marketing\", \"Business\", \"Sales\", \"Project Management\", \"Accounting\", \"Strategy\", \"Pricing\", \"Banking\", \"Retail\", \"Manufacturing\", \"Consumer Goods\", \"Software\", \"IT\"]\n \nif __name__ == \"__main__\":\n date = datetime.now()\n\n users = User()\n for idx in range(50):\n users.create(id=idx,\n skills=DBTable([skills[random.randrange(len(skills))] for _ in range(random.randrange(3,10))]),\n date_updated=date - timedelta(seconds=100))\n\n\n jobs = Job()\n for idx in range(30):\n jobs.create(id=idx, \n skills=DBTable([skills[random.randrange(len(skills))] for _ in range(random.randrange(3,10))]),\n date_updated=date - timedelta(seconds=100))\n\n\n userjob = UserJob()\n update_users_jobs_similarity(users.objects.all(), jobs.objects.all(), userjob, date)\n \n for uj in userjob.objects.all():\n print(userjob)\n", "id": "578813", "language": "Python", "matching_score": 2.0647952556610107, "max_stars_count": 0, "path": "test_skill_based.py" }, { "content": "import gensim.downloader as api\nfrom datetime import timedelta\nimport numpy as np\n\ndef calc_similarity(sim_func, user_tags, job_tags):\n \"\"\"\n calculates the similarity between to lists of tags.\n Sorts by pairs for strongest similarity.\n Adds a penalty for extra 
length in any list.\n \n args:\n sim_func: a numpy vectorized version of the model similarity function\n user_tags: list 1 of words\n job_tags: list 2 of words\n \"\"\"\n #calculat an array of of all pairwise similarities\n M_similarities = sim_func(np.array([[tag.lower() for tag in user_tags]]),\n np.array([[tag.lower() for tag in job_tags]]).T)\n M_similarities = np.ma.array(M_similarities, mask=False)\n \n \n #we only need to consider the shorter length\n dim_x, dim_y = M_similarities.shape\n if dim_x > dim_y:\n M_similarities = M_similarities.T\n dim_x, dim_y = dim_y, dim_x\n user_tags, job_tags = job_tags, user_tags\n \n #pick the strongest pairs\n sim_sum = 0\n \n M_similarities = M_similarities[np.argsort(M_similarities.max(axis=1))[::-1],:]\n for idx in range(M_similarities.shape[0]):\n idy = np.argmax(M_similarities[idx,])\n M_similarities.mask[:,idy] = True\n sim_sum += M_similarities[idx,idy]\n \n #apply a penalty\n return sim_sum * dim_x / dim_y\n\n \n# In a flask setup this function would be run as a celery task\ndef update_users_jobs_similarity(users, jobs, UserJob, date):\n \"\"\"\n updates the similarity between users and jobs for those where\n something changed since date\n \n args:\n users: list of users\n jobs: list of jobs\n UserJob: sqlalechmy or django.models Model\n date: datetime\n \"\"\"\n # keep the model alive for the duration of the update\n tic = datetime.now()\n #model = api.load(\"conceptnet-numberbatch-17-06-300\")\n def similarity_safe(wordA, wordB):\n try:\n return model.similarity(wordA, wordB)\n except:\n return 0.0\n vsim = np.vectorize(similarity_safe)\n print(\"download took: \", (datetime.now()-tic).seconds)\n \n tic = datetime.now()\n #we only want to update combinations where either side was updated\n #since date\n def updated(user, job, date):\n #change date_updated to the name of your update datetime\n return ((user.date_updated - date).seconds > 0 or\n (job.date_updated - date).seconds > 0)\n \n for user in users:\n for job in [job for job in jobs if updated(user, job, date)]:\n #we combined interests and skills, replace .skills and \n # .interests by you fields\n #add preprocessing to turn them into a list if necessary\n user_tags = user.skills.all()\n job_tags = job.skills.all()\n sim = calc_similarity(vsim, user_tags, job_tags)\n userjob = UserJob.objects.filter(user_id=user.id, job_id=job.id).first()\n if userjob is None:\n UserJob.create(user_id=user.id, job_id=job.id, similarity=sim)\n else:\n userjob.similarity = sim\n print(\"update took: \", (datetime.now()-tic).seconds)\n", "id": "5449082", "language": "Python", "matching_score": 1.761297583580017, "max_stars_count": 0, "path": "skill_based.py" } ]
2.217165
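(Aside, not part of the dataset itself: the rows above and below are serialized records of repository files. As a minimal illustrative sketch, assuming each record is available as JSON using the field names that appear in this dump — reponame, files, median_score, and per-file content, language, matching_score, max_stars_count, path — one record could be loaded and summarised as follows. The example record inside the sketch is hypothetical and abbreviated.)

import json

# Hypothetical, abbreviated record in the same shape as the rows in this dump.
record_json = """
{
  "reponame": "example/repo",
  "files": [
    {
      "content": "print('hello')\\n",
      "id": "0",
      "language": "Python",
      "matching_score": 1.23,
      "max_stars_count": 0,
      "path": "hello.py"
    }
  ],
  "median_score": 1.23
}
"""

record = json.loads(record_json)

# Summarise the record: one line per file, plus the repo-level median score.
print(f"repo: {record['reponame']}  median_score: {record['median_score']}")
for f in record["files"]:
    print(f"  {f['path']:<30} {f['language']:<8} "
          f"score={f['matching_score']:.3f} stars={f['max_stars_count']}")
    # f["content"] holds the full source of the file as a single escaped string.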
dylex
[ { "content": "\"\"\"py.test fixtures imported from Jupyterhub testing\"\"\"\n\nfrom jupyterhub.tests.conftest import *\n", "id": "428047", "language": "Python", "matching_score": 0.17221158742904663, "max_stars_count": 123, "path": "batchspawner/tests/conftest.py" }, { "content": "\"\"\"Test BatchSpawner and subclasses\"\"\"\n\nfrom unittest import mock\nfrom .. import BatchSpawnerRegexStates\nfrom traitlets import Unicode\nimport time\nimport pytest\nfrom jupyterhub import orm, version_info\n\ntry:\n from jupyterhub.objects import Hub\n from jupyterhub.user import User\nexcept:\n pass\n\ntesthost = \"userhost123\"\ntestjob = \"12345\"\n\nclass BatchDummy(BatchSpawnerRegexStates):\n batch_submit_cmd = Unicode('cat > /dev/null; echo '+testjob)\n batch_query_cmd = Unicode('echo RUN '+testhost)\n batch_cancel_cmd = Unicode('echo STOP')\n batch_script = Unicode('{cmd}')\n state_pending_re = Unicode('PEND')\n state_running_re = Unicode('RUN')\n state_exechost_re = Unicode('RUN (.*)$')\n\ndef new_spawner(db, **kwargs):\n kwargs.setdefault('cmd', ['singleuser_command'])\n user = db.query(orm.User).first()\n if version_info < (0,8):\n hub = db.query(orm.Hub).first()\n else:\n hub = Hub()\n user = User(user, {})\n kwargs.setdefault('hub', hub)\n kwargs.setdefault('user', user)\n kwargs.setdefault('INTERRUPT_TIMEOUT', 1)\n kwargs.setdefault('TERM_TIMEOUT', 1)\n kwargs.setdefault('KILL_TIMEOUT', 1)\n kwargs.setdefault('poll_interval', 1)\n if version_info < (0,8):\n return BatchDummy(db=db, **kwargs)\n else:\n print(\"JupyterHub >=0.8 detected, using new spawner creation\")\n return user._new_spawner('', spawner_class=BatchDummy, **kwargs)\n\ndef test_stress_submit(db, io_loop):\n for i in range(200):\n time.sleep(0.01)\n test_spawner_start_stop_poll(db, io_loop)\n\ndef check_ip(spawner, value):\n if version_info < (0,7):\n assert spawner.user.server.ip == value\n else:\n assert spawner.ip == value\n\ndef test_spawner_start_stop_poll(db, io_loop):\n spawner = new_spawner(db=db)\n\n status = io_loop.run_sync(spawner.poll, timeout=5)\n assert status == 1\n assert spawner.job_id == ''\n assert spawner.get_state() == {}\n\n io_loop.run_sync(spawner.start, timeout=5)\n check_ip(spawner, testhost)\n assert spawner.job_id == testjob\n\n status = io_loop.run_sync(spawner.poll, timeout=5)\n assert status is None\n spawner.batch_query_cmd = 'echo NOPE'\n io_loop.run_sync(spawner.stop, timeout=5)\n status = io_loop.run_sync(spawner.poll, timeout=5)\n assert status == 1\n assert spawner.get_state() == {}\n\ndef test_spawner_state_reload(db, io_loop):\n spawner = new_spawner(db=db)\n assert spawner.get_state() == {}\n\n io_loop.run_sync(spawner.start, timeout=30)\n check_ip(spawner, testhost)\n assert spawner.job_id == testjob\n\n state = spawner.get_state()\n assert state == dict(job_id=testjob, job_status='RUN '+testhost)\n spawner = new_spawner(db=db)\n spawner.clear_state()\n assert spawner.get_state() == {}\n spawner.load_state(state)\n if version_info < (0,7):\n check_ip(spawner, testhost)\n else:\n check_ip(spawner, '0.0.0.0')\n assert spawner.job_id == testjob\n\ndef test_submit_failure(db, io_loop):\n spawner = new_spawner(db=db)\n assert spawner.get_state() == {}\n spawner.batch_submit_cmd = 'cat > /dev/null; true'\n with pytest.raises(AssertionError) as e_info:\n io_loop.run_sync(spawner.start, timeout=30)\n assert spawner.job_id == ''\n assert spawner.job_status == ''\n\ndef test_pending_fails(db, io_loop):\n spawner = new_spawner(db=db)\n assert spawner.get_state() == {}\n spawner.batch_query_cmd = 
'echo xyz'\n with pytest.raises(AssertionError) as e_info:\n io_loop.run_sync(spawner.start, timeout=30)\n assert spawner.job_id == ''\n assert spawner.job_status == ''\n", "id": "11159374", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "batchspawner/tests/test_spawners.py" }, { "content": "from .batchspawner import *\n", "id": "698786", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "batchspawner/__init__.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\n\nfrom wecall.utils.interval import Interval\n\n\nclass IntervalRegionIterator(object):\n\n def __init__(self, iterator, make_interval):\n self.__make_interval = make_interval\n self.__iterator = iterator\n self.__item = None\n self.__interval = None\n self.__previous_results = []\n self.__previous_interval = None\n\n def __next(self):\n self.__item = next(self.__iterator)\n self.__interval = self.__make_interval(self.__item)\n\n def __call__(self, interval):\n # validation\n if self.__previous_interval is not None and interval.start < self.__previous_interval.start:\n raise Exception()\n self.__previous_interval = interval\n\n # pre-process cached reads\n next_results = []\n for new_interval, item in self.__previous_results:\n if new_interval.fast_overlap(interval):\n next_results.append((new_interval, item))\n\n # add reads to cache\n try:\n if self.__item is None:\n self.__next()\n while True:\n if self.__interval.fast_overlap(interval):\n next_results.append((self.__interval, self.__item))\n elif self.__interval.end <= interval.start:\n pass\n else:\n break\n previous_start = self.__interval.start\n self.__next()\n assert self.__interval.start >= previous_start\n except StopIteration:\n pass\n\n self.__previous_results = next_results\n return [item for new_interval, item in self.__previous_results]\n\n\nclass BAMRegionIterator(IntervalRegionIterator):\n\n def __init__(self, fetch_iterator):\n IntervalRegionIterator.__init__(\n self,\n fetch_iterator,\n lambda read: Interval(read.pos, read.aend)\n )\n", "id": "2197941", "language": "Python", "matching_score": 2.2327332496643066, "max_stars_count": 8, "path": "python/wecall/bamutils/bam_region_iterator.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import TestCase\nfrom pysam import AlignedRead\nfrom wecall.bamutils.bam_region_iterator import BAMRegionIterator\nfrom wecall.utils.interval import Interval\n\n\ndef make_read(pos, aend):\n read = AlignedRead()\n read.pos = pos\n read.seq = 'N' * (aend - pos)\n read.cigarstring = '{}M'.format(aend - pos)\n assert read.aend == aend, '{} != {}'.format(read.aend, aend)\n return read\n\n\nclass TestBAMFileIterator(TestCase):\n def test_should_return_all_reads_if_entire_fetch_region_requested(self):\n all_reads = [\n make_read(0, 1),\n make_read(1, 2),\n make_read(2, 3),\n make_read(3, 4),\n ]\n\n iterator = BAMRegionIterator((read for read in all_reads))\n\n actual_results = list(iterator(Interval(0, 4)))\n self.assertEqual(all_reads, actual_results)\n\n def test_should_only_return_reads_with_start_overlapping_interval(self):\n all_reads = [\n make_read(0, 1),\n make_read(1, 2),\n make_read(2, 3),\n make_read(3, 4),\n ]\n\n iterator = BAMRegionIterator((read for read in all_reads))\n\n actual_results = list(iterator(Interval(0, 2)))\n self.assertEqual(all_reads[0:2], actual_results)\n\n def test_should_only_return_reads_with_end_overlapping_interval(self):\n all_reads = [\n make_read(0, 1),\n make_read(1, 2),\n make_read(2, 3),\n 
make_read(3, 4),\n ]\n\n iterator = BAMRegionIterator((read for read in all_reads))\n\n actual_results = list(iterator(Interval(2, 4)))\n self.assertEqual(all_reads[2:4], actual_results)\n\n def test_should_return_overlapping_reads_from_middle_of_query_region(self):\n all_reads = [\n make_read(0, 1),\n make_read(1, 2),\n make_read(2, 3),\n make_read(3, 4),\n make_read(4, 5),\n make_read(5, 6),\n ]\n\n iterator = BAMRegionIterator((read for read in all_reads))\n\n actual_results = list(iterator(Interval(2, 4)))\n self.assertEqual(all_reads[2:4], actual_results)\n\n def test_should_return_nothing_if_interval_does_not_overlap_any_reads(self):\n all_reads = [\n make_read(0, 1),\n make_read(1, 2),\n make_read(2, 3),\n make_read(3, 4),\n ]\n\n iterator = BAMRegionIterator((read for read in all_reads))\n\n actual_results = list(iterator(Interval(4, 6)))\n self.assertEqual([], actual_results)\n\n def test_should_return_two_sets_of_values_for_intervals_in_correct_order(self):\n all_reads = [\n make_read(0, 1),\n make_read(1, 2),\n make_read(2, 3),\n make_read(3, 4),\n make_read(4, 5),\n make_read(5, 6),\n ]\n\n iterator = BAMRegionIterator((read for read in all_reads))\n\n self.assertEqual(all_reads[0:2], list(iterator(Interval(0, 2))))\n self.assertEqual(all_reads[3:5], list(iterator(Interval(3, 5))))\n\n def test_should_not_miss_any_reads_between_regions(self):\n all_reads = [\n make_read(0, 1),\n make_read(1, 2),\n make_read(2, 3),\n make_read(3, 4),\n make_read(4, 5),\n make_read(5, 6),\n ]\n\n iterator = BAMRegionIterator((read for read in all_reads))\n\n self.assertEqual(all_reads[0:2], list(iterator(Interval(0, 2))))\n self.assertEqual(all_reads[2:3], list(iterator(Interval(2, 3))))\n\n def test_should_fail_for_regions_in_incorrect_order(self):\n all_reads = [\n make_read(0, 1),\n make_read(1, 2),\n make_read(2, 3),\n make_read(3, 4),\n make_read(4, 5),\n make_read(5, 6),\n ]\n\n iterator = BAMRegionIterator((read for read in all_reads))\n\n self.assertEqual(all_reads[3:5], list(iterator(Interval(3, 5))))\n with self.assertRaises(Exception):\n print((iterator(Interval(0, 2))))\n\n def test_should_return_reads_that_overlap_both_intervals_twice(self):\n all_reads = [\n make_read(0, 2),\n make_read(1, 3),\n ]\n\n iterator = BAMRegionIterator((read for read in all_reads))\n\n self.assertEqual(all_reads[0:1], list(iterator(Interval(0, 1))))\n self.assertEqual(all_reads[0:2], list(iterator(Interval(1, 2))))\n", "id": "2887501", "language": "Python", "matching_score": 1.284842610359192, "max_stars_count": 8, "path": "test/test_utils/bamutils/test_bam_file_iterator.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport functools\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics.chromosome import standardise_chromosome, get_chromosome_index, \\\n chromosome_comp\n\n\[email protected]_ordering\nclass Interval(object):\n \"\"\"\n Stores an interval on a contiguous piece of DNA.\n There is a special null type introduced for algebraic completeness.\n \"\"\"\n\n # Magic methods\n\n def __repr__(self):\n return \"<{}>\".format(\n \", \".join((repr(x) for x in (\n self.start,\n self.end,\n )))\n )\n\n def __hash__(self):\n return hash((self.start, self.end))\n\n def __str__(self):\n return \"{!s}-{!s}\".format(self.start, self.end)\n\n def __eq__(self, other):\n return all((self.start == other.start, self.end == other.end))\n\n def __lt__(self, other):\n if self.start == other.start:\n return self.end < other.end\n else:\n return self.start < other.start\n\n def 
__init__(self, start=None, end=None):\n \"\"\"\n By default a null interval is created.\n \"\"\"\n if start is None or end is None or start > end:\n self.start = self.end = None\n else:\n self.start = start\n self.end = end\n assert self.__is_valid()\n\n # Private methods\n\n def __is_valid(self):\n return (\n all((self.start is None, self.end is None)) or\n not any((self.start is None, self.end is None))\n )\n\n def __compatible(lhs, rhs):\n assert lhs.__is_valid() and rhs.__is_valid()\n return True\n\n # Properties\n\n @property\n def is_null(self):\n \"\"\"\n A `null` set in this context is an empty set which has an indeterminate\n position.\n \"\"\"\n assert self.__is_valid()\n return all((self.start is None, self.end is None))\n\n @property\n def is_empty(self):\n \"\"\"\n An `empty` set in this context is a set of zero length with a well\n defined position.\n \"\"\"\n assert self.__is_valid()\n return any((self.start is None, self.end is None)) or self.length == 0\n\n @property\n def length(self):\n assert self.__is_valid()\n if any((self.start is None, self.end is None)):\n return 0\n else:\n return self.end - self.start\n\n # Public methods\n\n def toDict(self):\n return {\n \"start\": self.start,\n \"end\": self.end,\n }\n\n @staticmethod\n def fromDict(dct):\n return Interval(dct[\"start\"], dct[\"end\"])\n\n def overlap_size(lhs, rhs):\n \"\"\"\n For compatible intervals returns a quantity of overlap, otherwise raise an exception.\n Returns None if intervals don't overlap.\n \"\"\"\n if not lhs.__compatible(rhs):\n return None\n if lhs.is_null or rhs.is_null:\n return None\n else:\n size = min(lhs.end, rhs.end) - max(lhs.start, rhs.start)\n if size >= 0:\n return size\n else:\n return None\n\n def __contains__(self, other):\n return all((\n other.start >= self.start,\n other.end <= self.end,\n ))\n\n def fast_overlap(lhs, rhs):\n return (min(lhs.end, rhs.end) - max(lhs.start, rhs.start)) > 0\n\n def overlap(lhs, rhs):\n \"\"\"\n For compatible intervals returns a boolean value, otherwise raise an exception.\n Returns true if intervals overlap.\n \"\"\"\n size = Interval.overlap_size(lhs, rhs)\n if size is None:\n return False\n else:\n return size > 0\n\n def overlapOrTouch(lhs, rhs):\n \"\"\"\n For compatible intervals returns a boolean value, otherwise raise an exception.\n Returns true if intervals overlap or touch.\n \"\"\"\n size = Interval.overlap_size(lhs, rhs)\n if size is None:\n return False\n else:\n return size >= 0\n\n def intersection(lhs, rhs):\n \"\"\"\n Always returns single interval which may be null when it would\n otherwise be not well-defined.\n Not expected to throw for compatible intervals.\n \"\"\"\n assert lhs.__compatible(rhs)\n if lhs.is_null or rhs.is_null:\n return Interval()\n else:\n return Interval(\n max(lhs.start, rhs.start),\n min(lhs.end, rhs.end)\n )\n\n def combination(lhs, rhs):\n \"\"\"\n Returns lhs with as much of rhs as possible.\n \"\"\"\n assert lhs.__compatible(rhs)\n if not lhs.overlapOrTouch(\n rhs) and not lhs.end == rhs.start and not rhs.end == lhs.start:\n return lhs\n else:\n return Interval(\n min(lhs.start, rhs.start) if lhs.start is not None and rhs.start is not None else None,\n max(lhs.end, rhs.end) if lhs.end is not None and rhs.end is not None else None\n )\n\n def leftDifference(lhs, rhs):\n \"\"\"\n Always returns single interval which may be null when it would\n otherwise be not well-defined.\n \"\"\"\n assert lhs.__compatible(rhs)\n if lhs.is_null or rhs.is_null:\n return Interval()\n else:\n return Interval(\n 
lhs.start,\n max(min(rhs.start, lhs.end), lhs.start)\n )\n\n def rightDifference(lhs, rhs):\n \"\"\"\n Always returns single interval which may be null when it would\n otherwise be not well-defined.\n \"\"\"\n assert lhs.__compatible(rhs)\n if lhs.is_null or rhs.is_null:\n return Interval()\n else:\n return Interval(\n min(max(rhs.end, lhs.start), lhs.end),\n lhs.end\n )\n\n\ndef interval_coverage(sub_intervals, main_interval):\n \"\"\"\n Find length within main_interval that is covered by (overlapping) sub-intervals.\n :param sub_intervals:\n :param main_interval:\n :return:\n \"\"\"\n length = 0\n interval_to_add = None\n sorted_intervals = sorted(sub_intervals)\n for interval in sorted_intervals:\n current_interval = interval\n if current_interval.overlap(main_interval):\n # Trim the interval\n current_interval = current_interval.intersection(main_interval)\n\n if interval_to_add is None:\n interval_to_add = current_interval\n\n if interval_to_add.overlap(current_interval):\n interval_to_add = current_interval.combination(interval_to_add)\n else:\n length += interval_to_add.length\n interval_to_add = current_interval\n\n if interval_to_add is not None:\n length += interval_to_add.length\n\n return length\n\n\[email protected]_ordering\nclass ChromInterval(object):\n \"\"\"\n Class to represent a continuous interval of DNA (on one Chromosome!!)\n \"\"\"\n\n def __init__(self, chrom, start=None, end=None):\n \"\"\"\n \"\"\"\n self.chrom = chrom\n self.interval = Interval(start, end)\n\n def __hash__(self):\n return hash((self.chrom, self.interval))\n\n def __repr__(self):\n return \"<ChromInterval: chrom={}, interval={}>\".format(\n self.chrom, self.interval)\n\n def __str__(self):\n if self.interval.is_null:\n # Currently can not run Edna on only second half of chromosome.\n return \"{!s}\".format(self.chrom)\n else:\n return \"{!s}:{!s}\".format(self.chrom, self.interval)\n\n def __eq__(self, other):\n return self.chrom == other.chrom and self.interval == other.interval\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __lt__(self, other):\n if self.chrom == other.chrom:\n return self.interval < other.interval\n else:\n return chromosome_comp(self.chrom, other.chrom)\n\n def __gt__(self, other):\n return not self < other and not self == other\n\n @classmethod\n def from_string(cls, region_string):\n if ':' in region_string:\n chrom, interval = region_string.split(':')\n start, end = [int(x) for x in interval.split('-')]\n return cls(chrom, start, end)\n else:\n chrom = region_string\n return cls(chrom)\n\n def overlap(lhs, rhs):\n if lhs.chrom != rhs.chrom:\n return False\n elif lhs.interval.is_null:\n return True\n elif rhs.interval.is_null:\n return True\n else:\n return lhs.interval.overlap(rhs.interval)\n\n def overlapOrTouch(lhs, rhs):\n if lhs.chrom != rhs.chrom:\n return False\n elif lhs.interval.is_null:\n return True\n elif rhs.interval.is_null:\n return True\n else:\n return lhs.interval.overlapOrTouch(rhs.interval)\n\n @property\n def chrom_index(self):\n return get_chromosome_index(self.chrom)\n\n @property\n def start(self):\n return self.interval.start\n\n @property\n def end(self):\n return self.interval.end\n\n @end.setter\n def end(self, value):\n self.interval.end = value\n\n def __len__(self):\n return self.length\n\n @property\n def length(self):\n return self.interval.length\n\n def intersection(lhs, rhs):\n assert(lhs.chrom == rhs.chrom)\n # interval.is_null = the whole chromosome.\n if lhs.interval.is_null:\n return rhs\n elif 
rhs.interval.is_null:\n return lhs\n else:\n new_interval = lhs.interval.intersection(rhs.interval)\n return ChromInterval(\n lhs.chrom,\n new_interval.start,\n new_interval.end\n )\n\n def normalised(self):\n return ChromInterval(\n standardise_chromosome(\n self.chrom),\n self.interval.start,\n self.interval.end)\n\n\nWHOLE_GENOME = ChromInterval(None)\n\n\ndef read_interval(interval_string):\n start_string, end_string = tuple(interval_string.split(\"-\"))\n start, end = int(start_string), int(end_string)\n if end <= start:\n raise weCallException(\n \"Interval {} does not have start < end\".format(interval_string))\n return Interval(start, end)\n\n\ndef read_chrom_interval(region_string):\n if all(deliminator not in region_string for deliminator in ':,.-'):\n return ChromInterval(region_string)\n else:\n chrom, interval_string = tuple(region_string.split(\":\"))\n # constructor of chrom interval needs to be changed.\n tmp_interval = read_interval(interval_string)\n return ChromInterval(chrom, tmp_interval.start, tmp_interval.end)\n\n\ndef read_full_chrom_interval(region_string, reference_genome):\n if all(deliminator not in region_string for deliminator in ':,.-'):\n chrom = region_string\n return ChromInterval(\n chrom, 0, reference_genome.get_chrom_length(chrom))\n else:\n chrom, interval_string = tuple(region_string.split(\":\"))\n # constructor of chrom interval needs to be changed.\n tmp_interval = read_interval(interval_string)\n return ChromInterval(chrom, tmp_interval.start, tmp_interval.end)\n\n\ndef read_chrom_intervals(region_line):\n for region_string in region_line.split(\",\"):\n yield read_chrom_interval(region_string)\n\n\ndef read_full_chrom_intervals(region_line, reference_genome):\n for region_string in region_line.split(\",\"):\n yield read_full_chrom_interval(region_string, reference_genome)\n\n\ndef merge_intervals(sorted_intervals):\n it = iter(sorted_intervals)\n try:\n merged_intervals = [next(it)]\n except StopIteration:\n return []\n for interval in it:\n if merged_intervals[-1].overlapOrTouch(interval):\n merged_intervals[-1].end = interval.end\n else:\n merged_intervals.append(interval)\n\n return merged_intervals\n", "id": "10902221", "language": "Python", "matching_score": 2.3786730766296387, "max_stars_count": 8, "path": "python/wecall/utils/interval.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.common.exceptions import weCallException\nfrom wecall.utils import interval\nimport unittest\nfrom wecall.utils.interval import read_chrom_intervals, read_chrom_interval, read_interval, ChromInterval, \\\n merge_intervals\n\n\nclass TestInterval(unittest.TestCase):\n\n def setUp(self):\n ref_start = 10\n ref_end = 40\n ref_mid = (ref_start + ref_end) // 2\n\n test_pos = [\n ref_start - 5,\n ref_start - 1,\n ref_start,\n ref_mid,\n ref_end - 1,\n ref_end,\n ref_end + 5,\n ]\n\n self.ref_interval = interval.Interval(ref_start, ref_end)\n\n # Matrix of expected output from contigIntervalsOverlap function. 
Deals\n # explicitly with corner cases instead of relying on alternative\n # function.\n overlaps = [\n [False, False, True, True, True, True, True, ],\n [False, False, True, True, True, True, True, ],\n [False, False, True, True, True, True, True, ],\n [False, False, False, True, True, True, True, ],\n [False, False, False, False, True, True, True, ],\n [False, False, False, False, False, True, True, ],\n [False, False, False, False, False, False, False, ],\n ]\n\n valid_intervals = [\n [True, True, True, True, True, True, True, ],\n [False, True, True, True, True, True, True, ],\n [False, False, True, True, True, True, True, ],\n [False, False, False, True, True, True, True, ],\n [False, False, False, False, True, True, True, ],\n [False, False, False, False, False, True, True, ],\n [False, False, False, False, False, False, True, ],\n ]\n\n intersects = [\n [None, None, (ref_start, ref_start), (ref_start, test_pos[3]), (ref_start, test_pos[4]), (ref_start, ref_end), (ref_start, ref_end), ], # noqa\n [None, None, (ref_start, ref_start), (ref_start, test_pos[3]), (ref_start, test_pos[4]), (ref_start, ref_end), (ref_start, ref_end), ], # noqa\n [None, None, (ref_start, ref_start), (ref_start, test_pos[3]), (ref_start, test_pos[4]), (ref_start, ref_end), (ref_start, ref_end), ], # noqa\n [None, None, None, (test_pos[3], test_pos[3]), (test_pos[3], test_pos[4]), (test_pos[3], ref_end), (test_pos[3], ref_end), ], # noqa\n [None, None, None, None, (test_pos[4], test_pos[4]), (test_pos[4], ref_end), (test_pos[4], ref_end), ], # noqa\n [None, None, None, None, None, (ref_end, ref_end), (ref_end, ref_end), ], # noqa\n [None, None, None, None, None, None, None, ], # noqa\n ]\n\n combinations = [\n [(ref_start, ref_end), (ref_start, ref_end), (test_pos[0], ref_end), (test_pos[0], ref_end), (test_pos[0], ref_end), (test_pos[0], ref_end), (test_pos[0], test_pos[6]), ], # noqa\n [(ref_start, ref_end), (ref_start, ref_end), (test_pos[1], ref_end), (test_pos[1], ref_end), (test_pos[1], ref_end), (test_pos[1], ref_end), (test_pos[1], test_pos[6]), ], # noqa\n [(ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, test_pos[6]), ], # noqa\n [(ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, test_pos[6]), ], # noqa\n [(ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, test_pos[6]), ], # noqa\n [(ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, test_pos[6]), ], # noqa\n [(ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), ], # noqa\n ]\n\n left_differences = [\n [(ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), ], # noqa\n [None, (ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), ], # noqa\n [None, None, (ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), (ref_start, ref_start), ], # noqa\n [None, None, None, (ref_start, test_pos[3]), (ref_start, test_pos[3]), (ref_start, test_pos[3]), 
(ref_start, test_pos[3]), ], # noqa\n [None, None, None, None, (ref_start, test_pos[4]), (ref_start, test_pos[4]), (ref_start, test_pos[4]), ], # noqa\n [None, None, None, None, None, (ref_start, ref_end), (ref_start, ref_end), ], # noqa\n [None, None, None, None, None, None, (ref_start, ref_end), ], # noqa\n ]\n\n right_differences = [\n [(ref_start, ref_end), (ref_start, ref_end), (ref_start, ref_end), (test_pos[3], ref_end), (test_pos[4], ref_end), (ref_end, ref_end), (ref_end, ref_end), ], # noqa\n [None, (ref_start, ref_end), (ref_start, ref_end), (test_pos[3], ref_end), (test_pos[4], ref_end), (ref_end, ref_end), (ref_end, ref_end), ], # noqa\n [None, None, (ref_start, ref_end), (test_pos[3], ref_end), (test_pos[4], ref_end), (ref_end, ref_end), (ref_end, ref_end), ], # noqa\n [None, None, None, (test_pos[3], ref_end), (test_pos[4], ref_end), (ref_end, ref_end), (ref_end, ref_end), ], # noqa\n [None, None, None, None, (test_pos[4], ref_end), (ref_end, ref_end), (ref_end, ref_end), ], # noqa\n [None, None, None, None, None, (ref_end, ref_end), (ref_end, ref_end), ], # noqa\n [None, None, None, None, None, None, (ref_end, ref_end), ], # noqa\n ]\n\n self.test_intervals = []\n\n # Expecting no exceptions.\n for start_index, start_pos in enumerate(test_pos):\n for end_index, end_pos in enumerate(test_pos):\n\n test_interval = interval.Interval(start_pos, end_pos)\n\n intersect = self.ref_interval.intersection(test_interval)\n combination = self.ref_interval.combination(test_interval)\n\n left_difference = self.ref_interval.leftDifference(test_interval) # noqa\n right_difference = self.ref_interval.rightDifference(test_interval) # noqa\n union = left_difference.combination(intersect).combination(right_difference) # noqa\n\n # Find actual overlap with interval.\n actual_overlap = test_interval.overlapOrTouch(self.ref_interval) # noqa\n # Find actual overlap, reverse arguments.\n ractual_overlap = self.ref_interval.overlapOrTouch(test_interval) # noqa\n\n # Note in tests use expected_overlap rather than actual_overlap or ractual_overlap. 
# noqa\n self.test_intervals.append({\n \"interval\": test_interval,\n \"expected_overlap\": overlaps[start_index][end_index],\n \"actual_overlap\": actual_overlap,\n \"ractual_overlap\": ractual_overlap,\n \"start_index\": start_index,\n \"end_index\": end_index,\n \"expected_intersect\": intersects[start_index][end_index],\n \"actual_intersect\": intersect,\n \"expected_combination\": combinations[start_index][end_index], # noqa\n \"actual_combination\": combination,\n \"expected_left_difference\": left_differences[start_index][end_index], # noqa\n \"actual_left_difference\": left_difference,\n \"expected_right_difference\": right_differences[start_index][end_index], # noqa\n \"actual_right_difference\": right_difference,\n \"actual_union\": union,\n \"valid_interval\": valid_intervals[start_index][end_index],\n })\n\n # Manually add interesting cases.\n\n def test_contigIntervalsOverlap(self):\n\n for item in self.test_intervals:\n self.assertTrue(\n item[\"expected_overlap\"] is item[\"actual_overlap\"],\n \", \".join((x.format(self.ref_interval, **item) for x in (\n # test-specific data\n \"expected: {expected_overlap}\",\n \"actual: {actual_overlap}\",\n # data common to all tests\n \"interval: {interval!r}\",\n \"reference: {0!r}\",\n \"start-index: {start_index}\",\n \"end-index: {end_index}\",\n )))\n )\n self.assertTrue(\n item[\"expected_overlap\"] is item[\"ractual_overlap\"],\n \", \".join((x.format(self.ref_interval, **item) for x in (\n # test-specific data\n \"expected: {expected_overlap}\",\n \"r-actual: {ractual_overlap}\",\n # data common to all tests\n \"interval: {interval!r}\",\n \"reference: {0!r}\",\n \"start-index: {start_index}\",\n \"end-index: {end_index}\",\n )))\n )\n\n def __standardTest(self, expected, actual):\n return actual.is_null \\\n if expected is None else actual is not None and actual.start == expected[0] and actual.end == expected[1]\n\n def test_intersection(self):\n\n for item in self.test_intervals:\n expected = item[\"expected_intersect\"]\n actual = item[\"actual_intersect\"]\n\n self.assertTrue(\n self.__standardTest(expected, actual),\n \", \".join((x.format(self.ref_interval, **item) for x in (\n # test-specific data\n \"expected: {expected_intersect}\",\n \"actual: {actual_intersect}\",\n # data common to all tests\n \"interval: {interval!r}\",\n \"reference: {0!r}\",\n \"start-index: {start_index}\",\n \"end-index: {end_index}\",\n )))\n )\n\n def test_combine(self):\n for item in self.test_intervals:\n expected = item[\"expected_combination\"]\n actual = item[\"actual_combination\"]\n\n self.assertTrue(\n self.__standardTest(expected, actual),\n \", \".join((x.format(self.ref_interval, **item) for x in (\n # test-specific data\n \"expected: {expected_combination}\",\n \"actual: {actual_combination}\",\n # data common to all tests\n \"interval: {interval!r}\",\n \"reference: {0!r}\",\n \"start-index: {start_index}\",\n \"end-index: {end_index}\",\n )))\n )\n\n def test_left_differences(self):\n for item in self.test_intervals:\n expected = item[\"expected_left_difference\"]\n actual = item[\"actual_left_difference\"]\n\n self.assertTrue(\n self.__standardTest(expected, actual),\n \", \".join((x.format(self.ref_interval, **item) for x in (\n # test-specific data\n \"expected: {expected_left_difference}\",\n \"actual: {actual_left_difference}\",\n # data common to all tests\n \"interval: {interval!r}\",\n \"reference: {0!r}\",\n \"start-index: {start_index}\",\n \"end-index: {end_index}\",\n )))\n )\n\n def test_right_differences(self):\n 
for item in self.test_intervals:\n expected = item[\"expected_right_difference\"]\n actual = item[\"actual_right_difference\"]\n\n self.assertTrue(\n self.__standardTest(expected, actual),\n \", \".join((x.format(self.ref_interval, **item) for x in (\n # test-specific data\n \"expected: {expected_right_difference}\",\n \"actual: {actual_right_difference}\",\n # data common to all tests\n \"interval: {interval!r}\",\n \"reference: {0!r}\",\n \"start-index: {start_index}\",\n \"end-index: {end_index}\",\n )))\n )\n\n def test_partition_union_validity(self):\n \"\"\"\n Test validity of the result of `L(A\\B) + (AnB) + R(A\\B)`.\n \"\"\"\n\n for item in self.test_intervals:\n\n failure_message = \", \".join((x.format(self.ref_interval, **item) for x in (\n # test-specific data\n \"left_difference: {actual_left_difference}\",\n \"intersect: {actual_intersect}\",\n \"right_difference: {actual_right_difference}\",\n \"union: {actual_union}\",\n # data common to all tests\n \"interval: {interval!r}\",\n \"reference: {0!r}\",\n \"start-index: {start_index}\",\n \"end-index: {end_index}\",\n )))\n\n self.assertTrue(\n item[\"actual_union\"].is_null != item[\"valid_interval\"],\n failure_message\n )\n\n def test_partition_union(self):\n \"\"\"\n Tests that `A = L(A\\B) + (AnB) + R(A\\B)` when B is valid.\n \"\"\"\n\n for item in self.test_intervals:\n\n failure_message = \", \".join((x.format(self.ref_interval, **item) for x in (\n # test-specific data\n \"left_difference: {actual_left_difference}\",\n \"intersect: {actual_intersect}\",\n \"right_difference: {actual_right_difference}\",\n \"union: {actual_union}\",\n # data common to all tests\n \"interval: {interval!r}\",\n \"reference: {0!r}\",\n \"start-index: {start_index}\",\n \"end-index: {end_index}\",\n )))\n\n if item[\"valid_interval\"]:\n\n self.assertTrue(\n self.ref_interval == item[\"actual_union\"],\n failure_message\n )\n\n def test_contigInterval_equal(self):\n contigs = [\"chr1\", \"chr2\"]\n starts = [0, 5]\n ends = [10, 20]\n ref_interval = interval.Interval(starts[0], ends[0])\n\n self.assertEqual(ref_interval, ref_interval)\n\n for index1, contig in enumerate(contigs):\n for index2, start in enumerate(starts):\n for index3, end in enumerate(ends):\n if index2 == 0 and index3 == 0:\n self.assertEqual(\n ref_interval,\n interval.Interval(start, end)\n )\n else:\n self.assertNotEqual(\n ref_interval,\n interval.Interval(start, end)\n )\n\n def test_interval_serialisation(self):\n interval_a = interval.Interval(10, 20)\n interval_b = interval.Interval(30, 40)\n\n self.assertEqual(\n interval_a,\n interval.Interval.fromDict(interval.Interval.toDict(interval_a))\n )\n\n self.assertEqual(\n interval_b,\n interval.Interval.fromDict(interval.Interval.toDict(interval_b))\n )\n\n\nclass TestIntervalCoverage(unittest.TestCase):\n\n def test_overlap_size(self):\n self.assertEqual(\n interval.Interval.overlap_size(\n interval.Interval(0, 10),\n interval.Interval(20, 300),\n ),\n None\n )\n\n self.assertEqual(\n interval.Interval.overlap_size(\n interval.Interval(0, 10),\n interval.Interval(5, 12),\n ),\n 5\n )\n\n self.assertEqual(\n interval.Interval.overlap_size(\n interval.Interval(0, 10),\n interval.Interval(2, 5),\n ),\n 3\n )\n\n def test_interval_coverage_zero_length(self):\n self.assertEqual(\n interval.interval_coverage(\n {}, interval.Interval(\n 0, 10)), 0)\n\n self.assertEqual(\n interval.interval_coverage(\n {interval.Interval(1, 1)},\n interval.Interval(0, 10)\n ),\n 0\n )\n\n def 
test_interval_coverage_outside_main_interval(self):\n self.assertEqual(\n interval.interval_coverage(\n {\n interval.Interval(-2, 0),\n interval.Interval(10, 12)\n },\n interval.Interval(0, 10)\n ),\n 0\n )\n\n def test_interval_coverage_overlap_left(self):\n self.assertEqual(\n interval.interval_coverage(\n {interval.Interval(-2, 1)},\n interval.Interval(0, 10)\n ),\n 1\n )\n\n def test_interval_coverage_overlap_right(self):\n self.assertEqual(\n interval.interval_coverage(\n {interval.Interval(9, 11)},\n interval.Interval(0, 10)\n ),\n 1\n )\n\n def test_interval_coverage_inclusion(self):\n self.assertEqual(\n interval.interval_coverage(\n {interval.Interval(-1, 11)},\n interval.Interval(0, 10)\n ),\n 10\n )\n\n self.assertEqual(\n interval.interval_coverage(\n {interval.Interval(0, 10)},\n interval.Interval(0, 10)\n ),\n 10\n )\n\n def test_interval_coverage_middle(self):\n self.assertEqual(\n interval.interval_coverage(\n {\n interval.Interval(1, 4),\n interval.Interval(5, 6)\n },\n interval.Interval(0, 10)\n ),\n 4\n )\n\n self.assertEqual(\n interval.interval_coverage(\n {\n interval.Interval(5, 6),\n interval.Interval(1, 4),\n },\n interval.Interval(0, 10)\n ),\n 4\n )\n\n self.assertEqual(\n interval.interval_coverage(\n {\n interval.Interval(1, 4),\n interval.Interval(3, 6)\n },\n interval.Interval(0, 10)\n ),\n 5\n )\n\n self.assertEqual(\n interval.interval_coverage(\n {\n interval.Interval(1, 3),\n interval.Interval(1, 6),\n interval.Interval(5, 7),\n },\n interval.Interval(0, 10)\n ),\n 6\n )\n\n self.assertEqual(\n interval.interval_coverage(\n {\n interval.Interval(1, 6),\n interval.Interval(1, 3),\n interval.Interval(5, 7),\n },\n interval.Interval(0, 10)\n ),\n 6\n )\n\n\nclass TestChromInterval(unittest.TestCase):\n def test_equality(self):\n self.assertTrue(ChromInterval(\"1\", 0, 0) == ChromInterval(\"1\", 0, 0))\n self.assertFalse(ChromInterval(\"1\", 0, 0) != ChromInterval(\"1\", 0, 0))\n\n def test_overlap(self):\n intvl1 = interval.ChromInterval(\"chr1\", 1, 10)\n intvl2 = interval.ChromInterval(\"chr2\", 1, 10)\n self.assertFalse(intvl1.overlap(intvl2))\n self.assertTrue(intvl1.overlap(intvl1))\n\n def test_should_raise_assertion_for_different_chroms(self):\n intvl1 = interval.ChromInterval(\"chr1\", 1, 10)\n intvl2 = interval.ChromInterval(\"chr2\", 1, 10)\n self.assertRaises(\n AssertionError,\n interval.ChromInterval.intersection,\n intvl1,\n intvl2)\n\n def test_should_form_sensible_chrom_interval(self):\n intvl1 = interval.ChromInterval(\"chr1\", 1, 10)\n intvl2 = interval.ChromInterval(\"chr1\", 5, 15)\n self.assertEqual(\n intvl1.intersection(intvl2),\n interval.ChromInterval(\n \"chr1\",\n 5,\n 10))\n\n def test_chrom_interval_in_short_form_should_overlap(self):\n intvl1 = interval.ChromInterval(\"chr1\")\n intvl2 = interval.ChromInterval(\"chr1\", 5, 15)\n self.assertTrue(intvl1.overlap(intvl2))\n\n def test_chrom_interval_in_short_form_should_give_valid_intersection(self):\n intvl1 = interval.ChromInterval(\"chr1\")\n intvl2 = interval.ChromInterval(\"chr1\", 5, 15)\n self.assertEqual(intvl1.intersection(intvl2), intvl2)\n\n def test_should_initialise_from_region_string(self):\n interval = ChromInterval.from_string(\"20:1-10\")\n self.assertEqual(interval.chrom, \"20\")\n self.assertEqual(interval.start, 1)\n self.assertEqual(interval.end, 10)\n\n def test_should_initialise_from_chrom_only_region_string(self):\n interval = ChromInterval.from_string(\"20\")\n self.assertEqual(interval.chrom, \"20\")\n\n\nclass 
TestChromIntervalListMerging(unittest.TestCase):\n def test_should_merge_identical_intervals(self):\n chrom_interval = interval.ChromInterval(\"chr1\", 1, 10)\n interval_list = [chrom_interval, chrom_interval]\n\n merged_intervals = merge_intervals(interval_list)\n self.assertListEqual([chrom_interval], merged_intervals)\n\n def test_should_merge_overlapping_intervals(self):\n interval_list = [\n interval.ChromInterval(\"chr1\", 1, 10),\n interval.ChromInterval(\"chr1\", 8, 15)\n ]\n\n merged_intervals = merge_intervals(interval_list)\n self.assertListEqual(\n [interval.ChromInterval(\"chr1\", 1, 15)], merged_intervals)\n\n def test_should_merge_touching_intervals(self):\n interval_list = [\n interval.ChromInterval(\"chr1\", 1, 10),\n interval.ChromInterval(\"chr1\", 10, 15)\n ]\n\n merged_intervals = merge_intervals(interval_list)\n self.assertListEqual(\n [interval.ChromInterval(\"chr1\", 1, 15)], merged_intervals)\n\n def test_should_not_merge_non_overlapping_intervals(self):\n interval_list = [\n interval.ChromInterval(\"chr1\", 1, 10),\n interval.ChromInterval(\"chr1\", 11, 15)\n ]\n\n merged_intervals = merge_intervals(interval_list)\n self.assertListEqual(interval_list, merged_intervals)\n\n\nclass TestReadInterval(unittest.TestCase):\n def test_should_fail_if_string_not_formatted_correctly(self):\n interval_rep = \"kasj,a\"\n self.assertRaises(Exception, read_interval, interval_rep)\n\n def test_should_fail_if_start_is_not_castable_to_int(self):\n interval_rep = \"xxx-1000\"\n self.assertRaises(ValueError, read_interval, interval_rep)\n\n def test_should_fail_if_end_is_not_castable_to_int(self):\n interval_rep = \"0-xxx\"\n self.assertRaises(ValueError, read_interval, interval_rep)\n\n def test_should_fail_if_string_contains_too_many_deliminators(self):\n interval_rep = \"0-100-1000\"\n self.assertRaises(ValueError, read_interval, interval_rep)\n\n def test_should_fail_if_end_is_less_or_equal_to_start(self):\n interval_rep = \"100-100\"\n self.assertRaises(weCallException, read_interval, interval_rep)\n\n def test_should_parse_correctly_formatted_string(self):\n interval_rep = \"10-100\"\n self.assertEqual(\n read_interval(interval_rep),\n interval.Interval(\n 10,\n 100))\n\n def test_should_parse_string_representation_of_interval_class(self):\n test_interval = interval.Interval(19, 20)\n self.assertEqual(\n read_interval(\n str(test_interval)), interval.Interval(\n 19, 20))\n\n\nclass TestReadChromInterval(unittest.TestCase):\n def test_should_read_chrom_represenation_correctly(self):\n chrom_representation = \"20\"\n\n test_interval = read_chrom_interval(chrom_representation)\n\n self.assertEqual(\n test_interval,\n interval.ChromInterval(chrom_representation))\n\n def test_should_fail_if_missing_chrom(self):\n region_string = \"0-100\"\n self.assertRaises(ValueError, read_chrom_interval, region_string)\n\n def test_should_read_full_region_string_correctly(self):\n region_string = \"20:0-100\"\n test_chrom_interval = read_chrom_interval(region_string)\n self.assertEqual(\n test_chrom_interval,\n interval.ChromInterval(\n \"20\",\n 0,\n 100))\n\n def test_should_fail_if_string_contains_too_many_deliminators(self):\n region_string = \"20:0-100:20\"\n self.assertRaises(ValueError, read_chrom_interval, region_string)\n\n\nclass TestReadChromIntervals(unittest.TestCase):\n def test_should_read_list_of_comma_separated_chrom_intervals_into_list_of_correct_size(self):\n chrom_represenation = \",\".join([str(_) for _ in range(0, 2)])\n intervals = 
list(read_chrom_intervals(chrom_represenation))\n\n self.assertEqual(len(intervals), 2)\n\n\nclass TestChromIntervalLessThan(unittest.TestCase):\n def test_should_fail_if_chromosome_is_not_standarised(self):\n test_interval_1 = ChromInterval(\"BAA\", 0, 100000)\n test_interval_2 = ChromInterval(\"20\", 0, 100000)\n self.assertTrue(test_interval_2 < test_interval_1)\n\n def test_should_return_true_if_chrom_is_smaller_in_standard_chrom_ordering(self):\n test_interval_1 = ChromInterval(\"1\", 1000000, 2000000)\n test_interval_2 = ChromInterval(\"2\", 0, 1)\n self.assertTrue(test_interval_1 < test_interval_2)\n\n def test_should_return_false_if_chrom_is_greater_in_standard_chrom_ordering(self):\n test_interval_1 = ChromInterval(\"2\", 0, 1)\n test_interval_2 = ChromInterval(\"1\", 1000000, 2000000)\n self.assertFalse(test_interval_1 < test_interval_2)\n\n def test_should_return_true_if_chroms_are_equal_and_interval_compares_less(self):\n test_interval_1 = ChromInterval(\"20\", 0, 1)\n test_interval_2 = ChromInterval(\"20\", 10000000, 20000000)\n self.assertTrue(test_interval_1 < test_interval_2)\n\n def test_should_return_false_if_chroms_are_equal_and_interval_does_not_compares_less(self):\n test_interval_1 = ChromInterval(\"20\", 10000000, 20000000)\n test_interval_2 = ChromInterval(\"20\", 0, 1)\n self.assertFalse(test_interval_1 < test_interval_2)\n\n\nclass TestChromIntervalGreaterThan(unittest.TestCase):\n def test_should_sort_chromosome_is_not_standarised(self):\n test_interval_1 = ChromInterval(\"BAA\", 0, 100000)\n test_interval_2 = ChromInterval(\"20\", 0, 100000)\n self.assertTrue(test_interval_1 > test_interval_2)\n\n def test_should_return_false_if_chrom_is_smaller_in_standard_chrom_ordering(self):\n test_interval_1 = ChromInterval(\"1\", 1000000, 2000000)\n test_interval_2 = ChromInterval(\"2\", 0, 1)\n self.assertFalse(test_interval_1 > test_interval_2)\n\n def test_should_return_true_if_chrom_is_greater_in_standard_chrom_ordering(self):\n test_interval_1 = ChromInterval(\"2\", 0, 1)\n test_interval_2 = ChromInterval(\"1\", 1000000, 2000000)\n self.assertTrue(test_interval_1 > test_interval_2)\n\n def test_should_return_false_if_chroms_are_equal_and_interval_compares_less(self):\n test_interval_1 = ChromInterval(\"20\", 0, 1)\n test_interval_2 = ChromInterval(\"20\", 10000000, 20000000)\n self.assertFalse(test_interval_1 > test_interval_2)\n\n def test_should_return_true_if_chroms_are_equal_and_interval_does_not_compares_less(self):\n test_interval_1 = ChromInterval(\"20\", 10000000, 20000000)\n test_interval_2 = ChromInterval(\"20\", 0, 1)\n self.assertTrue(test_interval_1 > test_interval_2)\n", "id": "360178", "language": "Python", "matching_score": 3.6109256744384766, "max_stars_count": 8, "path": "test/test_utils/utils/test_interval.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport unittest\n\nfrom wecall.genomics import variant\nfrom wecall.utils.interval import ChromInterval\n\n\nclass TestvVariant(unittest.TestCase):\n\n def test_eq(self):\n reference = variant.Variant(\"1\", 10, \"ACGT\", \"ACC\")\n self.assertEqual(reference, variant.Variant(\"1\", 10, \"ACGT\", \"ACC\"))\n self.assertNotEqual(reference, variant.Variant(\"2\", 10, \"ACGT\", \"ACC\"))\n self.assertNotEqual(reference, variant.Variant(\"1\", 11, \"ACGT\", \"ACC\"))\n self.assertNotEqual(reference, variant.Variant(\"1\", 10, \"ACGC\", \"ACC\"))\n self.assertNotEqual(reference, variant.Variant(\"1\", 10, \"ACGT\", \"ACT\"))\n\n def test_ne(self):\n \"\"\"\n Note: 
self.assert[Not]Equal() doesn't work in this case.\n \"\"\"\n reference = variant.Variant(\"1\", 10, \"ACGT\", \"ACC\")\n self.assertFalse(reference != variant.Variant(\"1\", 10, \"ACGT\", \"ACC\"))\n self.assertTrue(reference != variant.Variant(\"2\", 10, \"ACGT\", \"ACC\"))\n self.assertTrue(reference != variant.Variant(\"1\", 11, \"ACGT\", \"ACC\"))\n self.assertTrue(reference != variant.Variant(\"1\", 10, \"ACGC\", \"ACC\"))\n self.assertTrue(reference != variant.Variant(\"1\", 10, \"ACGT\", \"ACT\"))\n\n def test_lt(self):\n reference = variant.Variant(\"1\", 10, \"ACGT\", \"ACC\")\n self.assertFalse(reference < reference)\n self.assertGreaterEqual(\n reference, variant.Variant(\n \"1\", 10, \"ACGT\", \"ACC\"))\n self.assertLess(reference, variant.Variant(\"2\", 10, \"ACGT\", \"ACC\"))\n self.assertLess(reference, variant.Variant(\"1\", 11, \"ACGT\", \"ACC\"))\n self.assertLess(reference, variant.Variant(\"1\", 10, \"CCGT\", \"ACC\"))\n self.assertLess(reference, variant.Variant(\"1\", 10, \"ACGT\", \"ACT\"))\n\n def test_as_key(self):\n variant_obj = variant.Variant(\"1\", 10, \"ACGT\", \"ACC\")\n expected_key = (\"1\", 10, 14, \"ACGT\", \"ACC\")\n self.assertEqual(\n hash(expected_key),\n hash(variant_obj),\n \"Variant hash is broken.\")\n\n def test_variant_type(self):\n chrom = \"1\"\n pos_from = 100\n self.assertEqual(variant.Variant(chrom, pos_from, 'T', '.').type, variant.TYPE_REF)\n self.assertEqual(variant.Variant(chrom, pos_from, 'C', 'C').type, variant.TYPE_REF)\n self.assertEqual(variant.Variant(chrom, pos_from, 'CTC', 'CTC').type, variant.TYPE_REF)\n self.assertEqual(variant.Variant(chrom, pos_from, 'T', 'TA').type, variant.TYPE_INS)\n self.assertEqual(variant.Variant(chrom, pos_from, 'CTG', 'CTAG').type, variant.TYPE_INS)\n self.assertEqual(variant.Variant(chrom, pos_from, 'TA', 'A').type, variant.TYPE_DEL)\n self.assertEqual(variant.Variant(chrom, pos_from, 'AT', 'A').type, variant.TYPE_DEL)\n self.assertEqual(variant.Variant(chrom, pos_from, 'T', 'A').type, variant.TYPE_SNP)\n self.assertEqual(variant.Variant(chrom, pos_from, 'TGT', 'AGT').type, variant.TYPE_SNP)\n self.assertEqual(variant.Variant(chrom, pos_from, 'TGTT', 'TGAT').type, variant.TYPE_SNP)\n self.assertEqual(variant.Variant(chrom, pos_from, 'AGTT', 'TGAT').type, variant.TYPE_MNP)\n self.assertEqual(variant.Variant(chrom, pos_from, 'AGTTATAT', 'TGATAAAT').type, variant.TYPE_MNP)\n self.assertEqual(variant.Variant(chrom, pos_from, 'A', '<INS:ME_ALU>').type, variant.TYPE_SYM)\n\n\nclass TestVariantOverlapsRegion(unittest.TestCase):\n\n def test_should_overlap_insertion_at_region_end(self):\n ins = variant.Variant('1', 15, 'A', 'AAAAA')\n self.assertTrue(ins.overlap(ChromInterval('1', 15, 16)))\n\n def test_should_not_overlap_insertion_after_region(self):\n ins = variant.Variant('1', 15, 'A', 'AAAAA')\n self.assertFalse(ins.overlap(ChromInterval('1', 14, 15)))\n\n def test_should_not_overlap_insertion_before_region(self):\n ins = variant.Variant('1', 15, 'A', 'AAAAA')\n self.assertFalse(ins.overlap(ChromInterval('1', 16, 20)))\n\n def test_should_overlap_deletion_at_region_start(self):\n deletion = variant.Variant('1', 15, 'AAAAA', 'A')\n self.assertTrue(deletion.overlap(ChromInterval('1', 19, 20)))\n\n def test_should_overlap_deletion_at_region_end(self):\n deletion = variant.Variant('1', 15, 'AAAAA', 'A')\n self.assertTrue(deletion.overlap(ChromInterval('1', 15, 16)))\n\n def test_should_not_overlap_deletion_before_region(self):\n deletion = variant.Variant('1', 15, 'AAAAA', 'A')\n 
self.assertFalse(deletion.overlap(ChromInterval('1', 14, 15)))\n\n def test_should_not_overlap_deletion_after_region(self):\n deletion = variant.Variant('1', 15, 'AAAAA', 'A')\n self.assertFalse(deletion.overlap(ChromInterval('1', 20, 21)))\n\n def test_should_overlap_mnp_at_region_start(self):\n mnp = variant.Variant('1', 15, 'ACGT', 'GCGC')\n self.assertTrue(mnp.overlap(ChromInterval('1', 18, 19)))\n\n def test_should_overlap_mnp_at_region_end(self):\n mnp = variant.Variant('1', 15, 'ACGT', 'GCGC')\n self.assertTrue(mnp.overlap(ChromInterval('1', 15, 16)))\n\n def test_should_not_overlap_mnp_before_region(self):\n mnp = variant.Variant('1', 15, 'ACGT', 'GCGC')\n self.assertFalse(mnp.overlap(ChromInterval('1', 14, 15)))\n\n def test_should_not_overlap_mnp_after_region(self):\n mnp = variant.Variant('1', 15, 'ACGT', 'GCGC')\n self.assertFalse(mnp.overlap(ChromInterval('1', 19, 20)))\n", "id": "9932829", "language": "Python", "matching_score": 3.6128857135772705, "max_stars_count": 8, "path": "test/test_utils/genomics/test_variant.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport re\nimport functools\n\nfrom wecall.genomics.chromosome import chromosome_comp\nfrom wecall.utils.interval import ChromInterval\n\n\"\"\"\nThe variant module defines the standardised, internal representation of variant data which\nis used throughout the wecall code.\n\"\"\"\n\n\n# Module constants\nTYPE_REF = 0\nTYPE_SNP = 1\nTYPE_INS = 2\nTYPE_DEL = 3\nTYPE_MNP = 4\nTYPE_SYM = 5\n\nSMALL_VARIANT_TYPES = [TYPE_REF, TYPE_SNP, TYPE_INS, TYPE_DEL, TYPE_MNP]\n\nTYPE_TO_STR = {\n TYPE_REF: 'REF',\n TYPE_SNP: 'SNP',\n TYPE_INS: 'INS',\n TYPE_DEL: 'DEL',\n TYPE_MNP: 'MNP',\n TYPE_SYM: 'SYM'\n}\n\nSTR_TO_TYPE = {value: key for key, value in list(TYPE_TO_STR.items())}\n\nDB_VARIANT_TYPES = {\n \"All\": {\n TYPE_SNP,\n TYPE_INS,\n TYPE_DEL,\n TYPE_MNP\n },\n \"SNP\": {TYPE_SNP},\n \"MNP\": {TYPE_MNP},\n \"Ins\": {TYPE_INS},\n \"Del\": {TYPE_DEL},\n \"INDEL\": {TYPE_DEL, TYPE_INS},\n}\n\n\ndef mnp_to_snps(var):\n \"\"\"\n Split multi-nucleotide replacement variant objects into multiple\n objects representing SNPs.\n\n Apparently this is deprectated - for what reason and what should it be replaced with?\n \"\"\"\n assert var.type == TYPE_MNP, \"Invalid variant type\"\n\n for index, (refBase, altBase) in enumerate(zip(var.ref, var.alt)):\n if refBase != altBase:\n yield Variant(var.chrom, var.pos_from + index, refBase, altBase)\n\n\ndef variant_type(ref, alt):\n \"\"\"\n Determine the type of variant we have, based on the REF and ALT\n strings:\n \"\"\"\n # all REFs\n if ref == alt or alt == '.' or alt == '<NON_REF>':\n return TYPE_REF\n\n # Symbolic alleles - (typically ignored or filtered out)\n if alt.startswith(('<')):\n return TYPE_SYM\n\n # simple SNPs\n if len(ref) == 1 and len(alt) == 1:\n return TYPE_SNP\n\n # INDELs\n elif len(ref) < len(alt):\n return TYPE_INS\n elif len(ref) > len(alt):\n return TYPE_DEL\n\n # MNPs or complex SNPs\n else:\n # NOTE: len(ref) == len(alt) here.\n n_diff = len([(c1, c2) for c1, c2 in zip(ref, alt) if c1 != c2])\n assert(n_diff > 0)\n return TYPE_SNP if n_diff == 1 else TYPE_MNP\n\n\[email protected]_ordering\nclass Variant(object):\n \"\"\"\n Standard internal variant representation. 
To be used everywhere in the\n wecall code when we are storing and passing around variant data.\n \"\"\"\n\n __slots__ = ('chrom', 'pos_from', 'ref', 'alt')\n\n def __init__(self, chrom, pos_from, ref, alt):\n self.chrom = chrom\n self.pos_from = pos_from\n self.ref = ref\n self.alt = alt\n\n @property\n def pos_to(self):\n return self.pos_from + len(self.ref)\n\n @property\n def insert_size(self):\n return len(self.alt) - self.length\n\n @property\n def length(self):\n return len(self.ref)\n\n @property\n def type(self):\n return variant_type(self.ref, self.alt)\n\n @property\n def one_indexed_pos_from(self):\n return self.pos_from + 1\n\n @property\n def one_indexed_pos_to(self):\n return self.pos_to + 1\n\n def overlap(self, chrom_interval):\n return chrom_interval.overlap(\n ChromInterval(\n self.chrom,\n self.pos_from,\n self.pos_to))\n\n def __hash__(self):\n \"\"\"\n This allows the Variant to be used as e.g. a key for a\n dictionary, or stored in a set.\n \"\"\"\n return hash((\n self.chrom,\n self.pos_from,\n self.pos_to,\n self.ref,\n self.alt\n ))\n\n def __eq__(self, other):\n \"\"\"\n NOTE: This is non-symmetric (by def self cannot be None)\n \"\"\"\n # TODO: deal with ambiguous representations caused by non-left-aligned\n # data\n if other is None:\n return False\n\n return self.chrom == other.chrom and\\\n self.pos_from == other.pos_from and\\\n self.pos_to == other.pos_to and\\\n self.ref == other.ref and\\\n self.alt == other.alt\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __lt__(self, other):\n \"\"\"\n Define comparison operator for variants, to allow sorting.\n \"\"\"\n # TODO: deal with ambiguous representations caused by non-left-aligned\n # data\n if self.chrom != other.chrom:\n return chromosome_comp(self.chrom, other.chrom)\n elif self.pos_from != other.pos_from:\n return self.pos_from < other.pos_from\n elif self.pos_to != other.pos_to:\n return self.pos_to < other.pos_to\n elif self.ref != other.ref:\n return self.ref < other.ref\n elif self.alt != other.alt:\n return self.alt < other.alt\n else:\n return False\n\n def __str__(self):\n return \"Variant({}: {} - {}, {} --> {})\".format(self.chrom,\n self.pos_from, self.pos_to, self.ref, self.alt)\n\n def __repr__(self):\n return self.__str__()\n", "id": "9326292", "language": "Python", "matching_score": 1.7779046297073364, "max_stars_count": 8, "path": "python/wecall/genomics/variant.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport functools\n\nfrom wecall.genomics.variant import Variant\nfrom wecall.utils.interval import ChromInterval\n\n\[email protected]_ordering\nclass BEDRecord(object):\n\n def __init__(\n self,\n chrom,\n start,\n end,\n label=None,\n score=None,\n strand=None,\n build=None):\n self.chrom_interval = ChromInterval(chrom, start, end)\n self.build = build\n self.label = label\n self.score = score\n self.strand = strand\n\n def __hash__(self):\n return hash((\n self.chrom_interval,\n self.build,\n self.label,\n self.score,\n self.strand\n ))\n\n def __repr__(self):\n return \"<{}:{}>\".format(\n type(self).__name__,\n \", \".join((\n \"chrom_interval: {}\".format(self.chrom_interval),\n \"build: {}\".format(self.build),\n \"label: {}\".format(self.label),\n \"score: {}\".format(self.score),\n \"strand: {}\".format(self.strand),\n ))\n )\n\n def __len__(self):\n return len(self.chrom_interval)\n\n def __lt__(self, other):\n return self.chrom_interval < other.chrom_interval\n\n def __eq__(self, other):\n return all((\n self.chrom_interval == 
other.chrom_interval,\n self.build == other.build,\n self.label == other.label,\n self.score == other.score,\n self.strand == other.strand\n ))\n\n @property\n def interval(self):\n return self.chrom_interval.interval\n\n @property\n def chrom(self):\n return self.chrom_interval.chrom\n\n @property\n def start(self):\n return self.chrom_interval.start\n\n @property\n def end(self):\n return self.chrom_interval.end\n\n @property\n def variant(self):\n return Variant(self.chrom, self.start, '.', '.')\n\n def __str__(self):\n return \"\\t\".join(\n str(r) if r is not None else '.' for r in [\n self.chrom,\n self.start,\n self.end,\n self.label,\n self.score,\n self.strand])\n", "id": "11248206", "language": "Python", "matching_score": 0.7013764381408691, "max_stars_count": 8, "path": "python/wecall/bedutils/bedrecord.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.chromosome import standardise_chromosome\nimport pysam\n\n\nclass TabixWrapper(object):\n\n def __init__(self, tabix_filename):\n self.__tabix_file = pysam.Tabixfile(tabix_filename, 'r')\n self.__contig_mapping = {standardise_chromosome(\n contig): contig for contig in self.__tabix_file.contigs}\n\n @property\n def header(self):\n return (line for line in self.__tabix_file.header)\n\n @property\n def contigs(self):\n return self.__tabix_file.contigs\n\n def fetch_generator(self, chrom_interval):\n # Tabix will throw a ValueError if the chromosome specified is not\n # present in the index for this file.\n try:\n if chrom_interval.chrom is None:\n return self.__tabix_file.fetch()\n else:\n return self.__tabix_file.fetch(\n self.__contig_mapping.get(\n chrom_interval.chrom,\n chrom_interval.chrom),\n chrom_interval.interval.start,\n chrom_interval.interval.end)\n except ValueError:\n raise StopIteration\n\n def fetch_region(self, region):\n try:\n return self.__tabix_file.fetch(region=region)\n except ValueError:\n raise StopIteration\n\n def close(self):\n self.__tabix_file.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, ex_type, value, traceback):\n self.close()\n", "id": "12798468", "language": "Python", "matching_score": 2.429433584213257, "max_stars_count": 8, "path": "python/wecall/utils/tabix_wrapper.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nimport wecall.utils.interval\nfrom wecall.vcfutils.writer import VCFWriterContextManager\nfrom os import path, remove\nfrom wecall.utils.tabix_wrapper import TabixWrapper\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall.utils.tabix_indexer import TabixIndexer\n\n\nclass TestTabixWrapper(BaseTest):\n def setUp(self):\n BaseTest.setUp(self)\n self.vcf_output = path.join(self.work_dir, self.id() + \".vcf\")\n with VCFWriterContextManager(self.vcf_output) as vcf_writer:\n vcf_writer.write_variant(Variant(\"20\", 61097, \"C\", \"T\"))\n\n indexer = TabixIndexer(self.vcf_output, \"vcf\")\n indexer.index()\n self.vcf_output = indexer.compressed_filename\n self.tabix_file = TabixWrapper(self.vcf_output)\n\n def tearDown(self):\n remove(self.vcf_output)\n remove(self.vcf_output + \".tbi\")\n\n def test_should_yield_records_from_tabix_file_with_standard_chrom_interval(self):\n records = list(\n self.tabix_file.fetch_generator(\n wecall.utils.interval.ChromInterval(\n \"20\", 61097, 61098)))\n self.assertEqual(len(records), 1)\n\n def test_should_not_yield_any_records_for_region_after_record(self):\n records = list(\n self.tabix_file.fetch_generator(\n 
wecall.utils.interval.ChromInterval(\n \"20\", 61098, 70000)))\n self.assertEqual(len(records), 0)\n\n def test_should_not_yield_any_records_for_region_before_record(self):\n records = list(\n self.tabix_file.fetch_generator(\n wecall.utils.interval.ChromInterval(\n \"20\", 60000, 61097)))\n self.assertEqual(len(records), 0)\n\n def test_should_yield_regions_for_whole_chromosome_chrom_interval_format(self):\n records = list(self.tabix_file.fetch_generator(\n wecall.utils.interval.ChromInterval(\"20\")))\n self.assertEqual(len(records), 1)\n\n def test_should_raise_stop_iteration_for_invalid_chromosome(self):\n chrom_interval = wecall.utils.interval.ChromInterval(\"Hello\")\n\n try:\n next(self.tabix_file.fetch_generator(chrom_interval))\n raised = False\n except StopIteration:\n raised = True\n\n self.assertTrue(raised)\n", "id": "1512843", "language": "Python", "matching_score": 2.1860549449920654, "max_stars_count": 8, "path": "test/test_utils/utils/test_tabix_wrapper.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.utils.tabix_indexer import TabixIndexer\n\n\ndef bed_line_from_chrom_interval(chrom_interval):\n return \"\\t\".join(\n str(item) for item in [\n chrom_interval.chrom,\n chrom_interval.start,\n chrom_interval.end\n ]\n )\n\n\nclass BEDWriter(object):\n\n def __init__(self, output_stream):\n self.__output_stream = output_stream\n\n def write_chrom_interval(self, chrom_interval):\n self.__output_stream.write(\"{}\\n\".format(\n bed_line_from_chrom_interval(chrom_interval)))\n\n def write_bed_record(self, bed_record):\n self.__output_stream.write(\"{}\\n\".format(str(bed_record)))\n\n def write_chrom_intervals(self, chrom_intervals):\n for chrom_interval in chrom_intervals:\n self.write_chrom_interval(chrom_interval)\n\n\nclass BEDWriterContextManager(object):\n\n def __init__(self, filename):\n self.filename = filename\n\n def __enter__(self):\n self.fp = open(self.filename, 'w')\n self.vcf_writer = BEDWriter(self.fp)\n return self.vcf_writer\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.fp.close()\n\n\nclass BEDIndexer(TabixIndexer):\n\n def __init__(self, filename):\n TabixIndexer.__init__(self, filename, \"bed\")\n", "id": "1953253", "language": "Python", "matching_score": 2.8422579765319824, "max_stars_count": 8, "path": "python/wecall/bedutils/bedwriter.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import TestCase\nfrom wecall.bedutils.bedrecord import BEDRecord\nfrom wecall.bedutils.bedwriter import bed_line_from_chrom_interval, BEDWriter\nfrom wecall.utils.interval import ChromInterval\n\n\nclass MockStream(object):\n def __init__(self):\n self.lines = []\n\n def write(self, line):\n self.lines.append(line)\n\n\nclass TestBedLineFromChromInterval(TestCase):\n def test_should_write_tab_delimited_region(self):\n region = ChromInterval(\"20\", 1, 2)\n\n output_line = bed_line_from_chrom_interval(region)\n self.assertEqual(output_line, \"20\\t1\\t2\")\n\n\nclass TestBEDWriterWritesChromIntervals(TestCase):\n def test_should_write_line_to_stream(self):\n output_stream = MockStream()\n\n writer = BEDWriter(output_stream)\n writer.write_chrom_interval(ChromInterval(\"20\", 1, 2))\n\n # Then\n self.assertEqual(output_stream.lines, [\"20\\t1\\t2\\n\"])\n\n def test_should_write_bed_record_to_stream(self):\n output_stream = MockStream()\n\n writer = BEDWriter(output_stream)\n writer.write_bed_record(BEDRecord('1', 1, 2, None, 5, 'd', 'bah'))\n\n self.assertEqual(output_stream.lines, 
['1\\t1\\t2\\t.\\t5\\td\\n'])\n", "id": "2112792", "language": "Python", "matching_score": 1.3474972248077393, "max_stars_count": 8, "path": "test/test_utils/bedutils/test_bedwriter.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom io import StringIO\nimport unittest\nimport datetime\nfrom wecall.vcfutils.schema import Schema\nfrom wecall.vcfutils.writer import encode_VCF_string, VCFWriter\n\n\nclass TestVCFWriter(unittest.TestCase):\n\n def test_should_write_empty_file_containing_expected_version_number(self):\n mock_file = StringIO()\n empty_schema = Schema()\n writer = VCFWriter(mock_file)\n writer.write_header(empty_schema)\n expected_file = '##fileformat=VCFv4.2\\n#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n'\n self.assertEqual(expected_file, mock_file.getvalue())\n\n def test_should_write_file_metadata_in_expected_format(self):\n mock_file = StringIO()\n date = datetime.datetime.utcnow().strftime('%F')\n schema = Schema()\n schema.file_metadata['fileDate'] = date\n\n writer = VCFWriter(mock_file)\n writer.write_header(schema)\n\n expected_file = '##fileformat=VCFv4.2\\n' \\\n '##fileDate={date!s}\\n' \\\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n' \\\n .format(date=date)\n self.assertEqual(expected_file, mock_file.getvalue())\n\n def test_should_write_info_data_in_expected_format(self):\n mock_file = StringIO()\n schema = Schema()\n schema.set_info_data('key', '1', 'String', 'sample info field')\n\n writer = VCFWriter(mock_file)\n writer.write_header(schema)\n\n expected_file = '##fileformat=VCFv4.2\\n' \\\n '##INFO=<ID=key,Number=1,Type=String,Description=\"sample info field\">\\n' \\\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n'\n self.assertEqual(expected_file, mock_file.getvalue())\n\n def test_should_write_sample_data_in_expected_format(self):\n mock_file = StringIO()\n schema = Schema()\n schema.set_sample_data('key', '1', 'String', 'a sample field')\n\n writer = VCFWriter(mock_file)\n writer.write_header(schema)\n\n expected_file = '##fileformat=VCFv4.2\\n' \\\n '##FORMAT=<ID=key,Number=1,Type=String,Description=\"a sample field\">\\n' \\\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n'\n self.assertEqual(expected_file, mock_file.getvalue())\n\n def test_should_write_filter_in_expected_format(self):\n mock_file = StringIO()\n schema = Schema()\n schema.set_filter('key', 'a filter')\n\n writer = VCFWriter(mock_file)\n writer.write_header(schema)\n\n expected_file = '##fileformat=VCFv4.2\\n' \\\n '##FILTER=<ID=key,Description=\"a filter\">\\n' \\\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n'\n self.assertEqual(expected_file, mock_file.getvalue())\n\n def test_should_write_contig_in_expected_format(self):\n mock_file = StringIO()\n schema = Schema()\n schema.set_contig('key', 666)\n\n writer = VCFWriter(mock_file)\n writer.write_header(schema)\n\n expected_file = '##fileformat=VCFv4.2\\n' \\\n '##contig=<ID=key,length=666>\\n' \\\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n'\n self.assertEqual(expected_file, mock_file.getvalue())\n\n def test_should_write_sample_names_in_column_header_line(self):\n mock_file = StringIO()\n schema = Schema()\n schema.samples.append('FOO')\n\n writer = VCFWriter(mock_file)\n writer.write_header(schema)\n\n expected_file = '##fileformat=VCFv4.2\\n' \\\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\tFOO\\n'\n self.assertEqual(expected_file, mock_file.getvalue())\n\n\nclass TestVCFStringWriting(unittest.TestCase):\n\n def 
test_should_encode_empty_VCF_string(self):\n self.assertEqual('\"\"', encode_VCF_string(''))\n\n def test_should_encode_simple_VCF_string(self):\n self.assertEqual('\"foo\"', encode_VCF_string('foo'))\n\n def test_should_encode_VCF_string_with_single_double_quote(self):\n self.assertEqual('\"\\\\\"\"', encode_VCF_string('\"'))\n\n def test_should_encode_VCF_string_with_single_backslash(self):\n self.assertEqual('\"\\\\\\\\\"', encode_VCF_string('\\\\'))\n\n def test_should_encode_complex_VCF_string(self):\n self.assertEqual(\n '\"abc\\\\\\\\def\\\\\\\"ghi\"',\n encode_VCF_string('abc\\\\def\"ghi'))\n", "id": "3393400", "language": "Python", "matching_score": 5.278133869171143, "max_stars_count": 8, "path": "test/test_utils/vcfutils/test_writer.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\nimport re\nimport unittest\n\nfrom wecall.genomics.variant import Variant\nfrom wecall.vcfutils.genotype_call import GenotypeCall\nfrom wecall.vcfutils.parser import VCFReader, VCFReaderContextManager, decode_VCF_string, \\\n parse_VCF_comma_separated_pair_value\nfrom wecall.vcfutils.schema import Schema\nfrom wecall.vcfutils.writer import VCFWriterContextManager\nfrom wecall_test_drivers.base_test import BaseTest\n\n\nclass ParserTest(BaseTest):\n def setUp(self):\n BaseTest.setUp(self)\n self.data_dir = os.path.join(os.path.dirname(__file__), \"example_data\")\n\n def variant_is_equal(self, var1, var2):\n self.assertEqual(var1.chrom, var2[0])\n self.assertEqual(var1.pos_from, var2[1])\n self.assertEqual(var1.ids, var2[2])\n self.assertEqual(var1.ref, var2[3])\n self.assertEqual(var1.alt, var2[4])\n\n def test_read_VCF_line(self):\n with open(os.path.join(self.data_dir, \"vcf_example.vcf\"), \"r\") as vcf_file:\n vcf_handler = VCFReader(vcf_file)\n vcf_handler.read_header()\n self.assertEqual(len(vcf_handler.header.file_metadata), 7)\n self.assertEqual(len(vcf_handler.header.samples), 2)\n\n records = list(vcf_handler.read_records())\n self.assertEqual(len(records), 2)\n\n # test first record fully\n self.variant_is_equal(records[0], (\"20\", 9, set(), \"CT\", \"C\")) # zero=based representation\n self.assertEqual(records[0].filters, set())\n self.assertEqual(records[0].passes_filter, True)\n\n self.assertEqual(len(records[0].info), 12)\n self.assertEqual(records[0].info[\"PP\"], [3000])\n self.assertEqual(records[0].info[\"DP\"], [250])\n self.assertEqual(records[0].info[\"DPR\"], [140])\n self.assertEqual(records[0].info[\"DPF\"], [110])\n self.assertEqual(records[0].info[\"VC\"], [100])\n self.assertEqual(records[0].info[\"VCR\"], [49])\n self.assertEqual(records[0].info[\"VCF\"], [51])\n self.assertEqual(records[0].info[\"ABPV\"], [0.2])\n self.assertEqual(records[0].info[\"SBPV\"], [0.3])\n self.assertEqual(records[0].info[\"MQ\"], [70])\n self.assertEqual(records[0].info[\"BR\"], [31])\n self.assertEqual(records[0].info[\"QD\"], [None])\n\n self.assertEqual(records[0].samples, ['sample1', 'sample2'])\n self.assertEqual(records[0].sample_info.get_field('sample1', \"GT\"), GenotypeCall(\"0/1\"))\n self.assertEqual(records[0].sample_info.get_field('sample2', \"GT\"), GenotypeCall(\"1/1\"))\n\n self.assertEqual(records[0].sample_info.get_field('sample1', 'PL'), [3000, 0, 3000])\n self.assertEqual(records[0].sample_info.get_field('sample2', 'PL'), [114, 0, 0])\n\n self.assertEqual(records[0].sample_info.get_field('sample1', 'GQ'), [1000])\n self.assertEqual(records[0].sample_info.get_field('sample2', 'GQ'), [None])\n\n # check that ordering in the dictionaries is 
preserved\n expected_keys = [\"PP\", \"DP\", \"DPR\", \"DPF\", \"VC\", \"VCR\",\n \"VCF\", \"ABPV\", \"SBPV\", \"MQ\", \"BR\", \"QD\"]\n\n self.assertEqual(list(records[0].info.keys()), expected_keys)\n\n # ensure last record is still being read correctly\n self.variant_is_equal(records[-1], (\"20\", 10, set(), \"T\", \"G\"))\n\n def test_reads_simple_file(self):\n filename = os.path.join(self.work_dir, \"test.vcf\")\n\n with VCFWriterContextManager(filename) as left_vcf:\n left_vcf.write_variant(Variant(\"1\", 1, \"A\", \"T\"))\n left_vcf.write_variant(Variant(\"2\", 1, \"A\", \"T\"))\n left_vcf.write_variant(Variant(\"10\", 1, \"A\", \"T\"))\n\n expected_variants = [\n Variant(\"1\", 1, \"A\", \"T\"),\n Variant(\"2\", 1, \"A\", \"T\"),\n Variant(\"10\", 1, \"A\", \"T\"),\n ]\n\n with VCFReaderContextManager(filename) as vcf_reader:\n actual_variants = [record.variant for record in vcf_reader.read_records()]\n\n self.assertEqual(expected_variants, actual_variants)\n\n\nclass TestVCFStringParsing(unittest.TestCase):\n\n def test_should_decode_empty_VCF_string(self):\n self.assertEqual('', decode_VCF_string('\"\"'))\n\n def test_should_decode_simple_VCF_string(self):\n self.assertEqual('foo', decode_VCF_string('\"foo\"'))\n\n def test_should_decode_VCF_string_with_single_double_quote(self):\n self.assertEqual('\"', decode_VCF_string('\"\\\\\"\"'))\n\n def test_should_decode_VCF_string_with_single_backslash(self):\n self.assertEqual('\\\\', decode_VCF_string('\"\\\\\\\\\"'))\n\n def test_should_decode_complex_VCF_string(self):\n self.assertEqual(\n 'abc\\\\def\"ghi',\n decode_VCF_string('\"abc\\\\\\\\def\\\\\\\"ghi\"'))\n\n def test_should_fail_to_decode_unquoted_string(self):\n with self.assertRaisesRegex(Exception, 'expected a VCF encoded string: \\'foo\\''):\n print(decode_VCF_string('foo'))\n\n def test_should_fail_to_decode_string_with_stray_backslash(self):\n with self.assertRaisesRegex(Exception, re.escape('expected a VCF encoded string: \\'\"\\\\\\\\\"\\'')):\n print(decode_VCF_string('\"\\\\\"'))\n\n def test_should_fail_to_decode_string_with_unencoded_double_quote(self):\n with self.assertRaisesRegex(Exception, 'expected a VCF encoded string: \\'\"\\\"\"\\''):\n print(decode_VCF_string('\"\\\"\"'))\n\n\nclass TestCommaSeparatedPairParser(unittest.TestCase):\n\n def test_should_parse_simple_comma_separated_pairs(self):\n parsed = parse_VCF_comma_separated_pair_value('<first=foo,second=bar>')\n expected = {'first': 'foo', 'second': 'bar'}\n self.assertEqual(expected, parsed)\n\n def test_should_parse_empty_simple_value(self):\n parsed = parse_VCF_comma_separated_pair_value('<first=,second=bar>')\n expected = {'first': '', 'second': 'bar'}\n self.assertEqual(expected, parsed)\n\n def test_should_fail_to_parse_non_bracketed_string(self):\n with self.assertRaisesRegex(Exception, 'expected braced key-value pairs: \\'first=foo\\''):\n print(parse_VCF_comma_separated_pair_value('first=foo'))\n\n def test_should_parse_quoted_comma_separated_pairs(self):\n parsed = parse_VCF_comma_separated_pair_value(\n '<first=\"foo\",second=\"bar\">')\n expected = {'first': '\"foo\"', 'second': '\"bar\"'}\n self.assertEqual(expected, parsed)\n\n def test_should_parse_empty_quoted_value(self):\n parsed = parse_VCF_comma_separated_pair_value('<first=\"\">')\n expected = {'first': '\"\"'}\n self.assertEqual(expected, parsed)\n\n def test_should_parse_values_with_quoted_commas(self):\n parsed = parse_VCF_comma_separated_pair_value('<first=\"foo,bar\">')\n expected = {'first': '\"foo,bar\"'}\n 
self.assertEqual(expected, parsed)\n\n def test_should_parse_values_with_quoted_double_quote(self):\n parsed = parse_VCF_comma_separated_pair_value('<first=\"foo\\\\\\\"bar\">')\n expected = {'first': '\"foo\\\\\\\"bar\"'}\n self.assertEqual(expected, parsed)\n\n def test_should_fail_with_badly_quoted_double_quote(self):\n with self.assertRaisesRegex(Exception, 'failed to parse key-value pairs from \\'<first=\"foo\\\"bar\">\\''):\n print(parse_VCF_comma_separated_pair_value('<first=\"foo\\\"bar\">'))\n\n\nclass TestHeaderParsing(unittest.TestCase):\n\n # version parsing\n\n def test_should_parse_well_formatted_version(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n expected = Schema()\n self.assertEqual(expected, header)\n\n def test_should_store_header_as_attribute_of_parser(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n self.assertEqual(header, reader.header)\n\n def test_should_fail_with_unexpected_version(self):\n lines = [\n '##fileformat=VCFv0.0\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n with self.assertRaisesRegex(Exception, 'unexpected version: \\'0.0\\''):\n print(reader.read_header())\n\n def test_should_fail_to_parse_malformed_header_line(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '##malformed line!\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n with self.assertRaisesRegex(Exception, 'failed to parse header line: \\'##malformed line!\\''):\n print(reader.read_header())\n\n def test_should_fail_if_version_is_not_defined(self):\n lines = [\n '##notFileformat=foo\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n with self.assertRaisesRegex(Exception, 'unrecognised file format line: \\'##notFileformat=foo\\''):\n print(reader.read_header())\n\n # file metadata parsing\n\n def test_should_parse_well_formatted_file_metadata(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '##fileDate=2013-07-08\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n expected = Schema()\n expected.file_metadata['fileDate'] = '2013-07-08'\n self.assertEqual(expected, header)\n\n # info data parsing\n\n def test_should_parse_minimal_info_header_fields(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '##INFO=<ID=key,Number=1,Type=String,Description=\"description\">\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n expected = Schema()\n expected.set_info_data('key', '1', 'String', 'description')\n self.assertEqual(expected, header)\n\n def test_should_parse_all_info_header_fields(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '##INFO=<ID=key,Number=1,Type=String,Description=\"description\",Source=\"foo\",Version=\"bar\">\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n expected = Schema()\n expected.set_info_data(\n 'key',\n '1',\n 'String',\n 'description',\n 'foo',\n 'bar')\n self.assertEqual(expected, header)\n\n # sample data parsing\n\n def 
test_should_parse_valid_sample_header_fields(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '##FORMAT=<ID=key,Number=1,Type=String,Description=\"description\">\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n expected = Schema()\n expected.set_sample_data('key', '1', 'String', 'description')\n self.assertEqual(expected, header)\n\n # filter parsing\n\n def test_should_parse_valid_filter_header_fields(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '##FILTER=<ID=key,Description=\"description\">\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n expected = Schema()\n expected.set_filter('key', 'description')\n self.assertEqual(expected, header)\n\n # contig parsing\n\n def test_should_parse_valid_contig_header_fields(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '##contig=<ID=key,length=666>\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n expected = Schema()\n expected.set_contig('key', 666)\n self.assertEqual(expected, header)\n\n # column headers + sample names\n\n def test_should_parse_required_column_headers(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n expected = Schema()\n self.assertEqual(expected, header)\n\n def test_should_fail_without_required_column_headers(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\n',\n ]\n reader = VCFReader(iter(lines))\n\n with self.assertRaisesRegex(\n Exception,\n re.escape(\"expected column header line: '#CHROM\\\\tPOS\\\\tID\\\\tREF\\\\tALT\\\\tQUAL\\\\tFILTER'\")\n ):\n print(reader.read_header())\n\n def test_should_parse_column_headers_with_format_but_no_samples(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n expected = Schema()\n self.assertEqual(expected, header)\n\n def test_should_parse_column_headers_with_complex_sample_names(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\tOWEN_TOBY-RHYS.JONES\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n expected = Schema()\n expected.samples = ['OWEN_TOBY-RHYS.JONES']\n self.assertEqual(expected, header)\n\n def test_should_not_parse_column_headers_with_sample_names_containing_white_space(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\tOWEN JONES\\n',\n ]\n reader = VCFReader(iter(lines))\n\n with self.assertRaisesRegex(\n Exception,\n re.escape(\n 'expected column header line: '\n '\\'#CHROM\\\\tPOS\\\\tID\\\\tREF\\\\tALT\\\\tQUAL\\\\tFILTER\\\\tINFO\\\\tFORMAT\\\\tOWEN JONES\\''\n )\n ):\n print(reader.read_header())\n\n def test_should_fail_with_malformed_format_column_header(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFOO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n with self.assertRaisesRegex(\n Exception,\n re.escape('expected column header line: \\'#CHROM\\\\tPOS\\\\tID\\\\tREF\\\\tALT\\\\tQUAL\\\\tFILTER\\\\tINFO\\\\tFOO\\'')\n ):\n 
print(reader.read_header())\n\n def test_should_parse_column_headers_with_samples(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\tFOO\\tBAR\\n',\n ]\n reader = VCFReader(iter(lines))\n\n header = reader.read_header()\n\n expected = Schema()\n expected.samples.append('FOO')\n expected.samples.append('BAR')\n self.assertEqual(expected, header)\n\n def test_should_fail_if_column_header_line_is_missing(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n 'the line after the header\\n',\n ]\n reader = VCFReader(iter(lines))\n\n with self.assertRaisesRegex(Exception, 'expected column header line: \\'the line after the header\\''):\n print(reader.read_header())\n\n def test_should_fail_on_unexpected_EOF(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n ]\n reader = VCFReader(iter(lines))\n\n with self.assertRaisesRegex(Exception, 'unexpected EOF'):\n print(reader.read_header())\n\n\nclass TestRecordParsing(unittest.TestCase):\n\n # version parsing\n\n def test_should_parse_single_record(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n 'chr0\\t0\\t.\\tP\\tQ\\t0\\tPASS\\t\\n',\n ]\n reader = VCFReader(iter(lines))\n\n record_count = len(list(reader.read_records()))\n\n self.assertEqual(1, record_count)\n\n def test_should_parse_header_when_parsing_records(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n 'chr0\\t0\\t.\\tP\\tQ\\t0\\tPASS\\t\\n',\n ]\n reader = VCFReader(iter(lines))\n\n self.assertIsNone(reader.header)\n list(reader.read_records())\n self.assertIsNotNone(reader.header)\n\n def test_should_parse_empty_file(self):\n lines = [\n '##fileformat=VCFv4.2\\n',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\n',\n ]\n reader = VCFReader(iter(lines))\n\n record_count = len(list(reader.read_records()))\n\n self.assertEqual(0, record_count)\n", "id": "1108619", "language": "Python", "matching_score": 4.57262659072876, "max_stars_count": 8, "path": "test/test_utils/vcfutils/test_parser.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\n\nimport datetime\nimport gzip\nimport re\nfrom wecall.genomics.chromosome import chromosome_comp\nfrom wecall.utils.tabix_wrapper import TabixWrapper\nimport os\nimport wecall.vcfutils.schema\nimport wecall.vcfutils.record\nfrom wecall.utils.interval import WHOLE_GENOME\n\n\nADAPTER_KEY = 'ADAPTER'\n\n\ndef read_records_functor(vcf_handler):\n return vcf_handler.read_records\n\n\ndef read_variants_functor(vcf_handler):\n\n def read_variants(*args, **kwargs):\n return (\n record.variant for record in vcf_handler.read_records(\n *args, **kwargs))\n\n return read_variants\n\n\nclass VCFReader(object):\n\n def __init__(self, input_stream):\n self.header = None\n self.__input_stream = input_stream\n self.__next_line = None # one line of lookahead\n self.__lines = self.__readline()\n\n def read_header(self):\n header = wecall.vcfutils.schema.Schema()\n\n # get the version from a VCF file\n line = next(self.__lines).strip()\n version_match = re.match(\n '^##fileformat=VCFv(?P<version>[\\d\\.]+)$', line)\n if version_match:\n version = version_match.group('version')\n else:\n raise Exception('unrecognised file format line: {!r}'.format(line))\n\n if version not in {\n '4.0',\n '4.1',\n '4.2'}: # replace this test with a layer of indirection\n raise Exception('unexpected version: {!r}'.format(version))\n\n header.set_vcf_format(version)\n\n # parse a VCF4.2 
format header:\n header_regex = re.compile('^##(?P<key>[-_~\\w\\d.]+)=(?P<value>.*)$')\n while True:\n line = next(self.__lines, None)\n if line is None:\n break\n line = line.strip()\n header_match = header_regex.match(line)\n if header_match:\n key, value = header_match.group('key', 'value')\n if key == 'INFO': # replace if-elif-else with layer of indirection\n data = parse_VCF_comma_separated_pair_value(value)\n source = decode_VCF_string(\n data['Source']) if 'Source' in data else None\n version = decode_VCF_string(\n data['Version']) if 'Version' in data else None\n header.set_info_data(\n data['ID'],\n data['Number'],\n data['Type'],\n decode_VCF_string(data['Description']),\n source,\n version,\n )\n elif key == 'FORMAT':\n data = parse_VCF_comma_separated_pair_value(value)\n header.set_sample_data(\n data['ID'],\n data['Number'],\n data['Type'],\n decode_VCF_string(data['Description']),\n )\n elif key == 'FILTER':\n data = parse_VCF_comma_separated_pair_value(value)\n header.set_filter(\n data['ID'],\n decode_VCF_string(data['Description']),\n )\n elif key == 'contig':\n data = parse_VCF_comma_separated_pair_value(value)\n header.set_contig(\n data['ID'],\n get_int_or_none(data, 'length'),\n )\n elif key == ADAPTER_KEY:\n data = parse_VCF_comma_separated_pair_value(value)\n if 'ID' in data:\n try:\n date = datetime.datetime.strptime(\n data['date'], '%Y-%m-%dT%H:%M:%S')\n except ValueError:\n date = datetime.datetime.strptime(\n data['date'], '%Y-%m-%d')\n header.set_adapter(data['ID'], data['hash'], date)\n else:\n header.set_adapter(\n data['adapters'],\n data['githash'],\n datetime.datetime.strptime(\n data['date'],\n '%Y-%m-%d'),\n )\n else: # general file metadata\n header.file_metadata[key] = value\n elif line.startswith('##'):\n raise Exception(\n 'failed to parse header line: {!r}'.format(line))\n else:\n break\n\n if line is None:\n raise Exception('unexpected EOF')\n\n column_match = re.match(\n '^#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO(?:\\tFORMAT(?P<samples>(?:\\t\\S+)*))?$',\n line)\n if column_match:\n sample_group = column_match.group('samples')\n if sample_group is not None:\n samples = [_f for _f in sample_group.split('\\t') if _f]\n header.samples.extend(samples)\n else:\n raise Exception('expected column header line: {!r}'.format(line))\n\n self.header = header\n return header\n\n def read_records(self):\n if self.header is None:\n self.header = self.read_header()\n for line_number, line in enumerate(self.__readline()):\n try:\n if line is None:\n raise StopIteration()\n for rec in wecall.vcfutils.record.read_records(\n self.header, line):\n yield rec\n except Exception as e:\n raise type(e)(\n \"Error processing VCF data line {}: {}\".format(\n line_number + 1, e))\n\n def __readline(self):\n for line in self.__input_stream:\n prevline = self.__next_line\n self.__next_line = line\n if prevline:\n yield prevline\n line, self.__next_line = self.__next_line, None\n yield line\n\n\ndef get_int_or_none(map, key):\n try:\n return int(map[key])\n except KeyError:\n return None\n\n\ndef parse_VCF_comma_separated_pair_value(value):\n braced_value_regex = re.compile('^<(?P<value>.*)>$')\n # value ::= quoted value | non-quoted value\n # quoted value ::= '\"' ( escaped character | not('\"') ) * '\"'\n # non-quoted value ::= ( not('\"', ',') not(',') * ) ?\n comma_separated_pair_regex = re.compile(\n '^(?P<key>[^=]+)=(?P<value>(?:\"(?:(?:\\\\\\\\.)|[^\"])*\")|(?:[^,\"][^,]*)?),?')\n info_match = braced_value_regex.match(value)\n if info_match:\n stripped_value = 
info_match.group('value')\n data = {}\n end = 0\n while end != len(stripped_value):\n csp_match = comma_separated_pair_regex.match(stripped_value[end:])\n if csp_match:\n end += len(csp_match.group(0))\n data[csp_match.group('key')] = csp_match.group('value')\n else:\n raise ValueError(\n 'failed to parse key-value pairs from {!r}'.format(value))\n return data\n else:\n raise ValueError('expected braced key-value pairs: {!r}'.format(value))\n\n\ndef decode_VCF_string(string):\n if not re.match('^\"(?:[^\"\\\\\\\\]|\\\\\\\\\"|\\\\\\\\\\\\\\\\)*\"$', string):\n raise ValueError('expected a VCF encoded string: {!r}'.format(string))\n else:\n return string[1:-1].replace('\\\\\"', '\"').replace('\\\\\\\\', '\\\\')\n\n\nclass RecordAndKey(object):\n\n def __init__(self, record, key):\n self.key = key\n self.record = record\n\n def __repr__(self):\n return \"<Key={}, Record={!r}\".format(self.key, self.record.variant)\n\n def __eq__(self, other):\n return self.record == other.record and self.key == other.key\n\n def __lt__(self, other):\n if self.record == other.record:\n return self.key < other.key\n else:\n return self.record < other.record\n\n def __gt__(self, other):\n if self.record == other.record:\n return self.key > other.key\n else:\n return self.record > other.record\n\n\nclass VCFReaderContextManager(object):\n\n def __init__(self, filename):\n self.filename = filename\n\n def __enter__(self):\n if os.path.splitext(self.filename)[1] == \".gz\":\n self.fp = gzip.open(self.filename, \"rt\")\n else:\n self.fp = open(self.filename, \"r\")\n self.vcf_reader = VCFReader(self.fp)\n return self.vcf_reader\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.fp.close()\n", "id": "1862073", "language": "Python", "matching_score": 3.9952597618103027, "max_stars_count": 8, "path": "python/wecall/vcfutils/parser.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\n\n\nimport datetime\nfrom wecall.vcfutils.record import vcf_row_from_record, vcf_row_from_variant\nfrom wecall.vcfutils.schema import Schema\nfrom wecall.vcfutils.parser import ADAPTER_KEY\n\n\ndef write_record_functor(vcf_handler):\n return vcf_handler.write_record\n\n\ndef write_variant_functor(vcf_handler):\n return vcf_handler.write_variant\n\n\nclass VCFWriter(object):\n\n def __init__(self, stream):\n self.__fp = stream\n\n def write_header(self, header):\n # TODO: handle format version number in a sensible way\n print(\n '##fileformat=VCFv{}'.format(\n \"4.2\" if header.vcf_format is None else header.vcf_format),\n file=self.__fp)\n for key, value in list(header.file_metadata.items()):\n print(\n '##{key!s}={value!s}'.format(\n key=key,\n value=value),\n file=self.__fp)\n for key, value in header.iter_info_data():\n info_items = [\n 'ID={}'.format(key),\n 'Number={}'.format(value.number),\n 'Type={}'.format(value.data_type),\n 'Description={}'.format(encode_VCF_string(value.description)),\n ]\n if value.source is not None:\n info_items.append(\n 'Source={}'.format(\n encode_VCF_string(\n value.source)))\n if value.source is not None:\n info_items.append(\n 'Version={}'.format(\n encode_VCF_string(\n value.version)))\n print('##INFO=<{}>'.format(','.join(info_items)), file=self.__fp)\n for key, value in header.iter_sample_data():\n print(\n '##FORMAT=<ID={},Number={},Type={},Description={}>'.format(\n key,\n value.number,\n value.data_type,\n encode_VCF_string(\n value.description)),\n file=self.__fp)\n for key, value in header.iter_filters():\n print('##FILTER=<ID={},Description={}>'.format(\n key, 
encode_VCF_string(value.description)\n ), file=self.__fp)\n for key, value in header.iter_contigs():\n print('##contig=<ID={},length={}>'.format(\n key, value.length\n ), file=self.__fp)\n for adapter in header.iter_adapters():\n print(\n '##{}=<ID={},date={},hash={}>'.format(\n ADAPTER_KEY,\n adapter.adapter,\n adapter.date.strftime('%F'),\n adapter.hash),\n file=self.__fp)\n columns = [\n 'CHROM',\n 'POS',\n 'ID',\n 'REF',\n 'ALT',\n 'QUAL',\n 'FILTER',\n 'INFO']\n if header.samples:\n columns.append('FORMAT')\n columns.extend(header.samples)\n print('#' + '\\t'.join(columns), file=self.__fp)\n\n def write_variant(self, var, *args, **kwargs):\n self.__write(vcf_row_from_variant, var, *args, **kwargs)\n\n def write_record(self, rec, *args, **kwargs):\n self.__write(vcf_row_from_record, rec, *args, **kwargs)\n\n def write_variants(self, record_stream):\n for var in record_stream:\n self.write_variant(var)\n\n def write_records(self, record_stream):\n for rec in record_stream:\n self.write_record(rec)\n\n def __write(self, rec_type_formatter, rec_ob, *args, **kwargs):\n self.__fp.write(\n \"{}\\n\".format(\n rec_type_formatter(\n rec_ob,\n *args,\n **kwargs)))\n\n\ndef encode_VCF_string(string):\n return '\"' + string.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"') + '\"'\n\n\nclass VCFWriterContextManager(object):\n\n def __init__(self, filename, header=None):\n self.filename = filename\n self.header = header\n\n def __enter__(self):\n self.fp = open(self.filename, 'w')\n if self.header is None:\n self.header = Schema()\n self.header.file_metadata['fileDate'] = datetime.date.today(\n ).strftime('%F')\n self.vcf_writer = VCFWriter(self.fp)\n self.vcf_writer.write_header(self.header)\n return self.vcf_writer\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.fp.close()\n", "id": "10866736", "language": "Python", "matching_score": 1.7216434478759766, "max_stars_count": 8, "path": "python/wecall/vcfutils/writer.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport wecall.genomics.variant\nimport wecall.vcfutils.parser\n\n\nclass VariantCallSet(object):\n \"\"\"\n Variant Call container to be used ONLY when call set is small. 
Not in benchmark.\n \"\"\"\n\n def __init__(self, test_case):\n self.__test_case = test_case\n self.__vcf_header = None\n self.__vcf_variants = {}\n\n def __len__(self):\n return len(self.__vcf_variants)\n\n def get_variants(self):\n return set(self.__vcf_variants.keys())\n\n def get_variant_records(self):\n return self.__vcf_variants\n\n def get_variants_with_genotypes(self):\n variants_with_genotypes = {}\n for variant, record in self.__vcf_variants.items():\n variants_with_genotypes[variant] = record.genotypes\n\n return variants_with_genotypes\n\n def add_vcf_variants(self, output_vcf_path):\n with wecall.vcfutils.parser.VCFReaderContextManager(output_vcf_path) as vcf_reader:\n vcf_reader.read_header()\n self.__vcf_header = vcf_reader.header\n for record in vcf_reader.read_records():\n self.__test_case.assertFalse(\n record.variant in self.__vcf_variants,\n \"Repeated variants in VCF.\")\n self.__vcf_variants[record.variant] = record\n\n # ensure genotype likelihoods are within range throughout\n for record in self.__vcf_variants.values():\n for sample_name in record.sample_info.get_sample_names():\n try:\n for GL_value in record.sample_info.get_field(\n sample_name, 'GL'):\n self.__test_case.assertTrue(GL_value <= 0.0)\n except KeyError:\n pass\n\n def expect_info_header(\n self,\n ID,\n number=None,\n data_type=None,\n description=None):\n self.__test_case.assertIn(\n ID, {key for key, _ in self.__vcf_header.iter_info_data()})\n item = self.__vcf_header.get_info_data(ID)\n if number is not None:\n self.__test_case.assertEqual(number, item.number)\n if data_type is not None:\n self.__test_case.assertEqual(data_type, item.data_type)\n if description is not None:\n self.__test_case.assertEqual(description, item.description)\n return self\n\n def assertVariantNotCalled(self, chrom, vcf_pos, ref, alt):\n expected_variant = self.__build_variant(chrom, vcf_pos, ref, alt)\n self.__test_case.assertNotIn(expected_variant, self.__vcf_variants)\n\n def __assertVariantCalled(self, chrom, vcf_pos, ref, alt):\n expected_variant = self.__build_variant(chrom, vcf_pos, ref, alt)\n self.__test_case.assertIn(expected_variant, self.__vcf_variants)\n return self.__vcf_variants[expected_variant]\n\n def expect_variant(\n self,\n variant,\n filters=None,\n variant_ids=None,\n require_info_annotations=None,\n require_info_annotation_keys=None,\n missing_info_annotations=None,\n ):\n self.assertVariantCalledWithMetadata(\n variant.chrom,\n variant.one_indexed_pos_from,\n variant.ref,\n variant.alt,\n filters,\n variant_ids,\n require_info_annotations,\n require_info_annotation_keys,\n missing_info_annotations)\n return self\n\n def assertVariantCalledWithMetadata(\n self, chrom, vcf_pos, ref, alt,\n filters=None,\n variant_ids=None,\n require_info_annotations=None,\n require_info_annotation_keys=None,\n missing_info_annotations=None,\n ):\n generated_record = self.__assertVariantCalled(chrom, vcf_pos, ref, alt)\n if filters is not None:\n self.__check_filters(generated_record, filters)\n if variant_ids is not None:\n self.__check_id(generated_record, variant_ids)\n if require_info_annotation_keys is not None:\n self.__check_required_annotation_keys(\n generated_record.info, require_info_annotation_keys)\n if require_info_annotations is not None:\n self.__check_required_annotations(\n generated_record.info, require_info_annotations)\n if missing_info_annotations is not None:\n self.__check_missing_annotations(\n generated_record.info, missing_info_annotations)\n\n def assertVariantCalledWithSampleMetadata(\n 
self, chrom, vcf_pos, ref, alt,\n sample_name,\n require_format_annotations=None,\n require_format_annotation_keys=None,\n missing_format_annotations=None\n ):\n generated_record = self.__assertVariantCalled(chrom, vcf_pos, ref, alt)\n self.__test_case.assertTrue(\n generated_record.sample_info.has_sample(sample_name))\n sample_data = generated_record.sample_info.get_genotype_data(\n sample_name)\n\n if require_format_annotation_keys is not None:\n self.__check_required_annotation_keys(\n sample_data, require_format_annotation_keys)\n if require_format_annotations is not None:\n self.__check_required_annotations(\n sample_data, require_format_annotations)\n if missing_format_annotations is not None:\n self.__check_missing_annotations(\n sample_data, missing_format_annotations)\n\n def __check_filters(self, record, filters):\n self.__test_case.assertEqual(record.filters, set(filters))\n\n def __check_id(self, record, variant_ids):\n self.__test_case.assertEqual(record.ids, variant_ids)\n if variant_ids != set():\n self.__test_case.assertIn(\"CV\", record.info)\n\n def __check_required_annotations(self, data, annotations):\n self.__test_case.assertDictContainsSubset(annotations, data)\n\n def __check_missing_annotations(self, data, annotations):\n for item in annotations:\n self.__test_case.assertNotIn(item, data)\n\n def __check_required_annotation_keys(self, data, annotations):\n for item in annotations:\n self.__test_case.assertIn(item, data)\n\n def __build_variant(self, chrom, vcf_pos, ref, alt):\n zero_indexed_pos = vcf_pos - 1\n return wecall.genomics.variant.Variant(\n chrom, zero_indexed_pos, ref, alt)\n", "id": "1542605", "language": "Python", "matching_score": 4.395876884460449, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/variant_callset.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\nfrom wecall.genomics.variant import Variant\nfrom wecall.utils.interval import ChromInterval\nfrom wecall.vcfutils.genotype_call import GenotypeCall\nfrom wecall.vcfutils.parser import VCFReaderContextManager\n\nref_alt = \"<NON_REF>\"\n\n\nclass VCFExpectation(object):\n\n def __init__(self, test_case, path):\n self.__test_case = test_case\n self.__path = path\n self.__test_case.assertTrue(os.path.exists(self.__path))\n with VCFReaderContextManager(self.__path) as vcf_reader:\n self.__schema = vcf_reader.read_header()\n self.__records = list(vcf_reader.read_records())\n\n # ensure genotype likelihoods are within range throughout\n for record in self.__records:\n for sample_name in record.sample_info.get_sample_names():\n try:\n for GL_value in record.sample_info.get_field(\n sample_name, 'GL'):\n self.__test_case.assertTrue(GL_value <= 0.0)\n except KeyError:\n pass\n\n def __eq__(self, other):\n return self.__records == other.__records\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def record_count(self, expected):\n self.__test_case.assertEqual(expected, len(self.__records))\n return self\n\n def has_info_meta_data(self, key):\n self.__test_case.assertIn(\n key, {key for key, value in self.__schema.iter_info_data()})\n return VCFInfoMetadataExpectation(\n self.__test_case, self.__schema.get_info_data(key))\n\n def has_filter(self, key):\n self.__test_case.assertIn(\n key, {key for key, value in self.__schema.iter_filters()})\n return VCFFilterMetadataExpectation(\n self.__test_case, self.__schema.get_filter(key))\n\n def has_reference_calls_for_region(self, chrom, start, end):\n return 
self.has_reference_calls(ChromInterval(chrom, start, end))\n\n def has_reference_calls(self, chrom_interval):\n records = {record.variant: record for record in self.__records if record.variant.overlap(\n chrom_interval)}\n # check all are ref calls.\n current_position = chrom_interval.start\n for variant in sorted(records.keys()):\n self.__test_case.assertEqual(variant.pos_from, current_position)\n self.__test_case.assertEqual(variant.alt, ref_alt)\n record = records[variant]\n info_expectation = VCFRecordExpectation(\n self.__test_case, record).with_info()\n\n info_expectation.with_field(\"BEG\", [current_position + 1])\n current_end = record.info[\"END\"][0]\n info_expectation.with_field(\n \"LEN\", [current_end - current_position])\n\n # Due to mix of closed intervals and 1 --> 0 indexing changes this\n # is true!\n current_position = current_end\n self.__test_case.assertEqual(chrom_interval.end, current_position)\n\n def has_record(self, chrom, pos, ref, alt):\n return self.has_record_for_variant(Variant(chrom, pos, ref, alt))\n\n def has_record_for_variant(self, variant):\n records = {record.variant: record for record in self.__records}\n self.__test_case.assertIn(variant, records.keys())\n return VCFRecordExpectation(self.__test_case, records[variant])\n\n def missing_record_for_variant(self, variant):\n self.__test_case.assertNotIn(\n variant, {record.variant for record in self.__records})\n return self\n\n def has_record_for_variants(self, *variants):\n records = {record.variant: record for record in self.__records}\n records_to_check = list()\n for variant in variants:\n self.__test_case.assertIn(variant, records)\n records_to_check.append(records[variant])\n return VCFRecordListExpectation(\n self.__test_case, [\n VCFRecordExpectation(\n self.__test_case, record) for record in records_to_check])\n\n def with_samples(self, sample_names):\n self.__test_case.assertEqual(sample_names, self.__schema.samples)\n return self\n\n\nclass VCFRecordListExpectation(object):\n def __init__(self, test_case, records):\n self.__test_case = test_case\n self.__record_expectations = records\n\n def with_sample(self, sample_name):\n sample_expectations = [record_expecation.with_sample(\n sample_name) for record_expecation in self.__record_expectations]\n return VCFSampleDataListExpectation(\n self.__test_case, sample_expectations)\n\n\nclass VCFRecordExpectation(object):\n def __init__(self, test_case, record):\n self.__test_case = test_case\n self.__record = record\n\n def with_sample(self, sample_name):\n self.__test_case.assertTrue(\n self.__record.sample_info.has_sample(sample_name))\n return VCFSampleDataExpectation(\n self.__test_case,\n self.__record.sample_info,\n sample_name)\n\n def with_from_multi_allelic_record(self, expected=True):\n self.__test_case.assertEqual(expected, self.__record.from_multi_alt)\n return self\n\n def with_sample_info(self, expected_sample_info):\n self.__test_case.assertEqual(\n expected_sample_info,\n self.__record.sample_info)\n return self\n\n def with_info_data(self, expected_info):\n self.__test_case.assertEqual(expected_info, self.__record.info)\n return self\n\n def with_quality(self, expected_quality):\n self.__test_case.assertEqual(expected_quality, self.__record.quality)\n return self\n\n def with_no_filters(self):\n return self.with_filters(set())\n\n def with_filters(self, expected_filters):\n self.__test_case.assertEqual(expected_filters, self.__record.filters)\n return self\n\n def with_ids(self, expected_ids):\n 
self.__test_case.assertEqual(expected_ids, self.__record.ids)\n return self\n\n def with_info(self):\n return VCFInfoFieldExpectation(self.__test_case, self.__record.info)\n\n\nclass VCFInfoFieldExpectation(object):\n def __init__(self, test_case, info_data):\n self.__test_case = test_case\n self.__info_data = info_data\n\n def with_field(self, key, values):\n self.__test_case.assertEqual(values, self.__info_data[key])\n return self\n\n def has_keys(self, *expected_keys):\n self.__test_case.assertEqual(\n set(expected_keys), set(\n self.__info_data.keys()))\n return self\n\n\nclass VCFFilterMetadataExpectation(object):\n\n def __init__(self, test_case, filter_meta_data):\n self.__test_case = test_case\n self.__filter_meta_data = filter_meta_data\n\n def has_description(self, expected_description):\n self.__test_case.assertEqual(\n expected_description,\n self.__filter_meta_data.description)\n return self\n\n\nclass VCFInfoMetadataExpectation(object):\n\n def __init__(self, test_case, info_meta_data):\n self.__test_case = test_case\n self.__info_meta_data = info_meta_data\n\n def has_description(self, expected_description):\n self.__test_case.assertEqual(\n expected_description,\n self.__info_meta_data.description)\n return self\n\n def has_data_type(self, expected_type):\n self.__test_case.assertEqual(\n expected_type, self.__info_meta_data.data_type)\n return self\n\n\nclass VCFSampleDataListExpectation(object):\n def __init__(self, test_case, sample_data_expectations):\n self.__test_case = test_case\n self.__sample_data_expectations = sample_data_expectations\n\n def has_phased_genotypes(self, *genotype_strings):\n actual_genotypes = tuple(str(sample_data_expectation.get_genotype(\n )) for sample_data_expectation in self.__sample_data_expectations)\n actual_reverse_genotypes = tuple(\n str(sample_data_expectation.get_genotype())[::-1]\n for sample_data_expectation in self.__sample_data_expectations\n )\n\n expected_genotypes = tuple(genotype_strings)\n expected_reverse_genotypes = tuple(\n genotype_string[::-1] for genotype_string in genotype_strings\n )\n assert(len(actual_genotypes) == len(expected_genotypes))\n self.__test_case.assertEqual(\n {expected_genotypes, expected_reverse_genotypes},\n {actual_genotypes, actual_reverse_genotypes}\n )\n return self\n\n def has_exact_phased_genotypes(self, *genotype_strings):\n actual_genotypes = tuple(str(sample_data_expectation.get_genotype(\n )) for sample_data_expectation in self.__sample_data_expectations)\n\n expected_genotypes = tuple(genotype_strings)\n assert(len(actual_genotypes) == len(expected_genotypes))\n self.__test_case.assertEqual(expected_genotypes, actual_genotypes)\n return self\n\n def has_phase_set_id(self, expected):\n for sample_data_expectation in self.__sample_data_expectations:\n sample_data_expectation.has_phase_set_id(expected)\n return self\n\n def has_phase_set_quality(self, expected):\n for sample_data_expectation in self.__sample_data_expectations:\n sample_data_expectation.has_phase_set_quality(expected)\n return self\n\n\nclass VCFSampleDataExpectation(object):\n\n phred_likelihood_key = 'PL'\n read_depth_key = 'DP'\n min_read_depth_key = 'MIN_DP'\n allelic_depth_key = 'AD'\n phase_set_key = 'PS'\n phase_set_quality = 'PQ'\n variant_allelic_frequency_key = 'VAF'\n\n def __init__(self, test_case, sample_data, sample_name):\n self.__test_case = test_case\n self.__sample_name = sample_name\n self.__sample_data = sample_data\n\n def get_genotype(self):\n genotypes = self.__sample_data.genotypes()\n 
self.__test_case.assertIn(self.__sample_name, genotypes)\n return genotypes[self.__sample_name]\n\n def has_phase_set_id(self, expected_id):\n self.__test_case.assertTrue(\n self.__sample_data.has_genotype_key(\n self.phase_set_key))\n self.__test_case.assertEqual(\n [expected_id], self.__sample_data.get_field(\n self.__sample_name, self.phase_set_key))\n return self\n\n def has_phase_set_quality(self, expected):\n self.__test_case.assertTrue(\n self.__sample_data.has_genotype_key(\n self.phase_set_quality))\n self.__test_case.assertEqual(\n [expected], self.__sample_data.get_field(\n self.__sample_name, self.phase_set_quality))\n return self\n\n def has_phased_genotype(self, genotype_string):\n actual_genotype_call = self.get_genotype()\n expected_genotype_call = GenotypeCall(genotype_string)\n\n self.__test_case.assertEqual(\n expected_genotype_call,\n actual_genotype_call)\n self.__test_case.assertEqual(\n expected_genotype_call.phased,\n actual_genotype_call.phased)\n return self\n\n def has_genotype(self, genotype_string):\n actual_genotype_call = self.get_genotype()\n expected_genotype_call = GenotypeCall(genotype_string)\n\n self.__test_case.assertEqual(\n expected_genotype_call,\n actual_genotype_call)\n return self\n\n def has_allelic_read_support(self, reference, *alts):\n self.__test_case.assertTrue(\n self.__sample_data.has_genotype_key(\n self.allelic_depth_key))\n ref_and_allelic_depths = self.__sample_data.get_field(\n self.__sample_name, self.allelic_depth_key)\n self.__test_case.assertEqual(\n len(ref_and_allelic_depths), 1 + len(alts))\n self.__test_case.assertEqual(ref_and_allelic_depths[0], reference)\n self.__test_case.assertEqual(ref_and_allelic_depths[1:], list(alts))\n return self\n\n def has_variant_allelic_frequency(self, *allelic_frequencies):\n self.__test_case.assertTrue(\n self.__sample_data.has_genotype_key(\n self.variant_allelic_frequency_key))\n actual_allele_frequencies = self.__sample_data.get_field(\n self.__sample_name,\n self.variant_allelic_frequency_key\n )\n self.__test_case.assertEqual(\n len(actual_allele_frequencies),\n len(allelic_frequencies))\n\n for actual_allele_frequency, expected_allele_frequency in zip(\n actual_allele_frequencies, allelic_frequencies):\n if expected_allele_frequency == \".\":\n self.__test_case.assertEqual(\n expected_allele_frequency, actual_allele_frequency)\n else:\n self.__test_case.assertAlmostEqual(\n expected_allele_frequency, actual_allele_frequency, places=3)\n return self\n\n def has_read_depth(self, expected):\n self.__test_case.assertTrue(self.__sample_data.has_genotype_key(self.read_depth_key))\n read_depth = self.__sample_data.get_field(self.__sample_name, self.read_depth_key)\n self.__test_case.assertEqual(len(read_depth), 1)\n self.__test_case.assertEqual(read_depth[0], expected)\n return self\n\n def has_min_read_depth(self, expected):\n self.__test_case.assertTrue(self.__sample_data.has_genotype_key(self.min_read_depth_key))\n min_read_depth = self.__sample_data.get_field(self.__sample_name, self.min_read_depth_key)\n self.__test_case.assertEqual(len(min_read_depth), 1)\n self.__test_case.assertEqual(min_read_depth[0], expected)\n return self\n\n def has_RR_genotype_likelihood(self, reference_value):\n likelihoods = self.__sample_data.get_field(\n self.__sample_name, self.phred_likelihood_key)\n self.__test_case.assertEqual(len(likelihoods), 3)\n self.__test_case.assertEqual(likelihoods[0], reference_value)\n return self\n\n def has_RA_genotype_likelihood(self, reference_value):\n likelihoods = 
self.__sample_data.get_field(\n self.__sample_name, self.phred_likelihood_key)\n self.__test_case.assertEqual(len(likelihoods), 3)\n self.__test_case.assertEqual(likelihoods[1], reference_value)\n return self\n\n def has_AA_genotype_likelihood(self, reference_value):\n likelihoods = self.__sample_data.get_field(\n self.__sample_name, self.phred_likelihood_key)\n self.__test_case.assertEqual(len(likelihoods), 3)\n self.__test_case.assertEqual(likelihoods[2], reference_value)\n return self\n", "id": "2412056", "language": "Python", "matching_score": 3.368189573287964, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/vcf_expectation.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\nMAX_PHRED = 3000\n\n\nclass TestAlignPhasingOfClusters(BaseTest):\n def test_phase_alignment_for_two_snps_in_different_clusters_on_different_strands(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \".......A.................................\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \".............................A...........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True) \\\n .with_max_cluster_distance(10) \\\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(2)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 7, \"C\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"7\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 29, \"G\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"0|1\")\\\n .has_phase_set_id(\"7\")\n\n def test_phase_alignment_for_two_snps_in_different_clusters_on_different_strands_for_reference_calling(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \".......A.................................\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \".............................A...........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True) \\\n .with_max_cluster_distance(20) \\\n .with_output_ref_calls(1) \\\n .with_verbosity(6)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(7)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 7, \"C\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"7\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 29, \"G\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"0|1\")\\\n .has_phase_set_id(\"7\")\n\n def test_phase_alignment_for_two_snps_in_different_clusters_on_same_strands(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \".......A.....................A...........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \".........................................\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True) \\\n .with_allow_MNP_calls(False) \\\n 
.with_max_cluster_distance(20)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(2)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 7, \"C\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"7\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 29, \"G\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"7\")\n\n def test_phase_not_aligns_for_hom_snp_in_first_cluster(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \".......A.....................A...........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \".......A.................................\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True) \\\n .with_allow_MNP_calls(False) \\\n .with_max_cluster_distance(10)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(2)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 7, \"C\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|1\")\\\n .has_phase_set_id(\"7\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 29, \"G\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"28\")\n\n def test_phase_aligns_for_hom_snp_in_second_cluster(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \".......A.....................A...........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \".............................A...........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True) \\\n .with_allow_MNP_calls(False) \\\n .with_max_cluster_distance(10)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(2)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 7, \"C\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"7\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 29, \"G\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|1\")\\\n .has_phase_set_id(\"7\")\n\n def test_phase_aligns_for_hom_snp_in_both_cluster(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \".......A.....................A...........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \".......A.....................A...........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True) \\\n .with_allow_MNP_calls(False) \\\n .with_max_cluster_distance(10)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(2)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 7, \"C\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|1\")\\\n .has_phase_set_id(\"7\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 29, \"G\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|1\")\\\n .has_phase_set_id(\"7\")\n\n def test_phase_alignment_for_hom_and_het_variants_on_different_clusters(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = 
SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \".......A...A.................A...........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \"...........A...............A.............\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True) \\\n .with_allow_MNP_calls(False) \\\n .with_max_cluster_distance(10) \\\n .with_min_cluster_distance(5)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(4)\n\n # phase set id is start of cluster\n vcf_expect.has_record_for_variants(Variant(chrom, 7, \"C\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"5\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 11, \"G\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|1\")\\\n .has_phase_set_id(\"5\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 27, \"G\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"0|1\")\\\n .has_phase_set_id(\"5\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 29, \"G\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"5\")\n\n def test_phase_alignment_for_het_variants_for_two_samples(self):\n sample_name_1 = \"sample1\"\n sample_name_2 = \"sample2\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \"...........A.............................\", n_fwd=10, n_rev=10, sample_name=sample_name_1) \\\n .with_read(\n \"...........................A.............\", n_fwd=10, n_rev=10, sample_name=sample_name_1) \\\n .with_read(\n \"...........A.............................\", n_fwd=10, n_rev=10, sample_name=sample_name_2) \\\n .with_read(\n \"...........................A.............\", n_fwd=10, n_rev=10, sample_name=sample_name_2) \\\n .with_output_phased_genotypes(True) \\\n .with_allow_MNP_calls(False) \\\n .with_max_cluster_distance(10) \\\n .with_min_cluster_distance(5)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(2)\n\n # phase set id is start of cluster\n vcf_expect.has_record_for_variants(Variant(chrom, 11, \"G\", \"A\"))\\\n .with_sample(sample_name_1)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"6\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 27, \"G\", \"A\"))\\\n .with_sample(sample_name_1)\\\n .has_exact_phased_genotypes(\"0|1\")\\\n .has_phase_set_id(\"6\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 11, \"G\", \"A\"))\\\n .with_sample(sample_name_2)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"6\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 27, \"G\", \"A\"))\\\n .with_sample(sample_name_2)\\\n .has_exact_phased_genotypes(\"0|1\")\\\n .has_phase_set_id(\"6\")\n\n def test_phase_alignment_for_het_variants_for_three_clusters(self):\n sample_name = \"sample1\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \"....A.............C......................\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \"..................................T......\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True) \\\n .with_allow_MNP_calls(False) \\\n .with_max_cluster_distance(5) \\\n 
.with_min_cluster_distance(5)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(3)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 4, \"C\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"4\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 18, \"T\", \"C\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"4\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 34, \"A\", \"T\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"0|1\")\\\n .has_phase_set_id(\"4\")\n\n def test_phase_alignment_for_het_variants_for_three_clusters_when_middle_cluster_is_homozygous(self):\n sample_name = \"sample1\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \"....A.............C......................\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \"..................C...............T......\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True) \\\n .with_allow_MNP_calls(False) \\\n .with_max_cluster_distance(5) \\\n .with_min_cluster_distance(5)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(3)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 4, \"C\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"4\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 18, \"T\", \"C\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|1\")\\\n .has_phase_set_id(\"4\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 34, \"A\", \"T\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"29\")\n\n def test_phase_alignment_for_het_variants_for_three_clusters_when_first_cluster_is_homozygous(self):\n sample_name = \"sample1\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \"....A....................................\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \"....A.............C...............T......\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True) \\\n .with_allow_MNP_calls(False) \\\n .with_max_cluster_distance(5) \\\n .with_min_cluster_distance(5)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(3)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 4, \"C\", \"A\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|1\")\\\n .has_phase_set_id(\"4\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 18, \"T\", \"C\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"13\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 34, \"A\", \"T\"))\\\n .with_sample(sample_name)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"13\")\n\n def test_phase_alignment_for_het_variants_for_two_samples_with_missing_read_support(self):\n sample_name_1 = \"sample1\"\n sample_name_2 = \"sample2\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\", chrom=chrom) \\\n .with_read(\n \"...........A.............................\", n_fwd=10, n_rev=10, sample_name=sample_name_1) \\\n .with_read(\n 
\"...........................A.............\", n_fwd=10, n_rev=10, sample_name=sample_name_1) \\\n .with_read(\n \"...........A............. \", n_fwd=10, n_rev=10, sample_name=sample_name_2) \\\n .with_read(\n \"......................... \", n_fwd=10, n_rev=10, sample_name=sample_name_2) \\\n .with_output_phased_genotypes(True) \\\n .with_allow_MNP_calls(False) \\\n .with_max_cluster_distance(5) \\\n .with_min_cluster_distance(5)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(2)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 11, \"G\", \"A\"))\\\n .with_sample(sample_name_1)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"6\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 27, \"G\", \"A\"))\\\n .with_sample(sample_name_1)\\\n .has_exact_phased_genotypes(\"0|1\")\\\n .has_phase_set_id(\"6\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 11, \"G\", \"A\"))\\\n .with_sample(sample_name_2)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"6\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 27, \"G\", \"A\"))\\\n .with_sample(sample_name_2)\\\n .has_exact_phased_genotypes(\"0|0\")\\\n .has_phase_set_id(\"6\")\n\n def test_phase_alignment_for_overlapping_variants_for_two_samples(self):\n chrom = \"1\"\n sample1 = \"sample1\"\n sample2 = \"sample2\"\n\n svc_driver = SVCDriver(self)\n svc_driver\\\n .with_ref_sequence(\n 'CGAGCGATACAGATAAAGACATCGAGTGA', chrom=chrom) \\\n .with_read(\n '....T................A.......', n_rev=5, n_fwd=0, chrom=chrom, sample_name=sample1) \\\n .with_read(\n '.....................A.......', n_rev=5, n_fwd=0, chrom=chrom, sample_name=sample1) \\\n .with_read(\n '....T................A.......', n_rev=5, n_fwd=0, chrom=chrom, sample_name=sample2) \\\n .with_read(\n '.....................C.......', n_rev=5, n_fwd=0, chrom=chrom, sample_name=sample2)\\\n .with_max_cluster_distance(8) \\\n .with_min_cluster_distance(8) \\\n .with_verbosity(6)\n\n vcf_expect = svc_driver.call() \\\n .with_output_vcf() \\\n .record_count(3)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 4, \"C\", \"T\"))\\\n .with_sample(sample1)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"4\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 21, \"T\", \"C\"))\\\n .with_sample(sample1)\\\n .has_exact_phased_genotypes(\".|.\")\\\n .has_phase_set_id(\"4\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 21, \"T\", \"A\"))\\\n .with_sample(sample1)\\\n .has_exact_phased_genotypes(\"1|1\")\\\n .has_phase_set_id(\"4\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 4, \"C\", \"T\"))\\\n .with_sample(sample2)\\\n .has_exact_phased_genotypes(\"1|0\")\\\n .has_phase_set_id(\"4\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 21, \"T\", \"C\"))\\\n .with_sample(sample2)\\\n .has_exact_phased_genotypes(\".|1\")\\\n .has_phase_set_id(\"4\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 21, \"T\", \"A\"))\\\n .with_sample(sample2)\\\n .has_exact_phased_genotypes(\"1|.\")\\\n .has_phase_set_id(\"4\")\n", "id": "3376227", "language": "Python", "matching_score": 4.235057830810547, "max_stars_count": 8, "path": "test/wecall_acceptance/phased_genotypes/test_align_phasing_of_clusters.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nMAX_PHRED = 3000\n\n\nclass TestMultiSampleDiploidPhasedCalls(BaseTest):\n 
def test_phasing_for_isolated_snp_on_one_sample_only(self):\n sample_1 = \"sample_1\"\n sample_2 = \"sample_2\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAAT\", chrom=chrom\n ).with_read(\n \"........T............\", n_fwd=10, n_rev=10, sample_name=sample_1\n ).with_read(\n \".....................\", n_fwd=10, n_rev=10, sample_name=sample_2\n )\n svc_driver.with_output_phased_genotypes(True)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(1)\n\n record_expect = vcf_expect.has_record_for_variant(\n Variant(chrom, 8, \"G\", \"T\"))\n\n sample_1_expect = record_expect.with_sample(sample_1)\n sample_1_expect.has_phased_genotype(\"1|1\")\n sample_1_expect.has_phase_set_id(str(8))\n sample_1_expect.has_phase_set_quality(MAX_PHRED)\n\n sample_2_expect = record_expect.with_sample(sample_2)\n sample_2_expect.has_phased_genotype(\"0|0\")\n sample_2_expect.has_phase_set_id(str(8))\n sample_2_expect.has_phase_set_quality(MAX_PHRED)\n\n def test_phasing_for_two_snps_across_samples(self):\n sample_1 = \"sample_1\"\n sample_2 = \"sample_2\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAAT\", chrom=chrom\n ).with_read(\n \"........T............\", n_fwd=10, n_rev=10, sample_name=sample_1\n ).with_read(\n \".....................\", n_fwd=10, n_rev=10, sample_name=sample_1\n ).with_read(\n \"........C............\", n_fwd=10, n_rev=10, sample_name=sample_2\n ).with_read(\n \".....................\", n_fwd=10, n_rev=10, sample_name=sample_2\n )\n svc_driver.with_output_phased_genotypes(True)\n\n expect = svc_driver.call()\\\n .with_output_vcf() \\\n .record_count(2)\n\n expect.has_record_for_variants(Variant(chrom, 8, \"G\", \"T\"), Variant(chrom, 8, \"G\", \"C\"))\n expected_phase_set_id = str(8)\n\n expect \\\n .has_record_for_variants(Variant(chrom, 8, \"G\", \"T\"), Variant(chrom, 8, \"G\", \"C\")) \\\n .with_sample(sample_1)\\\n .has_phased_genotypes(\"0|1\", \"0|.\") \\\n .has_phase_set_id(expected_phase_set_id) \\\n .has_phase_set_quality(MAX_PHRED)\n\n expect \\\n .has_record_for_variants(Variant(chrom, 8, \"G\", \"T\"), Variant(chrom, 8, \"G\", \"C\")) \\\n .with_sample(sample_2)\\\n .has_phased_genotypes(\".|0\", \"1|0\") \\\n .has_phase_set_id(expected_phase_set_id) \\\n .has_phase_set_quality(MAX_PHRED)\n\n def test_phasing_for_overlapping_variant_across_samples(self):\n sample_1 = \"sample_1\"\n sample_2 = \"sample_2\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAAT\", chrom=chrom\n ).with_read(\n \"........T............\", n_fwd=10, n_rev=10, sample_name=sample_1\n ).with_read(\n \"........T............\", n_fwd=10, n_rev=10, sample_name=sample_2\n ).with_read(\n \"........C............\", n_fwd=10, n_rev=10, sample_name=sample_2\n )\n svc_driver.with_output_phased_genotypes(True)\n\n expect = svc_driver.call()\\\n .with_output_vcf() \\\n .record_count(2)\n\n expected_phase_set_id = str(8)\n\n expect \\\n .has_record_for_variants(Variant(chrom, 8, \"G\", \"C\"), Variant(chrom, 8, \"G\", \"T\")) \\\n .with_sample(sample_1)\\\n .has_phased_genotypes(\".|.\", \"1|1\") \\\n .has_phase_set_id(expected_phase_set_id) \\\n .has_phase_set_quality(MAX_PHRED)\n\n expect \\\n .has_record_for_variants(Variant(chrom, 8, \"G\", \"C\"), Variant(chrom, 8, \"G\", \"T\")) \\\n .with_sample(sample_2)\\\n .has_phased_genotypes(\"1|.\", \".|1\") \\\n .has_phase_set_id(expected_phase_set_id) \\\n 
.has_phase_set_quality(MAX_PHRED)\n", "id": "11641575", "language": "Python", "matching_score": 4.615330696105957, "max_stars_count": 8, "path": "test/wecall_acceptance/phased_genotypes/test_multi_sample_diploid.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import expectedFailure\n\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\nfrom math import log10\n\n\nMAX_PHRED = 3000\n\n\nclass TestSingleSampleDiploidPhasedCalls(BaseTest):\n def test_phasing_for_isolated_homozygous_alt_variant(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self).with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAAT\", chrom=chrom\n ).with_read(\n \"........T............\", n_fwd=10, n_rev=10, sample_name=sample_name\n ).with_output_phased_genotypes(True)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(1)\n\n record_expect = vcf_expect.has_record_for_variant(Variant(chrom, 8, \"G\", \"T\"))\n\n sample_expect = record_expect.with_sample(sample_name)\n sample_expect.has_genotype(\"1|1\")\n sample_expect.has_phase_set_id(str(8))\n sample_expect.has_phase_set_quality(MAX_PHRED)\n\n def test_phasing_for_isolated_heterozygous_variant(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAAT\", chrom=chrom) \\\n .with_read(\n \"........T............\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \".....................\", n_fwd=10, n_rev=10, sample_name=sample_name)\n\n svc_driver.with_output_phased_genotypes(True)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(1)\n\n record_expect = vcf_expect.has_record_for_variants(\n Variant(chrom, 8, \"G\", \"T\"))\n\n sample_expect = record_expect.with_sample(sample_name)\n sample_expect.has_phased_genotypes(\"0|1\")\n sample_expect.has_phase_set_id(str(8))\n sample_expect.has_phase_set_quality(MAX_PHRED)\n\n def test_phasing_for_two_heterozygous_variants_ocrn_same_strand(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self) \\\n .with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAAT\", chrom=chrom) \\\n .with_read(\n \"........T...T........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \".....................\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True) \\\n .with_allow_MNP_calls(False)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(2)\n\n records_expect = vcf_expect.has_record_for_variants(\n Variant(chrom, 8, \"G\", \"T\"),\n Variant(chrom, 12, \"A\", \"T\")\n )\n records_expect\\\n .with_sample(sample_name)\\\n .has_phased_genotypes(\"0|1\", \"0|1\")\\\n .has_phase_set_id(\"8\")\n\n def test_phasing_for_two_heterozygous_variants_on_same_strand_more_than_a_cluster_apart(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAAT\", chrom=chrom) \\\n .with_read(\n \"........TT...........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \".....................\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True)\\\n .with_allow_MNP_calls(False) \\\n .with_max_cluster_distance(-1)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(2)\n\n vcf_expect 
.has_record_for_variants(\n Variant(\n chrom, 8, \"G\", \"T\"), Variant(\n chrom, 9, \"C\", \"T\")) .with_sample(sample_name) .has_phased_genotypes(\n \"0|1\", \"0|1\")\n\n def test_phasing_for_two_heterozygous_variants_on_different_strand(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAAT\", chrom=chrom) \\\n .with_read(\n \"........T............\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \"............T........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True)\\\n .with_allow_MNP_calls(False)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(2)\n\n records_expect = vcf_expect.has_record_for_variants(\n Variant(chrom, 8, \"G\", \"T\"),\n Variant(chrom, 12, \"A\", \"T\")\n )\n records_expect.with_sample(sample_name)\\\n .has_phased_genotypes(\"0|1\", \"1|0\")\\\n .has_phase_set_id(\"8\")\n\n @expectedFailure\n def test_symmetry_in_repetitive_reference(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n m = 2\n svc_driver = SVCDriver(self) \\\n .with_ref_sequence(\n \"TAAAAAAAAAAAAAAAAAAAAAAAAAT\", chrom=chrom) \\\n .with_read(\n \"........T..................\", n_fwd=m, n_rev=m, sample_name=sample_name) \\\n .with_read(\n \"...........................\", n_fwd=m, n_rev=m, sample_name=sample_name) \\\n .with_read(\n \".................T.........\", n_fwd=m, n_rev=m, sample_name=sample_name) \\\n .with_allow_MNP_calls(False)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(2)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 17, \"A\", \"T\"))\\\n .with_sample(sample_name)\\\n .has_phased_genotypes(\"0/1\")\n\n vcf_expect.has_record_for_variants(Variant(chrom, 8, \"A\", \"T\"))\\\n .with_sample(sample_name)\\\n .has_phased_genotypes(\"0/1\")\n\n def test_phase_quality_for_equally_likely_phasings_for_same_variant_representations(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAAT\", chrom=chrom) \\\n .with_read(\n \"........T...T..T.....\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \"............T........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \"........T...T........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \"............T..T.....\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True)\\\n .with_allow_MNP_calls(False)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(3)\n\n # for het calls ratio_of_phase_to_total is defined as\n # N_supporting_mutation / ( N_supporting_mutation + N_supporting_ref)\n ratio_of_phase_to_total = 0.5\n expected_phase_quality = int(\n round(log10(1.0 - ratio_of_phase_to_total) * -10.0))\n\n vcf_expect.has_record_for_variants(Variant(chrom, 8, \"G\", \"T\"))\\\n .with_sample(sample_name)\\\n .has_phased_genotypes(\"0|1\")\\\n .has_phase_set_id(\"8\")\\\n .has_phase_set_quality(expected_phase_quality)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 12, \"A\", \"T\"))\\\n .with_sample(sample_name)\\\n .has_phased_genotypes(\"1|1\")\\\n .has_phase_set_id(\"8\")\\\n .has_phase_set_quality(expected_phase_quality)\n\n vcf_expect.has_record_for_variants(Variant(chrom, 15, \"A\", \"T\"))\\\n .with_sample(sample_name)\\\n .has_phased_genotypes(\"0|1\")\\\n .has_phase_set_id(\"8\")\\\n .has_phase_set_quality(expected_phase_quality)\n\n 
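    # Editorial note (hedged): the PQ values asserted above follow the usual
    # phred convention, PQ = round(-10 * log10(P(phasing is wrong))); with two
    # equally likely phasings the error probability is 0.5, giving PQ == 3,
    # which is exactly how expected_phase_quality is computed in the test. The
    # helper below is an illustrative sketch only -- it is not part of weCall
    # and its name is hypothetical.
    @staticmethod
    def _phred_from_error_probability(p_error):
        """Convert an error probability into a phred-scaled quality score."""
        return int(round(-10.0 * log10(p_error)))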
@expectedFailure\n def test_phase_quality_for_phase_with_2_out_of_3_support(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAAT\", chrom=chrom)\\\n .with_read(\n \"........T...T..T.....\", n_fwd=5, n_rev=5, sample_name=sample_name) \\\n .with_read(\n \"............T........\", n_fwd=5, n_rev=5, sample_name=sample_name) \\\n .with_read(\n \"........T...T........\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_read(\n \"............T..T.....\", n_fwd=10, n_rev=10, sample_name=sample_name) \\\n .with_output_phased_genotypes(True)\\\n .with_allow_MNP_calls(False)\n\n vcf_expect = svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(3)\n\n ratio_of_phase_to_total = 2.0 / 3.0\n # actual value needs to be figured out from equations\n unknown_phase_quality = int(round(log10(1.0 - ratio_of_phase_to_total) * -10.0))\n\n vcf_expect.has_record_for_variants(\n Variant(chrom, 8, \"G\", \"T\"),\n Variant(chrom, 12, \"A\", \"T\"),\n Variant(chrom, 15, \"A\", \"T\")\n )\\\n .with_sample(sample_name)\\\n .has_phased_genotypes(\"0|1\", \"1|1\", \"1|0\")\\\n .has_phase_set_id(\"8\")\\\n .has_phase_set_quality(unknown_phase_quality)\n", "id": "2753161", "language": "Python", "matching_score": 3.959162712097168, "max_stars_count": 8, "path": "test/wecall_acceptance/phased_genotypes/test_single_sample_diploid.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import expectedFailure\n\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestVariantReadCounts(AsciiWecallRunnerTest):\n def test_should_count_reads_that_do_not_overlap_the_calling_region(self):\n sample = \"seed\"\n chrom = \"1\"\n\n driver = SVCDriver(self).with_allow_MNP_calls(True)\n driver.with_ref_sequence(\n \"GAAAAAAAAAAACGCACCCCCAAATTTTTTTTAA***********AAAATAAAAAACGCACCCCCAAATTTTTTTTAA\", chrom=chrom\n ).with_read(\n \" ..........G......................\",\n n_fwd=10, n_rev=10, sample_name=sample,\n ).with_read(\n \"..................................AAAATAAAAAG \",\n n_fwd=10, n_rev=10, sample_name=sample,\n ).with_region_string(\"{}:{}-{}\".format(chrom, 34, 55))\n\n vcf = driver.call().with_output_vcf()\n vcf \\\n .has_record_for_variant(Variant(chrom, 44, 'A', 'G')) \\\n .with_info() \\\n .with_field(\"DP\", [40]) \\\n .with_field(\"VC\", [40])\n\n def test_should_record_the_read_support_mnp_and_snp(self):\n sample = \"seed\"\n chrom = \"1\"\n\n driver = SVCDriver(self).with_allow_MNP_calls(True)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACCCCCAAATTTTTTTTAA\", chrom=chrom\n ).with_read(\n \" ......................\", n_fwd=0, n_rev=1, sample_name=sample,\n ).with_read(\n \"..........G......................\", n_fwd=10, n_rev=10, sample_name=sample,\n ).with_read(\n \"..........CT.....................\", n_fwd=10, n_rev=9, sample_name=sample)\n\n vcf = driver.call().with_output_vcf()\n\n vcf \\\n .with_samples([sample]) \\\n .record_count(2)\n\n vcf \\\n .has_record_for_variant(Variant(chrom, 10, 'AC', 'CT')) \\\n .with_sample(sample) \\\n .has_read_depth(40) \\\n .has_allelic_read_support(1, 19) \\\n .has_variant_allelic_frequency(19.0 / 40.0)\n\n vcf \\\n .has_record_for_variant(Variant(chrom, 10, 'A', 'G')) \\\n .with_sample(sample) \\\n .has_read_depth(39) \\\n .has_allelic_read_support(0, 20) \\\n .has_variant_allelic_frequency(20.0 / 39.0)\n\n def 
test_should_record_the_read_support_insertion_and_snp_on_same_strand(self):\n sample = \"sample\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAACGTAGCTG*GCACCCCCAAA\", chrom=chrom\n ).with_read(\n \"...........*...........\", n_fwd=10, n_rev=10, sample_name=sample,\n ).with_read(\n \"..........CT...........\", n_fwd=10, n_rev=10, sample_name=sample,\n )\n\n vcf = driver.call().with_output_vcf()\n\n vcf \\\n .with_samples([sample]) \\\n .record_count(2)\n\n vcf \\\n .has_record_for_variant(Variant(chrom, 9, 'T', 'TC')) \\\n .with_sample(sample) \\\n .has_read_depth(40) \\\n .has_allelic_read_support(20, 20) \\\n .has_genotype(\"0/1\")\n\n def test_should_record_the_read_support_deletion_and_snp_on_same_strand(self):\n sample = \"sample\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAACGTAGCTGTGCACCCCCAAA\", chrom=chrom\n ).with_read(\n \".......................\", n_fwd=10, n_rev=10, sample_name=sample,\n ).with_read(\n \"..........C*...........\", n_fwd=10, n_rev=10, sample_name=sample,\n )\n\n vcf = driver.call().with_output_vcf()\n\n vcf \\\n .with_samples([sample]) \\\n .record_count(2)\n\n vcf \\\n .has_record(chrom, 9, 'TG', 'T') \\\n .with_sample(sample) \\\n .has_read_depth(40) \\\n .has_allelic_read_support(20, 20) \\\n .has_genotype(\"0/1\")\n\n def test_should_report_unkown_value_for_allele_frequence_when_depth_is_zero(self):\n # Only way to output depth zero for sample and variant is to have\n # another good sample\n good_sample = \"good_sample\"\n empty_sample = \"empty_sample\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAACGTAGCTGTGCACCCCCAAA\", chrom=chrom\n ).with_read(\n \"..........T............\", n_fwd=10, n_rev=10, sample_name=good_sample,\n ).with_read(\n \".......................\", n_fwd=0, n_rev=0, sample_name=empty_sample,\n )\n vcf = driver.call().with_output_vcf()\n vcf \\\n .with_samples([good_sample, empty_sample]) \\\n .record_count(1)\n\n vcf \\\n .has_record_for_variant(Variant(chrom, 10, \"G\", \"T\")) \\\n .with_sample(empty_sample) \\\n .has_read_depth(0) \\\n .has_variant_allelic_frequency(None)\n\n def test_single_snp_in_clean_data(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" ..............C............. \",\n \" ...................C........ \",\n \" .................C.............. \",\n \" .................C.................. \",\n \".....................C...... \"],\n [(21, \"A\", \"C\", {\"DP\": [5], \"AD\": [0, 5]})]\n )\n\n def test_single_snp_in_dirty_data(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" ..............T............. \",\n \" 2 \",\n \" ...................T........ \",\n \" 2 \",\n \" .................C.............. \",\n \" .................C.................. \",\n \".....................C...... \"],\n [(21, \"A\", \"C\", {\"DP\": [5], \"AD\": [0, 3]})], config_dict={\"minBaseQual\": 20}\n )\n\n def test_snps_at_same_location_should_have_expected_read_counts(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" ..............C............. \",\n \" ...................C........ \",\n \" .................C.............. \",\n \" .................C.................. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \".....................T...... 
\"],\n [(21, \"A\", \"C\", {\"DP\": [8], \"AD\": [0, 4]}),\n (21, \"A\", \"T\", {\"DP\": [8], \"AD\": [0, 4]})]\n )\n\n def test_snp_and_mnp_at_same_location_with_mnp_containing_snp_should_have_expected_read_counts(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" ..............TC............ \",\n \" ...................TC....... \",\n \" .................TC............. \",\n \" .................TC................. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \".....................T...... \"],\n [(21, \"AT\", \"TC\", {\"DP\": [8], \"AD\": [0, 4]}),\n (21, \"A\", \"T\", {\"DP\": [8], \"AD\": [0, 4]})], config_dict={\"allowMNPCalls\": \"True\"}\n )\n\n def test_should_only_count_reads_that_overlap_the_snp(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTTAT\",\n [\" ..................... \",\n \"..................... ..................... \",\n \" ..............T............. \",\n \" ...................T........ \",\n \" .................T.............. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \".....................T...... \"],\n [(21, \"A\", \"T\", {\"DP\": [9], \"AD\": [1, 8]})]\n )\n\n def test_should_only_count_reads_that_overlap_the_ins(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCC*CAAAAAAAATTTTTTTTTTTAT\",\n [\" ....................*. \",\n \"..................... ..................... \",\n \" ..............T............. \",\n \" ...................T........ \",\n \" .................T.............. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \".....................T...... \"],\n [(20, \"C\", \"CT\", {\"DP\": [9], \"AD\": [1, 8]})]\n )\n\n def test_should_only_count_reads_that_overlap_the_del(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTTAT\",\n [\" ..................... \",\n \"..................... ..................... \",\n \" ..............*............. \",\n \" ...................*........ \",\n \" .................*.............. \",\n \" .................*.................. \",\n \" .................*.................. \",\n \" .................*.................. \",\n \" .................*.................. \",\n \".....................*...... \"],\n [(20, \"CA\", \"C\", {\"DP\": [9], \"AD\": [1, 8]})]\n )\n\n def test_snps_at_same_location_should_have_expected_read_counts_with_anonymous_reads(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" ..............C............. \",\n \" ...................C........ \",\n \" ............................ \",\n \" .................C.............. \",\n \" .................C.................. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \" .................T.................. \",\n \".....................T...... 
\"],\n [(21, \"A\", \"C\", {\"DP\": [9], \"AD\": [1, 4]}),\n (21, \"A\", \"T\", {\"DP\": [9], \"AD\": [1, 4]})]\n )\n\n def test_should_have_expected_sample_info_data_when_half_reads_support_snp(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" ..............C............. \",\n \" ............................ \",\n \" .................C.............. \",\n \" .................................... \",\n \" .................................... \",\n \" .................C.................. \",\n \" .................................... \",\n \" .................C.................. \",\n \" .................................... \",\n \".....................C...... \"],\n [(21, \"A\", \"C\", {\"DP\": [10], \"AD\": [5, 5]})]\n )\n\n def test_all_reads_support_deletion(self):\n self.calls_variants_with_sample_data(\n \"TGTTATTAATCCCTTGTCAGATTTTTTTTTTGCAAATATTTT\",\n [\" ..............**********........ \",\n \" ...................**********........ \",\n \" .................**********..... \",\n \" .................**********......... \",\n \" .................**********......... \",\n \" .................**********......... \",\n \" .................**********......... \",\n \" .................**********......... \",\n \" .................**********......... \",\n \".....................**********.......... \"],\n [(20, \"ATTTTTTTTTT\", \"A\", {\"DP\": [10], \"AD\": [0, 10]})]\n )\n\n def test_incorrectly_aligned_reads_support_deletion(self):\n self.calls_variants_with_sample_data(\n \"TGTTATTAATCCCTTGTCAGA*********TTTTTTTTTTGCAAATATTTTCTGATGAGTACGG\",\n [\" ..............GCAAATATT \",\n \" ...................GCAAATATT \",\n \" .................GCAAATATT \",\n \" .................*******************................... \",\n \" .................*******************................... \",\n \" .................*******************................... \",\n \" .................*******************................... \",\n \" .................*******************................... \",\n \" .................*******************................... \",\n \".....................*******************.............. \"],\n [(20, \"ATTTTTTTTTT\", \"A\", {\"DP\": [10], \"AD\": [0, 10]})]\n )\n\n def test_should_only_count_reads_that_properly_overlap_a_deletion_as_support(self):\n self.calls_variants_with_sample_data(\n \"TTGTATTTCCTGTTATTAATCCCTTGTCAGATTTTTTTTTTGCAAATATTTT\",\n [\"...............................**********.... \",\n \"...............................**********.... \",\n \"...............................**********... \",\n \"...............................**********.. \",\n \"...............................**********. \",\n \"...............................********** \"],\n [(30, \"ATTTTTTTTTT\", \"A\", {\"DP\": [6], \"AD\": [1, 5]})]\n )\n\n def test_should_count_reads_that_partially_overlap_an_insertion_if_inserted_sequence_is_distinct_from_subsequent_reference_sequence(self): # noqa\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCC****ATAAAAAAAATTTTTTTTTTT\",\n [\" ..............CTCT............. \",\n \".....................CTC \",\n \" .................CTCT.............. \",\n \" .................CTCT............... \",\n \".....................CTC \",\n \" .................CTCT.................. \",\n \".....................CTC \",\n \" .................CTCT.................. \",\n \".....................CTC \",\n \".....................CTCT...... 
\"],\n [(20, \"C\", \"CCTCT\", {\"DP\": [10], \"AD\": [0, 10]})]\n )\n\n def test_should_count_reads_support_consistent_with_left_alignment(self):\n self.calls_variants_with_sample_data(\n \"TTAGATTTTTTTCCTATAGAATTG**TTTTTTTTTTTT**ATTTCCTGTTATTAATCCCTTGTCAGATTTTTT\",\n [\"........................TT............**.................................\",\n \"........................**............TT.................................\",\n \"........................**............TT.................................\",\n ],\n [(23, \"G\", \"GTT\", {\"DP\": [3], \"AD\": [0, 3]})]\n )\n\n @expectedFailure\n def test_should_count_read_support_consistent_with_large_repeat_motif_across_different_reads(self):\n self.calls_variants_with_sample_data(\n \"GAATTGTTTTTTTTTTTTATTTCCTGTTTTTTTTTTTTATTTCCTGTT*********A*********TTCTGTC*********CATTCTGTCCGGATTCAGATACTTTGCCCATTTTTAAGTTGGATCATTAGATTTTTTTCCTAT\", # noqa\n [\"................................................ATTCTGTCC.*********.......*********... \", # noqa\n \"................................................ATTCTGTCC.*********.......*********... \", # noqa\n \" ............................................*********.TTCTGTCCA.......*********....... \", # noqa\n \" ............................................*********.TTCTGTCCA.......*********....... \", # noqa\n \" .*********.......CATTCTGTC...............................................................\", # noqa\n \" .*********.......CATTCTGTC...............................................................\" # noqa\n ],\n [(47, \"T\", \"TATTCTGTCC\", {\"DP\": [6], \"AD\": [0, 6]})]\n )\n\n def test_should_count_partial_support_for_non_completely_overlapping_reads(self):\n self.calls_variants_with_sample_data(\n \"GAATTGTTTTTTTTTTTTATTTCCTGTTTTTTTTTTTTATTTTCCTGTTATTCTGTCCATTCTGTCCGGATTCAGATACTTTGCCCATTT\",\n [\".......................................A.. \",\n \" ...............A..G.......................... \",\n \" ...............A..G.......................... \",\n \" ..G.............................................. \"],\n [(39, \"TTTT\", \"ATTG\", {\"DP\": [4], \"AD\": [0, 3]})], config_dict={\"allowMNPCalls\": \"True\"}\n )\n\n def test_should_count_reads_that_partially_overlap_insertions_as_supporting_reads(self):\n self.calls_variants_with_sample_data(\n \"GAATTGTTTTTTTTTTTTATTTCCTGTTTTTTTTTTTTAT**********ATTCCTGTTATTCTGTCCATTCTGTCCGGATTCAGATACTTTGCCCAT\",\n [\"........................................TATTCTGTCC.. \",\n \"........................................TATTCTGTCC.. 
\",\n \"........................................TATTCTGT \",\n \"........................................TATTCTGT \",\n \"........................................TATTCTG \",\n \"........................................TATTCTG \",\n \"........................................TATTCT \",\n \"........................................TATTCT \",\n \"........................................TATTC \",\n \"........................................TATTC \",\n \"........................................TATT \",\n \"........................................TATT \",\n \"........................................TAT \",\n \"........................................TAT \",\n \"........................................TA \",\n \"........................................TA \",\n \"........................................T \",\n \"........................................T \"],\n [(39, \"T\", \"TTATTCTGTCC\", {\"DP\": [18], \"AD\": [0, 18]})]\n )\n\n def test_should_count_reads_that_support_both_alternate_and_reference_allele_as_half(self):\n self.calls_variants_with_sample_data(\n \"GAATTGTTTTTTTTTTTTATTTCCTGTTTTTTTTTTTTAT**********TTTCCTGTTATTCTGTCCATTCTGTCCGGATTCAGATACTTTGCCCAT\",\n [\"........................................TATTCTGTCC.. \",\n \"........................................TATTCTGTCC.. \",\n \"........................................T \",\n \"........................................T \"],\n [(39, \"T\", \"TTATTCTGTCC\", {\"DP\": [4], \"AD\": [1, 3]})]\n )\n\n\nclass TestReadCountsInVariantCluster(AsciiWecallRunnerTest):\n\n def test_coverage_and_supporting_counts_for_one_snp(self):\n self.calls_variants_with_sample_data(\n \"TGCGAATACATCGCACCCCCCATACAACAAATTTGTCTATTG\",\n [\" ..............C............. \",\n \" ............................ \",\n \" .................C.............. \",\n \" .................................... \",\n \" .................................... \",\n \" .................C.................. \",\n \" .................................... \",\n \" .................C.................. \",\n \" .................................... \",\n \".....................C...... \"],\n [(21, \"A\", \"C\", {\"DP\": [10], \"AD\": [5, 5]})]\n )\n\n def test_coverage_and_supporting_counts_for_two_snps(self):\n self.calls_variants_with_sample_data(\n \"TGCGAATACATCGCACCCCCCATACAACAAATTTGTCTATTG\",\n [\" ..............C............. \",\n \" ..............A............. \",\n \" .................C.............. \",\n \" ............A....................... \",\n \" ............A....................... \",\n \" .................C.................. \",\n \" ............A....................... \",\n \" .................C.................. \",\n \" ............A....................... \",\n \".....................C...... \"],\n [(16, \"C\", \"A\", {\"DP\": [10], \"AD\": [5, 5]}),\n (21, \"A\", \"C\", {\"DP\": [10], \"AD\": [5, 5]})]\n )\n\n def test_coverage_and_supporting_counts_for_two_snps_in_one_cluster_when_some_reads_only_cover_one_snp_position(self): # noqa\n self.calls_variants_with_sample_data(\n \"ACCTGACCTGCGAATACATCGCACCCCCCATCTGCTACAACAAATTTGTCTATTGCGTAATGCC\",\n [\".............................C.. A..........................\",\n \" A..........................\",\n \" A..........................\",\n \" A..........................\",\n \" A..........................\",\n \".............................C.. \",\n \".............................C.. \",\n \".............................C.. \",\n \".............................C.. 
\"],\n [(29, \"A\", \"C\", {\"DP\": [5], \"AD\": [0, 5]}),\n (37, \"C\", \"A\", {\"DP\": [5], \"AD\": [0, 5]})], config_dict={\"allowMNPCalls\": False}\n )\n\n def test_coverage_and_supporting_counts_for_two_snps_in_two_clusters_when_some_reads_only_cover_one_snp_position(self): # noqa\n self.calls_variants_with_sample_data(\n \"ACCTGACCTGCGAATACATCGCACCCCCCATCCTGCCTGCTCTCCAACAAATTTGTCTATTGCGTAATGCC\",\n [\" A..........................\",\n \" A..........................\",\n \" A..........................\",\n \" A..........................\",\n \" A..........................\",\n \".............................C.. \",\n \".............................C.. \",\n \".............................C.. \",\n \".............................C.. \",\n \".............................C.. \"],\n [(29, \"A\", \"C\", {\"DP\": [5], \"AD\": [0, 5]}),\n (44, \"C\", \"A\", {\"DP\": [5], \"AD\": [0, 5]})], config_dict={\"allowMNPCalls\": False}\n )\n", "id": "508128", "language": "Python", "matching_score": 4.351405620574951, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_read_counts.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestVariantCallingFromTwoEqualLengthFwdReads(AsciiWecallRunnerTest):\n\n def test_calls_snp_with_one_one_genotype_on_forward_reads(self):\n self.calls_variants_with_genotype(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\" ..............C............. \",\n \" ...................C........ \",\n \" .................C.............. \",\n \" .................C.................. \",\n \".....................C...... \"],\n\n [\".....................C....................\", # expected output\n \".....................C....................\"]\n )\n\n def test_calls_snp_with_zero_one_genotype_on_forward_reads(self):\n self.calls_variants_with_genotype(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" ..............C............. \",\n \" ...................C........ \",\n \" ................................ \",\n \" .................C.................. \",\n \".....................C...... \"],\n\n [\".....................C....................\", # expected output\n \"..........................................\"]\n )\n\n def test_calls_two_snps_with_zero_one_genotypes(self):\n self.calls_variants_with_genotype(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" ..............C............. \",\n \" ,,,,,,,,,,,,,,,,,,,c,,,,,,,, \",\n \" ................................ \",\n \" ,,,,,g,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \".........G.................. \"],\n\n [\".....................C....................\", # expected output\n \".........G................................\"]\n )\n\n def test_calls_snp_with_noise(self):\n self.calls_variants_with_genotype(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" .....T........C............. \",\n \" 2 \",\n \" ,,,,,,,,,,,,,,,,,,,c,,,,,,,, \",\n \" ................................ \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \"............................ 
\"],\n\n [\".....................C....................\", # expected output\n \"..........................................\"]\n )\n\n def test_calls_deletion_and_snp_at_same_location_in_repeat_region_with_few_reads_as_anchors(self):\n chrom = \"1\"\n sample = \"sample\"\n\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n 'CGAGAGAGAGAGAGAGAGAGATAGAGAGAGAGAGAGAGAGTC', chrom=chrom\n ).with_read(\n '....................**....................', n_rev=5, n_fwd=0, chrom=chrom, sample_name=sample\n ).with_read(\n '.....................G....................', n_rev=5, n_fwd=0, chrom=chrom, sample_name=sample\n )\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect \\\n .has_record_for_variants(\n Variant(chrom, 21, \"T\", \"G\"),\n Variant(chrom, 19, \"GAT\", \"G\")\n ).with_sample(sample).has_phased_genotypes(\".|1\", \"1|.\")\n\n def test_calls_snp_as_hom_alt_call(self):\n # Based on real example found in NA12878 at SNP(2:15650796-15650797 G\n # --> A)\n self.calls_variants_with_genotype(\n # 7 8 9 # noqa\n # 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 # noqa\n # 789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567 # noqa\n \"CATTTGCTCATTTGATTGCTTGAATTATCAAAATAGAACCTAGCCTATGCTGACTACTACAGCCCCAAGGACTGCCTGACACAAAATAGAAGCTCAATAAGTAAGTATAGAATGAATGGATGGATGGATGAATAGTCCATGTAAATATGGAACAACATGGAAATCTGGACTCTAATTCTGCCATTTGCTAGCTGGATGATC\", # noqa\n [\" ...A....................T.C......TT.T.CAAGCAG..G.CG.C.TACGAGATCGTGATGT....GG.G...A.A.G.G....CTTCC...C \", # noqa\n \" ...A................................................................................................. \", # noqa\n \" ...A................................................................................................. \", # noqa\n \" ...A................................................................................................. \", # noqa\n \" ...A................................................................................................. \", # noqa\n \" ...A................................................................................................. \", # noqa\n \" ...A................................................................................................. \", # noqa\n \" ...A................................................................................................. 
\", ], # noqa\n\n [\"....................................................................................................A....................................................................................................\", # noqa\n \"....................................................................................................A....................................................................................................\"] # noqa\n )\n", "id": "449362", "language": "Python", "matching_score": 3.9156434535980225, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_with_genotypes.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestCallingWithDifferentReadLengths(AsciiWecallRunnerTest):\n\n def test_should_not_call_snps_with_reads_of_length_less_or_equal_to_20(self):\n self.calls_variants(\n \"ACGCCCCTGCAAAAAAAAAA\",\n [\".T..................\",\n \".T..................\",\n \"..........C.........\",\n \"..........C.........\"],\n [\"....................\",\n \"....................\"]\n )\n\n def test_calls_two_snps_on_reads_longer_than_20(self):\n self.calls_variants(\n \"ACGCCCCCTGCAAAAAAAAAA\",\n [\".T...................\",\n \".T...................\",\n \"...........C.........\",\n \"...........C.........\"]\n )\n\n\nclass TestCallingSingleVariants(AsciiWecallRunnerTest):\n\n def test_calls_heterozygous_snp_on_the_left_edge(self):\n self.calls_variants_with_genotype(\n \"ACGCCCCCTGCAAAAAAAAAA\",\n [\"C....................\",\n \"C....................\",\n \".....................\",\n \".....................\"],\n\n [\"C....................\",\n \".....................\"] # Expected genotype\n )\n\n def test_calls_homozygous_snp_on_the_left_edge(self):\n self.calls_variants_with_genotype(\n \"ACGCCCCCTGCAAAAAAAAAA\",\n [\"C....................\",\n \"C....................\",\n \"C....................\",\n \"C....................\"],\n\n [\"C....................\",\n \"C....................\"] # Expected genotype\n )\n\n def test_calls_heterozygous_snp_on_the_right_edge(self):\n self.calls_variants_with_genotype(\n \"ACGCCCCCTGCAATAAATGCG\",\n [\"....................C\",\n \".....................\",\n \".....................\",\n \"....................C\"],\n\n [\"....................C\",\n \".....................\"] # Expected genotype\n )\n\n def test_calls_homozygous_snp_on_the_right_edge(self):\n self.calls_variants_with_genotype(\n \"ACGCCCCCTGCGACAGATAAT\",\n [\"....................C\",\n \"....................C\",\n \"....................C\",\n \"....................C\"],\n\n [\"....................C\",\n \"....................C\"] # Expected genotype\n )\n\n def test_calls_heterozygous_snp(self):\n self.calls_variants_with_genotype(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n [\"...................T........................\",\n \"...................T........................\",\n \"............................................\",\n \"............................................\"],\n\n [\"...................T........................\", # Expected genotype\n \"............................................\"]\n )\n\n def test_calls_homozygous_snp(self):\n self.calls_variants_with_genotype(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n [\"...................T........................\",\n \"...................T........................\",\n 
\"...................T........................\",\n \"...................T........................\"],\n\n [\"...................T........................\", # Expected genotype\n \"...................T........................\"]\n )\n\n def test_calls_heterozygous_insertion(self):\n self.calls_variants_with_genotype(\n \"AAAGCGTACAACCGGGTTA*GTCACAAACCCGTTACGTATGCATG\",\n [\"...................T.........................\",\n \"...................T.........................\",\n \"...................*.........................\",\n \"...................*.........................\"],\n\n [\"...................T.........................\", # Expected genotype\n \"...................*.........................\"]\n )\n\n def test_calls_homozygous_insertion(self):\n self.calls_variants_with_genotype(\n \"AAAGCGTACAACCGGGTTA*GTCACAAACCCGTTACGTATGCATG\",\n [\"...................T.........................\",\n \"...................T.........................\",\n \"...................T.........................\",\n \"...................T.........................\"],\n\n [\"...................T.........................\", # Expected genotype\n \"...................T.........................\"]\n )\n\n def test_calls_heterozygous_deletion(self):\n self.calls_variants_with_genotype(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n [\"............................................\",\n \"............................................\",\n \"...................*........................\",\n \"...................*........................\"],\n\n [\"............................................\", # Expected genotype\n \"...................*........................\"]\n )\n\n def test_calls_homozygous_deletion(self):\n self.calls_variants_with_genotype(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n [\"...................*........................\",\n \"...................*........................\",\n \"...................*........................\",\n \"...................*........................\"],\n\n [\"...................*........................\", # Expected genotype\n \"...................*........................\"]\n )\n\n\nclass TestCallingCombinationsOfVariants(AsciiWecallRunnerTest):\n\n def test_calls_heterozygous_snps_on_both_edges_further_than_15_bases(self):\n self.calls_variants_with_genotype(\n \"ACGCCCCCATGCTCCAAAGAA\",\n [\"....................C\",\n \"....................C\",\n \"C....................\",\n \"C....................\"],\n\n [\"....................C\",\n \"C....................\"] # Expected genotype\n )\n\n def test_calls_snps_on_both_edges_when_gap_at_least5_low_coverage(self):\n self.calls_variants_with_genotype(\n \"ACGCCCCCTGCAAAAAAAAAAAAAAAAAAA\",\n [\"G....................... \",\n \"G....................... 
\",\n \"G.............................\",\n \".............................C\",\n \".............................C\",\n \".............................C\"],\n\n [\"G.............................\",\n \".............................C\"] # Expected genotype\n )\n\n def test_calls_two_heterozygous_snps_at_same_pos(self):\n svc_driver = SVCDriver(self)\n svc_driver\\\n .with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=\"1\")\\\n .with_read(\n \"...................T........................\", n_fwd=1, n_rev=1, chrom=\"1\", sample_name=\"sample\")\\\n .with_read(\n \"...................C........................\", n_fwd=1, n_rev=1, chrom=\"1\", sample_name=\"sample\")\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect\\\n .has_record(\"1\", 19, \"G\", \"T\")\\\n .with_sample(\"sample\")\\\n .has_genotype(\"./1\")\n vcf_expect\\\n .has_record(\"1\", 19, \"G\", \"C\")\\\n .with_sample(\"sample\")\\\n .has_genotype(\"./1\")\n\n def test_calls_two_heterozygous_snps_at_different_pos(self):\n self.calls_variants_with_genotype(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n [\"...................T........................\",\n \"...................T........................\",\n \"....................C.......................\",\n \"....................C.......................\"],\n\n [\"...................T........................\", # Expected genotype\n \"....................C.......................\"]\n )\n\n def test_calls_two_heterozygous_deletions_at_same_pos(self):\n svc_driver = SVCDriver(self)\n svc_driver\\\n .with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=\"1\")\\\n .with_read(\n \"...................**.......................\", n_fwd=1, n_rev=1, chrom=\"1\", sample_name=\"sample\")\\\n .with_read(\n \"...................*........................\", n_fwd=1, n_rev=1, chrom=\"1\", sample_name=\"sample\")\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect\\\n .has_record(\"1\", 18, \"AG\", \"A\")\\\n .with_sample(\"sample\")\\\n .has_genotype(\"./1\")\n vcf_expect\\\n .has_record(\"1\", 18, \"AGT\", \"A\")\\\n .with_sample(\"sample\")\\\n .has_genotype(\"./1\")\n\n def test_calls_two_heterozygous_deletions_at_different_pos(self):\n self.calls_variants_with_genotype(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n [\"...................**.......................\",\n \"...................**.......................\",\n \".....................*......................\",\n \".....................*......................\"],\n\n [\"...................**.......................\", # Expected genotype\n \".....................*......................\"]\n )\n\n def test_calls_two_heterozygous_insertions_at_same_pos(self):\n svc_driver = SVCDriver(self)\n svc_driver\\\n .with_ref_sequence(\n \"AAAGCGTACAACCGGGTTC**GTCACAAACCCGTTACGTATATG\", chrom=\"1\")\\\n .with_read(\n \"...................AA.......................\", n_fwd=1, n_rev=1, chrom=\"1\", sample_name=\"sample\")\\\n .with_read(\n \"...................*A.......................\", n_fwd=1, n_rev=1, chrom=\"1\", sample_name=\"sample\")\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect\\\n .has_record(\"1\", 18, \"C\", \"CA\")\\\n .with_sample(\"sample\")\\\n .has_genotype(\"./1\")\n vcf_expect\\\n .has_record(\"1\", 18, \"C\", \"CAA\")\\\n .with_sample(\"sample\")\\\n .has_genotype(\"./1\")\n\n def test_calls_two_heterozygous_insertions_at_different_pos(self):\n 
self.calls_variants_with_genotype(\n \"AAAGCGTACAACCGGGTTC**GTCACAAAC**CCGTCGTATATG\",\n [\"...................AA.........**............\",\n \"...................AA.........**............\",\n \"...................**.........AA............\",\n \"...................**.........AA............\"],\n\n [\"...................AA.........**............\", # Expected genotype\n \"...................**.........AA............\"]\n )\n\n\nclass TestMNPCalling(AsciiWecallRunnerTest):\n def calls_variants_with_coverage_20(self, ref, sequence_list, expected_variants=None):\n self.calls_variants(\n ref,\n sequence_list,\n expected_ascii_haplotypes=None,\n expected_variant_stubs=expected_variants,\n n_fwd=20,\n n_rev=20,\n config_dict={\n \"allowMNPCalls\": \"True\",\n \"maxClusterDist\": 20})\n\n def test_calls_mnp_on_adjacent_bases_single_read(self):\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGGGGCAAAAAAAAAA\",\n [\".......AG.....................\"],\n expected_variants={(7, \"CT\", \"AG\")}\n )\n\n def test_calls_mnp_with_bases_apart_by_1(self):\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGGGGCAAAAAAAAAA\",\n [\".......A.A....................\"],\n expected_variants={(7, \"CTG\", \"ATA\")}\n )\n\n def test_calls_mnp_with_bases_apart_by_10(self):\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\",\n [\".......A....................A............\"],\n expected_variants={(7, \"CTGGGGGGGGGTGGGGGGGGGG\", \"ATGGGGGGGGGTGGGGGGGGGA\")}\n )\n\n def test_calls_two_snps_with_bases_apart_by_21(self):\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGGGTGGGGGGGGGGGCAAAAAAAAAA\",\n [\".......A.....................A...........\"]\n )\n\n def test_calls_mnp_with_three_snps_and_bases_apart_by_1(self):\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGGGGCAAAAAAAAAA\",\n [\".......A.A.A..................\"],\n expected_variants={(7, \"CTGGG\", \"ATAGA\")}\n )\n\n def test_calls_mnp_with_snps_apart_by_1(self):\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGGGGCAAAAAAAAAA\",\n [\".T.G.G.A.A.A.T.T.T.A.G.G.G.G.T\"],\n expected_variants={(1, \"CGCCCCCTGGGGGGGGGGCAAAAAAAAAA\", \"TGGCGCATAGAGTGTGTGAAGAGAGAGAT\")}\n )\n\n def test_calls_mnp_with_snps_apart_by_1_starting_in_the_middle(self):\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGGGGCAAAAAAAAAA\",\n [\".......A.A.A.T.T.T.A.G.G.G.G.T\"],\n expected_variants={(7, \"CTGGGGGGGGGGCAAAAAAAAAA\", \"ATAGAGTGTGTGAAGAGAGAGAT\")}\n )\n\n def test_calls_mnp_with_adjacent_snps(self):\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGGGGCAAAAAAAAAA\",\n [\".TTGTAGAGC....................\"],\n expected_variants={(1, \"CGCCCCCTG\", \"TTGTAGAGC\")}\n )\n\n def test_calls_mnp_with_competely_different_seq(self):\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGGGGCAAAAAAAAAA\",\n [\"TTTGTAGAGCCATCATCATATTTGGGCCTG\"],\n expected_variants={(0, \"ACGCCCCCTGGGGGGGGGGCAAAAAAAAAA\", \"TTTGTAGAGCCATCATCATATTTGGGCCTG\")}\n )\n\n def test_calls_adjacent_snps_and_mnp_on_separate_reads(self):\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGGGGCAAAAAAAAAA\",\n [\".......A.T....................\",\n \"........G.....................\"],\n expected_variants={(7, \"CTG\", \"ATT\"), (8, \"T\", \"G\")}\n )\n\n def test_calls_overlapping_mnps_on_separate_reads(self):\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGGGGCAAAAAAAAAA\",\n [\".......A.T......T.............\",\n \"....A...G...C......T...C......\"],\n {(7, \"CTGGGGGGGG\", \"ATTGGGGGGT\"), (4, 
\"CCCCTGGGGGGGGGGCAAAA\", \"ACCCGGGGCGGGGGGTAAAC\")}\n )\n\n def test_calls_mnps_when_interupted_by_deletion_within_too_long_homopolymer(self):\n # deletion not called. If the GG homopolymer one shorter it does get called\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGAGGGGGGGGGGGGGCAAAAAAAAAA\",\n [\".......A.A.A.T.T.*............A.G.G.G.G.T\"],\n {(7, \"CTGGGGGGG\", \"ATAGAGTGT\"), (16, \"AG\", \"A\"), (30, \"CAAAAAAAAAA\", \"AAGAGAGAGAT\")}\n )\n\n def test_calls_mnps_when_interupted_by_deletion_shorter_homopolymer(self):\n # deletion now called because homopolymer just short enough\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGAGGGGGGGGGGGCAAAAAAAAAA\",\n [\".......A.A.A.T.T.*..........A.G.G.G.G.T\"],\n {(7, \"CTGGGGGGG\", \"ATAGAGTGT\"), (16, \"AG\", \"A\"), (28, \"CAAAAAAAAAA\", \"AAGAGAGAGAT\")}\n )\n\n def test_calls_del_when_followed_by_mnp(self):\n # same example as above (test_calls_mnps_when_interupted_by_deletion_within_too_long_homopolymer)\n # but the first MNP removed. Deletion still not called\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGAGGGGGGGGGGGGGCAAAAAAAAAA\",\n [\".................*............A.G.G.G.G.T\"],\n {(16, \"AG\", \"A\"), (30, \"CAAAAAAAAAA\", \"AAGAGAGAGAT\")}\n )\n\n def test_calls_del_when_preceded_by_mnp(self):\n # same example as above (test_calls_mnps_when_interupted_by_deletion_within_too_long_homopolymer)\n # but the second MNP has now been removed. Deletion now called\n self.calls_variants_with_coverage_20(\n \"ACGCCCCCTGGGGGGGAGGGGGGGGGGGGGCAAAAAAAAAA\",\n [\".......A.A.A.T.T.*.......................\"],\n {(7, \"CTGGGGGGG\", \"ATAGAGTGT\"), (16, \"AG\", \"A\")}\n )\n\n\nclass TestVariantCallingWithCustomQuality(AsciiWecallRunnerTest):\n def test_calls_two_snps_with_high_quality(self):\n self.calls_variants(\n \"ACGCCCCCTGCAAAAAAAAAA\", # input\n [\".T...................\",\n \" 9 \",\n \".T...................\",\n \" 9 \",\n \"...........C.........\",\n \" 9 \",\n \"...........C.........\",\n \" 9 \"]\n )\n\n def test_should_not_call_snp_with_low_quality(self):\n self.calls_variants(\n \"ACGCCCCCTGCAAAAAAAAAA\", # input\n [\".T...................\",\n \" 1 \",\n \".T...................\",\n \" 1 \",\n \"...........C.........\",\n \" 9 \",\n \"...........C.........\",\n \" 9 \"],\n\n [\"...........C.........\", # expected output\n \".....................\"]\n )\n\n def test_should_not_call_snp_with_low_quality_in_the_middle(self):\n self.calls_variants(\n \"ACGCCCCCTGCAAAAAAAAAA\", # input\n [\".T...................\",\n \" 9 \",\n \".T...................\",\n \" 9 \",\n \"...........C.........\",\n \" 1 \",\n \"...........C.........\",\n \" 1 \"],\n\n [\".T...................\", # expected output\n \".....................\"]\n )\n\n def test_should_not_call_snp_with_low_quality_on_different_length_reads(self):\n self.calls_variants(\n \"ACGCCCCCTGCAAAAAAAAAACCCCCCTTTGGGGGGGGGG\", # input\n [\" .T...................... \",\n \" 1 \",\n \"......T........................... \",\n \" 1 \",\n \" ...........G..................\",\n \" 9 \",\n \" ...................G......... 
\",\n \" 9 \"],\n\n [\"........................................\", # expected output\n \".....................G..................\"]\n )\n\n\nclass TestCallingInExtremeEdgeCases(AsciiWecallRunnerTest):\n def test_should_not_call_anything_in_silly_case(self):\n self.calls_variants(\n \"A**********************************************A\",\n [\".**********************************************.\",\n \".**********************************************.\"]\n )\n", "id": "5912675", "language": "Python", "matching_score": 6.4934210777282715, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_calling_in_clean_data.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import expectedFailure\n\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\n\n\nclass TestCallingFromDataWithReadErrors(AsciiWecallRunnerTest):\n\n def test_calls_heterozygous_snp_on_reads_with_scattered_base_calling_errors(self):\n self.calls_variants_with_genotype(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\" ..............C.........G... \",\n \" ,,,c,,,t,,,,,,,,,,,c,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,c,,,,,,,,,,,,,,, \",\n \" ....C.......G....C.......\",\n \" ..........T...C............. \",\n \" ,,t,,,,,,,,,,,,,,,,c,,,,,,,,, \"],\n\n [\".....................C....................\", # expected output\n \".....................C....................\"]\n )\n\n def test_calls_homozygous_snp_on_reads_with_base_calling_errors_at_every_position(self):\n self.calls_variants_with_genotype(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\" .C....G....G..C.T.G.....G... \",\n \" ,t,c,,,t,,,,,,,,,g,c,,c,,,,,, \",\n \" g,,,,,,t,,t,,,,,gcg,,a,,,,,,a,,g, \",\n \" ....C....T..G...GC..A.A.A\",\n \" G..T.T....T...C.....G......G \",\n \" c,t,t,,,,,,,,tt,,,,c,,,,,,g,g \"],\n\n [\".....................C....................\", # expected output\n \".....................C....................\"]\n )\n\n @expectedFailure\n def test_calls_heterozygous_snp_on_reads_with_base_calling_errors_at_every_position(self):\n self.calls_variants_with_genotype(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\" .C....G....G....T.G.....G... \",\n \" ,t,c,,,t,,,,,,,,,g,c,,c,,,,,, \",\n \" g,,,,,,t,,t,,,,,g,g,,a,,,,,,a,,g, \",\n \" ....C....T..G...GC..A.A.A\",\n \" G..T.T....T...C.....G......G \",\n \" c,t,t,,,,,,,,tt,,,,c,,,,,,g,g \"],\n\n [\"..........................................\", # expected output\n \".....................C....................\"]\n )\n\n def test_calls_del_on_noisy_reads(self):\n self.calls_variants(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\" ..............*............. \",\n \" ,,,,,,,t,,,,,,,,,,,*,,,,,,,,, \",\n \" ,*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ............G....C.......\",\n \" ..........T...*............. \",\n \" ,,,,,,,,,,*,,,,,,,,*,,,,,,,,, \"],\n\n [\".....................*....................\", # expected output\n \".....................*....................\"]\n )\n\n\nclass TestCallingFromSimulationOfRealData(AsciiWecallRunnerTest):\n def test_calls_nonconflicting_variants(self):\n \"\"\"\n This is a real world example, githash 4cce80b721ffc564b21682b07cbd6d4924045112\n calls a conflicting combination of a het MNP and hom SNP. Do no regress\n back test.\n \"\"\"\n self.calls_variants(\n # 012345678\n \"GACCATCCCGGCTAAAACGGTGAAACCCAGTCTCTACTAAAAATACAAAA\",\n [\",,,,,,,, ......C............C.....................\",\n \"... 
,,,,,,c,,,,,,,,,,,,c,,,,,,,,,,,,,,,,,,,,,\",\n \",,,,,,,, .....C.....................\",\n \",,,,,,,,t,,,,,,c,,,,, .....................\",\n \"........T......C..A......... ..................\",\n \"........T......C..A.........C.......... ..........\",\n \",,,,,,,,t,,,,,,c,,,,,,,,,,,,c,,,,,,,,,, .........\",\n \",,,,,,,,t,,,,,,c,,a,,,,,,,,,c,,,,,,,,,,, ,,,,,,,,\",\n \",,,,,,,,t,,,,,,c,,a,,,,,,,,,c,,,,,,,,,,,,,, ,,,\",\n \"........T......C..A.........C.................. \",\n \",,,,c,,,t,,,,,,c,,,,,,,,,,,,c,,,,,,, .............\",\n \",,,,,,,,t,,,,,,c,,,,,,,,,,,,c,,,,,,,,,,,,,,,,,,,,,\",\n \",,,,,,,,t,,,,,,c,,,,,,,,,,,,c,,,,,,,,,,,,,,,,,,,,,\",\n \",,,,,,,,t,,,,,,c,,,,,,,,,,,,c,,,,,,,,,,,,,,,,,,,,,\",\n \"........T......C..A.........C.....................\",\n \"..................................................\",\n \",,,,,,,,t,,,,,,c,,a,,,,,,,,,c,,,,,,,,,,,,,,,,,,,,,\",\n \"........T......C..A.........C.....................\",\n \"........T......C..A.........C.....................\",\n \"........T......C..A.........C.....................\",\n \"........T......C..A.........C.....................\",\n \",,,,,,,,t,,,,,,c,,a,,,,,,,,,c,,,,,,,,,,,,,,,,,,,,,\",\n \"........T......C............C.....................\",\n \"........T......C..A.........C.....................\",\n \",,,,,,,,t,,,,,,c,,a,,,,,,,,,c,,,,,,,,,,,,,,,,,,,,,\",\n \",,,,,,,,t,,,,,ccc,,,,,,,,,,,c,,,,,,,,,,,,,,,,a,,,,\"],\n\n expected_variant_stubs=[\n (8, \"CGGCTAAAACGGTGAAACCCA\", \"TGGCTAACACAGTGAAACCCC\"),\n (8, \"CGGCTAAAACGGTGAAACCCA\", \"TGGCTAACACGGTGAAACCCC\"),\n ], config_dict={\"allowMNPCalls\": \"True\"}\n )\n\n\nclass TestVariantCallingWithCustomQuality(AsciiWecallRunnerTest):\n def test_calls_two_snps_with_high_quality(self):\n self.calls_variants(\n \"ACGCCCCCTGCAAAAAAAAAA\", # input\n [\".T...................\",\n \" 9 \",\n \".T...................\",\n \" 9 \",\n \"...........C.........\",\n \" 9 \",\n \"...........C.........\",\n \" 9 \"]\n )\n\n def test_should_not_call_snp_with_low_quality(self):\n self.calls_variants(\n \"ACGCCCCCTGCAAAAAAAAAA\", # input\n [\".T...................\",\n \" 1 \",\n \".T...................\",\n \" 1 \",\n \"...........C.........\",\n \" 9 \",\n \"...........C.........\",\n \" 9 \"],\n\n [\"...........C.........\", # expected output\n \".....................\"]\n )\n\n def test_should_not_call_snp_with_low_quality_in_the_middle(self):\n self.calls_variants(\n \"ACGCCCCCTGCAAAAAAAAAA\", # input\n [\".T...................\",\n \" 9 \",\n \".T...................\",\n \" 9 \",\n \"...........C.........\",\n \" 1 \",\n \"...........C.........\",\n \" 1 \"],\n\n [\".T...................\", # expected output\n \".....................\"]\n )\n\n def test_should_not_call_snp_with_low_quality_on_different_length_reads(self):\n self.calls_variants(\n \"ACGCCCCCTGCAAAAAAAAAACCCCCCTTTGGGGGGGGGG\", # input\n [\" .T...................... \",\n \" 1 \",\n \"......T........................... \",\n \" 1 \",\n \" ...........G..................\",\n \" 9 \",\n \" ...................G......... 
\",\n \" 9 \"],\n\n [\"........................................\", # expected output\n \".....................G..................\"]\n )\n\n\nclass TestCallingInExtremeEdgeCases(AsciiWecallRunnerTest):\n def test_should_not_call_anything_in_silly_case(self):\n self.calls_variants(\n \"A**********************************************A\",\n [\".**********************************************.\",\n \".**********************************************.\"]\n )\n", "id": "5573022", "language": "Python", "matching_score": 1.3738853931427002, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_calling_in_data_with_read_errors.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport logging\nimport re\n\nfrom wecall.bamutils.bam_builder import BAMBuilder\nfrom wecall.bedutils.bedwriter import BEDWriterContextManager\nfrom wecall.wecall_utils.wecall_config_builder import WecallConfigBuilder\nfrom wecall.wecall_utils.wecall_input_data_builder import WecallInputDataBuilder\nfrom wecall_test_drivers.tool_runner import log_file, log_bam_file\nimport os\nfrom wecall.bamutils.read_sequence import HIGH_QUALITY\nfrom wecall.bamutils.sample_bank import SampleBank\nfrom wecall.genomics.reference_chromosome import DEFAULT_CHROM\nfrom wecall_test_drivers.ascii_wecall_runner import DEFAULT_SAMPLE_NAME\nfrom wecall_test_drivers.variant_caller_wrapper import VariantCallerWrapper\nfrom wecall_test_drivers.vcf_expectation import VCFExpectation\n\n\nclass SVCDriver(object):\n\n def __init__(self, test_case):\n self.__test_case = test_case\n self._sample_bank = {}\n self.__bam_data = {}\n self._config = {\n 'noSimilarReadsFilter': False,\n 'minCallQual': 2,\n }\n self._output_vcf_filename = None\n self.__log_filename = None\n self.__bam_filenames = None\n self.__reference_filename = None\n\n def with_ref_sequence(self, ref_sequence, pos_from=0, chrom=DEFAULT_CHROM):\n self._sample_bank[chrom] = SampleBank(ref_sequence, pos_from, chrom)\n return self\n\n def with_read(\n self,\n read,\n quality=None,\n n_fwd=None,\n n_rev=None,\n mapping_quality=HIGH_QUALITY,\n chrom=DEFAULT_CHROM,\n sample_name=DEFAULT_SAMPLE_NAME,\n read_id=None,\n read_flags=None,\n cigar_string=None,\n read_start=None,\n read_mate_start=None\n ):\n assert(chrom in self._sample_bank.keys())\n if sample_name not in self._sample_bank[chrom].sample_names:\n self._sample_bank[chrom].add_sample_name(sample_name)\n self._sample_bank[chrom][sample_name].add_sequence(\n read,\n n_fwd=n_fwd,\n n_rev=n_rev,\n mapping_quality=mapping_quality,\n quality_string=quality,\n read_id=read_id,\n read_flags=read_flags,\n cigar_string=cigar_string,\n read_start=read_start,\n read_mate_start=read_mate_start\n )\n return self\n\n def with_ploidy(self, ploidy):\n self._config['ploidy'] = ploidy\n return self\n\n def with_normalize_variant_calls(self, normalize_variant_calls_option):\n self._config['normalizeVariantCalls'] = normalize_variant_calls_option\n return self\n\n def with_simple_reads(self):\n # for adding reads without explicitly specified metadata\n raise NotImplementedError\n\n def with_ref_filename(self, filename):\n self.__reference_filename = filename\n return self\n\n def with_read_with_quality(\n self,\n read,\n quality,\n sample_name=None,\n n_fwd=None,\n n_rev=None):\n raise NotImplementedError()\n\n def with_bam_data(self, file_name, bam_data, with_read_group=True):\n self.__bam_data[file_name] = (bam_data, with_read_group)\n return self\n\n def with_output_vcf_filename(self, filename):\n 
self._output_vcf_filename = filename\n return self\n\n def with_min_call_qual(self, qual_phred):\n self._config['minCallQual'] = qual_phred\n return self\n\n def with_output_format(self, outputFormat):\n self._config['outputFormat'] = outputFormat\n return self\n\n def with_mem_limit(self, mem_limit):\n self._config['memLimit'] = mem_limit\n return self\n\n def with_all_variants(self, all_variants):\n self._config['allVariants'] = all_variants\n return self\n\n def with_candidate_variants_file(self, filename):\n self._config['candidateVariantsFile'] = filename\n return self\n\n def with_genotype_alleles(self, filename):\n self._config['genotypeAllelesFile'] = filename\n return self\n\n def with_allow_MNP_calls(self, condition):\n self._config['allowMNPCalls'] = condition\n return self\n\n def with_max_cluster_distance(self, distance):\n self._config['maxClusterDist'] = distance\n return self\n\n def with_min_cluster_distance(self, distance):\n self._config['minClusterDist'] = distance\n return self\n\n def with_output_phased_genotypes(self, condition):\n self._config[\"outputPhasedGenotypes\"] = condition\n return self\n\n def with_allow_improper_pairs(self):\n self._config[\"allowImproperPairs\"] = True\n return self\n\n def with_duplicates_filter(self, condition):\n self._config[\"duplicatesFilter\"] = condition\n return self\n\n def with_no_similar_reads_filter(self, condition):\n self._config[\"noSimilarReadsFilter\"] = condition\n return self\n\n def with_var_filters(self, *var_filters):\n self._config[\"varFilterIDs\"] = \",\".join(var_filters)\n return self\n\n def with_bad_reads_window_size(self, bad_reads_window_size):\n self._config['badReadsWindowSize'] = bad_reads_window_size\n return self\n\n def with_min_bad_reads_score(self, min_bad_reads_score):\n self._config['minBadReadsScore'] = min_bad_reads_score\n return self\n\n def with_min_snp_q_over_depth(self, min_quality_over_depth):\n self._config['minSNPQOverDepth'] = min_quality_over_depth\n return self\n\n def with_overwrite(self, status):\n self._config['overwrite'] = status\n return self\n\n def with_number_of_jobs(self, n_jobs):\n self._config['numberOfJobs'] = n_jobs\n return self\n\n def with_work_dir(self, location):\n self._config['workDir'] = location\n return self\n\n def with_min_indel_q_over_depth(self, min_quality_over_depth):\n self._config['minIndelQOverDepth'] = min_quality_over_depth\n return self\n\n def with_min_root_mean_square_mapping_q(self, min_rms_mapping_quality):\n self._config['minRMSMappingQ'] = min_rms_mapping_quality\n return self\n\n def with_strand_bias_p(self, p_value):\n self._config['minStrandBiasP'] = p_value\n return self\n\n def with_allele_plus_strand_bias_p(self, p_value):\n self._config['minAllelePlusStrandBiasP'] = p_value\n return self\n\n def with_read_mapping_filter_q(self, min_read_mapping_quality):\n self._config['readMappingFilterQ'] = min_read_mapping_quality\n return self\n\n def with_log_timings(self, condition):\n self._config['logTimings'] = condition\n return self\n\n def with_region_string(self, region_string):\n self._config['regions'] = region_string\n return self\n\n def with_region_padding(self, padding):\n self._config['regionPadding'] = padding\n return self\n\n def with_min_reads_per_var(self, value):\n self._config[\"minReadsPerVar\"] = value\n return self\n\n def with_output_ref_calls(self, condition):\n self._config[\"outputRefCalls\"] = condition\n return self\n\n def with_max_ref_call_size(self, size):\n self._config[\"maxRefCallSize\"] = size\n return 
self\n\n def with_log_filename(self, filename):\n self.__log_filename = filename\n return self\n\n def with_bed_file(self, bed_file_records):\n tmp_bed_file = os.path.join(self.__test_case.work_dir, \"test.bed\")\n # come up with a temporary file name ending with .bed, tmp\n with BEDWriterContextManager(tmp_bed_file) as bed_writer:\n for record in bed_file_records:\n bed_writer.write_bed_record(record)\n self._config['regions'] = tmp_bed_file\n return self\n\n def with_turn_on_large_variant_calls(self, toggle_value):\n self._config['turnOnLargeVariantCalls'] = toggle_value\n return self\n\n def with_verbosity(self, verbosity):\n self._config['verbosity'] = verbosity\n return self\n\n def with_bam_filenames(self, bam_filenames):\n self.__bam_filenames = bam_filenames\n return self\n\n def call(self, expected_success=True):\n filestem = os.path.join(self.__test_case.work_dir, \"_\")\n wecall_input_data_builder = WecallInputDataBuilder(\n self.__test_case.work_dir)\n\n for chrom in self._sample_bank.keys():\n wecall_input_data_builder.with_sample_bank(\n self._sample_bank[chrom])\n\n if self.__reference_filename is not None:\n wecall_input_data_builder.with_ref_filename(\n self.__reference_filename)\n\n if self.__bam_filenames is not None:\n wecall_input_data_builder.with_bam_filenames(self.__bam_filenames)\n\n wecall_input_data = wecall_input_data_builder.build()\n\n for file_name, (bam_data, with_read_group) in self.__bam_data.items():\n bam_builder = BAMBuilder(\n os.path.join(\n self.__test_case.work_dir,\n file_name),\n with_read_group)\n for sample_name, sequence_bank in bam_data.items():\n ref = sequence_bank.reference\n bam_builder.with_bam_contig_data(\n ref.chrom, ref.length_minus_deletions(), sample_name, sequence_bank)\n bam_builder.build()\n wecall_input_data.bam_filenames.append(bam_builder.filename)\n\n wecall_config_builder = WecallConfigBuilder(\n wecall_input_data, filestem)\n for key, value in self._config.items():\n wecall_config_builder.with_configuration(key, value)\n\n vc_wrapper = VariantCallerWrapper(\n filestem, wecall_config_builder.build())\n\n for bam_filename in wecall_input_data.bam_filenames:\n for chrom, sample_bank in self._sample_bank.items():\n region = \"{}:{}\".format(chrom, sample_bank.reference.pos_from)\n log_bam_file(\n wecall_input_data.reference_filename,\n bam_filename,\n region)\n\n if self._output_vcf_filename is not None:\n vc_wrapper.output_vcf = self._output_vcf_filename\n\n if self.__log_filename is not None:\n vc_wrapper.log_filename = self.__log_filename\n\n vc_wrapper.run()\n\n if expected_success:\n self.__test_case.assertEqual(0, vc_wrapper.return_code, vc_wrapper.stderr.decode())\n else:\n self.__test_case.assertNotEqual(0, vc_wrapper.return_code, vc_wrapper.stderr.decode())\n\n return SVCExpectation(\n self.__test_case,\n vc_wrapper.stdout,\n vc_wrapper.stderr,\n vc_wrapper.return_code,\n vc_wrapper.output_vcf,\n vc_wrapper.log_filename\n )\n\n\nclass SVCExpectation(object):\n\n def __init__(\n self,\n test_case,\n stdout,\n stderr,\n return_code,\n output_vcf_path,\n log_filename):\n self.__stdout = stdout.decode()\n self.__stderr = stderr.decode()\n self.__return_code = return_code\n self.__output_vcf_path = output_vcf_path\n self.__log_filename = log_filename\n self.__test_case = test_case\n\n def with_log(self):\n self.__test_case.assertTrue(\n os.path.isfile(\n self.__log_filename),\n self.__log_filename)\n return SVCLogFileExpectation(self.__test_case, self.__log_filename)\n\n def with_output_vcf(self):\n 
self.__test_case.assertTrue(\n os.path.isfile(\n self.__output_vcf_path),\n self.__output_vcf_path)\n return VCFExpectation(self.__test_case, self.__output_vcf_path)\n\n def missing_output_vcf(self):\n self.__test_case.assertFalse(\n os.path.exists(\n self.__output_vcf_path),\n self.__output_vcf_path)\n return self\n\n def incorrect_var_ids_error(self, *incorrect_var_ids):\n expected_format = \" or \".join(\n filter(None, (\", \".join(incorrect_var_ids[0:-1]), incorrect_var_ids[-1])))\n self.__test_case.assertRegexpMatches(self.__stderr, re.escape(\n \"FAILED - Could not find filter ID(s): \" + expected_format + \"\\n\"))\n return self\n\n def bed_file_contains_contigs_that_are_not_present_in_the_reference_error(\n self, *bad_regions):\n bad_regions_list = list((str(region) for region in bad_regions))\n self.__test_case.assertRegexpMatches(\n self.__stderr,\n re.escape(\n \"FAILED - Region(s) \" +\n \",\".join(bad_regions_list) +\n \" are not contained in reference.\\n\"))\n return self\n\n def regions_contains_both_bedfile_and_region_string_error(self):\n self.__test_case.assertRegexpMatches(self.__stderr, re.escape(\n \"FAILED - Can not have mixture of BED files and region strings\\n\"))\n return self\n\n def bedfile_does_not_exist_error(self, bed_file_name):\n self.__test_case.assertRegexpMatches(self.__stderr, re.escape(\n \"FAILED - BED file {} does not exist\\n\".format(bed_file_name)))\n return self\n\n def genotyping_is_incompatible_with_outputting_reference_calls_error(self):\n self.__test_case.assertRegexpMatches(self.__stderr, re.escape(\n \"FAILED - Genotyping is incompatible with outputting reference calls.\\n\"))\n return self\n\n def attempt_to_load_invalid_contig_warning(self, contig_name):\n self.__test_case.assertIn(\n \"WARNING -- Attempted to load an invalid contig \\\"{}\\\" from the BAM file - \"\n \"Check that the contig names in the reference file match those in the BAM.\".format(contig_name),\n self.__stderr)\n return self\n\n def work_dir_not_a_directory_error(self, location):\n self.__test_case.assertRegexpMatches(\n self.__stderr,\n \"FAILED - Working dir: {} is not a directory\\n\".format(location)\n )\n return self\n\n def missing_genotype_file(self, filename):\n self.__test_case.assertRegexpMatches(self.__stderr, re.escape(\n \"FAILED - Genotype file {} does not exist\\n\".format(filename)))\n return self\n\n def output_exists_error(self, filename):\n self.__test_case.assertRegexpMatches(\n self.__stderr, \"FAILED - {} already exists\".format(filename))\n return self\n\n def missing_genotype_index_file(self, filename):\n self.__test_case.assertRegexpMatches(self.__stderr, re.escape(\n \"FAILED - Genotype index file {} does not exist\\n\".format(filename)))\n return self\n\n def unexpected_genotype_file_format(self, filename):\n self.__test_case.assertRegexpMatches(self.__stderr, re.escape(\n \"FAILED - File {} does not have .gz extension\\n\".format(filename)))\n return self\n\n def with_mem_limit_range_error(self):\n self.__test_case.assertRegexpMatches(\n self.__stderr,\n re.escape(\"FAILED - <memLimit> not in acceptable range. 
\\n\")\n )\n return self\n\n def with_incorrect_output_format_error(self):\n self.__test_case.assertRegexpMatches(\n self.__stderr, \"FAILED - output file format must be VCF4.1 or VCF4.2\")\n return self\n\n\nclass SVCLogFileExpectation(object):\n def __init__(self, test_case, path):\n self.__test_case = test_case\n self.__path = path\n\n with open(self.__path) as fp:\n for line in fp:\n logging.info('> ' + line.rstrip())\n logging.info(\"\")\n\n self.__lines = open(self.__path).read()\n\n def bed_file_contains_contigs_that_are_not_present_in_the_reference_warning(\n self, *bad_regions):\n bad_regions_list = list((str(region) for region in bad_regions))\n self.__test_case.assertRegexpMatches(\n self.__lines,\n re.escape(\n \"WARNING -- Contig(s) \" +\n \",\".join(bad_regions_list) +\n \" are not contained in reference.\\n\"))\n return self\n\n def input_variant_trimmed_warning(self, input, trimmed):\n def we_call_variant(var):\n return \"Variant({}:{}-{} {} --> {})\".format(var.chrom,\n var.pos_from, var.pos_to, var.ref, var.alt)\n self.__test_case.assertRegexpMatches(\n self.__lines,\n re.escape(\n \"WARNING -- Trimming input variant from: \" +\n we_call_variant(input) +\n \" to \" +\n we_call_variant(trimmed) +\n \".\\n\"))\n return self\n", "id": "4410385", "language": "Python", "matching_score": 5.363088130950928, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/svc_driver.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.wecall_utils.wecall_input_data_builder import WecallInputDataBuilder\nfrom wecall_test_drivers.variant_caller_wrapper import VariantCallerWrapper\nimport os\n\nfrom wecall_test_drivers import variant_caller_wrapper\n\n# The standard human reference genome. Needed for variant calling\nfrom wecall.wecall_utils.wecall_config_builder import WecallConfigBuilder\n\nEDNA_FATAL = 0\nEDNA_ERROR = 1\nEDNA_WARNING = 2\nEDNA_INFO = 3\nEDNA_TIMING = 4\nEDNA_DEBUG = 5\nEDNA_SUPER_DEBUG = 6\n\n\nclass VariantCallerBuilder(object):\n def __init__(\n self,\n workdir,\n dataset,\n chrom_intervals=None,\n ):\n self.__output_file_path_stem = os.path.join(workdir, \"weCall\")\n\n self.__wecall_config_builder = WecallConfigBuilder(\n dataset, self.__output_file_path_stem)\n\n if chrom_intervals is not None:\n self.with_configuration(\"regions\", \",\".join(\n [str(intv) for intv in chrom_intervals]))\n\n self.with_configuration(\"verbosity\", EDNA_FATAL)\n self.with_configuration(\"overwrite\", True)\n\n def with_configuration(self, key, value):\n if value is not None:\n self.__wecall_config_builder.with_configuration(key, value)\n return self\n\n def with_configuration_dict(self, config_dict):\n for key, value in config_dict.items():\n self.with_configuration(key, value)\n return self\n\n def build(self):\n wecall_config = self.__wecall_config_builder.build()\n return variant_caller_wrapper.VariantCallerWrapper(\n self.__output_file_path_stem, wecall_config)\n\n\nclass VariantCallerBuilderFromSampleBank(object):\n def __init__(self, sample_bank, work_dir):\n self.sample_bank = sample_bank\n self.work_dir = work_dir\n self.filestem = os.path.join(self.work_dir, \"_\")\n self.configuration = {'noSimilarReadsFilter': False, 'minCallQual': 2}\n self.wecall_input_data = None\n\n def build(self):\n self.wecall_input_data = WecallInputDataBuilder(\n self.work_dir).with_sample_bank(\n self.sample_bank).build()\n wecall_config_builder = WecallConfigBuilder(\n self.wecall_input_data, self.filestem)\n\n for key, value in self.configuration.items():\n 
wecall_config_builder.with_configuration(key, value)\n wecall_config = wecall_config_builder.build()\n\n return VariantCallerWrapper(self.filestem, wecall_config)\n", "id": "2958300", "language": "Python", "matching_score": 2.011989116668701, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/variant_caller_builder.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nclass WecallConfig(object):\n\n def __init__(self, filename):\n self.filename = filename\n\n\nclass ConfigFileWriter(object):\n\n def __init__(self, filename):\n self.__filename = filename\n\n def __enter__(self):\n self.__file = open(self.__filename, \"w\")\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.__file.close()\n\n def write_config_line(self, key, value):\n self.__file.write(\"{} = {}\\n\".format(key, value))\n\n\nclass WecallConfigBuilder(object):\n\n def __init__(self, wecall_input_data, filestem):\n self.filestem = filestem\n self.__configuration = {\n \"refFile\": wecall_input_data.reference_filename,\n \"inputs\": \",\".join(wecall_input_data.bam_filenames)\n }\n\n def with_configuration(self, key, value):\n self.__configuration[key] = value\n return self\n\n def build(self):\n filename = self.filestem + \".cfg\"\n with ConfigFileWriter(filename) as config_writer:\n for key, value in list(self.__configuration.items()):\n config_writer.write_config_line(key=key, value=value)\n return WecallConfig(filename)\n", "id": "10736114", "language": "Python", "matching_score": 1.415785551071167, "max_stars_count": 8, "path": "python/wecall/wecall_utils/wecall_config_builder.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nclass InputData(object):\n\n def __init__(self, tags, filenames):\n self.tags = tags\n self.__filenames = filenames\n\n @property\n def filenames(self):\n return self.__filenames\n\n\nclass WecallInputData(InputData):\n\n def __init__(self, bam_filenames, reference_filename):\n InputData.__init__(self, set(), set())\n self.bam_filenames = bam_filenames\n self.reference_filename = reference_filename\n\n @property\n def filenames(self):\n filenames = set()\n for bam_filename in self.bam_filenames:\n filenames.update({bam_filename, bam_filename + \".bai\"})\n filenames.update(\n {self.reference_filename, self.reference_filename + \".fai\"})\n return filenames\n", "id": "7439544", "language": "Python", "matching_score": 0.8753708004951477, "max_stars_count": 8, "path": "python/wecall/wecall_utils/wecall_input_data.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport unittest\n\nfrom wecall.common.exceptions import weCallException\nfrom wecall.vcfutils.record import generate_records\nfrom wecall.vcfutils.sample_data import SampleData\nfrom wecall.vcfutils.genotype_call import GenotypeCall\nfrom wecall.vcfutils.schema import Schema\n\nimport testfixtures\n\n\nclass TestSampleData(unittest.TestCase):\n\n def test_default_field_value_is_assigned_when_sample_data_is_constructed(self):\n sample_data = SampleData(['key1'], ['sample_name'])\n self.assertEqual(sample_data.get_field('sample_name', 'key1'), [])\n\n def test_genotype_field_default_value_is_assigned_when_sample_data_is_constructed(self):\n sample_data = SampleData(['GT'], ['sample_name'])\n self.assertEqual(\n sample_data.get_field(\n 'sample_name',\n 'GT'),\n GenotypeCall(\"./.\"))\n\n def test_default_values_are_assigned_when_sample_data_is_constructed(self):\n sample_data = SampleData(['GT', 'key1', 'key2'], [\n 'sample_name1', 'sample_name2'])\n 
self.assertEqual(\n sample_data.get_field(\n 'sample_name1',\n 'GT'),\n GenotypeCall(\"./.\"))\n self.assertEqual(\n sample_data.get_field(\n 'sample_name2',\n 'GT'),\n GenotypeCall(\"./.\"))\n self.assertEqual(sample_data.get_field('sample_name1', 'key1'), [])\n self.assertEqual(sample_data.get_field('sample_name2', 'key1'), [])\n self.assertEqual(sample_data.get_field('sample_name1', 'key2'), [])\n self.assertEqual(sample_data.get_field('sample_name2', 'key2'), [])\n\n def test_has_sample_reports_expected_value(self):\n sample_data = SampleData(['key1'], ['sample_name'])\n self.assertTrue(sample_data.has_sample('sample_name'))\n self.assertFalse(sample_data.has_sample('missing_sample_name'))\n\n def test_has_genotype_key_should_report_expected_value(self):\n sample_data = SampleData(['genotype_key'], ['sample_name'])\n self.assertTrue(sample_data.has_genotype_key('genotype_key'))\n self.assertFalse(sample_data.has_genotype_key('missing_genotype_key'))\n\n def test_has_genotype_keys_should_support_multiple_keys(self):\n sample_data = SampleData(\n ['genotype_key1', 'genotype_key2'], ['sample_name'])\n self.assertTrue(sample_data.has_genotype_key('genotype_key1'))\n self.assertTrue(sample_data.has_genotype_key('genotype_key2'))\n\n def test_should_add_sample_data(self):\n sample_data = SampleData(['genotype_key1'], ['sample_name'])\n sample_data.add_sample_data('sample_name', 'genotype_key1', [1])\n self.assertEqual(\n sample_data.get_field(\n 'sample_name',\n 'genotype_key1'),\n [1])\n\n def test_should_allow_multiple_calls_to_add_sample_data(self):\n sample_data = SampleData(\n ['genotype_key1', 'genotype_key2'], ['sample_name'])\n sample_data.add_sample_data('sample_name', 'genotype_key1', [1])\n sample_data.add_sample_data('sample_name', 'genotype_key2', [2])\n sample_data.add_sample_data('sample_name', 'genotype_key1', [3, 4])\n self.assertEqual(\n sample_data.get_field(\n 'sample_name', 'genotype_key1'), [\n 3, 4])\n self.assertEqual(\n sample_data.get_field(\n 'sample_name',\n 'genotype_key2'),\n [2])\n\n def test_should_allow_multiple_samples_for_add_sample_data(self):\n sample_data = SampleData(\n ['genotype_key1'], [\n 'sample_name1', 'sample_name2'])\n sample_data.add_sample_data('sample_name1', 'genotype_key1', [1])\n sample_data.add_sample_data('sample_name2', 'genotype_key1', [3, 4])\n self.assertEqual(\n sample_data.get_field(\n 'sample_name1',\n 'genotype_key1'),\n [1])\n self.assertEqual(\n sample_data.get_field(\n 'sample_name2', 'genotype_key1'), [\n 3, 4])\n\n def test_should_raise_when_adding_sample_data_to_missing_key(self):\n sample_data = SampleData(['key'], ['sample_name'])\n self.assertRaisesRegex(\n weCallException,\n \"Missing key missing_key when adding sample data.\",\n sample_data.add_sample_data,\n 'sample_name',\n 'missing_key',\n [1],\n )\n\n def test_should_raise_when_adding_sample_data_to_missing_sample(self):\n sample_data = SampleData(['key'], ['sample_name'])\n self.assertRaisesRegex(\n weCallException,\n \"Missing sample name missing_sample_name supplied when adding sample data.\",\n sample_data.add_sample_data,\n 'missing_sample_name',\n 'key',\n [1],\n )\n\n def test_should_raise_when_adding_wrong_genotype_data(self):\n sample_data = SampleData(['GT'], ['sample_name'])\n self.assertRaisesRegex(\n weCallException,\n \"Genotype field must be a GenotypeCall.\",\n sample_data.add_sample_data,\n 'sample_name',\n 'GT',\n [1],\n )\n\n\nclass TestSampleDataGetGenotypeLikelihood(unittest.TestCase):\n def test_gets_exact_values_if_key_is_GL(self):\n 
sample_name = 'sample_name'\n sample_data = SampleData(['GL'], [sample_name])\n sample_data.add_sample_data(sample_name, 'GL', [-0.1, -0.2, -0.3])\n self.assertEqual(sample_data.get_genotype_likelihoods(\n sample_name), [-0.1, -0.2, -0.3])\n\n def test_gets_exact_values_if_key_is_PL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['PL'], [sample_name])\n sample_data.add_sample_data(sample_name, 'PL', [1, 2, 3])\n self.assertEqual(sample_data.get_genotype_likelihoods(\n sample_name), [-0.1, -0.2, -0.3])\n\n def test_gets_dot_if_key_is_GL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['GL'], [sample_name])\n sample_data.add_sample_data(sample_name, 'GL', '.')\n self.assertEqual(\n sample_data.get_genotype_likelihoods(sample_name), '.')\n\n def test_gets_list_of_dot_if_key_is_GL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['GL'], [sample_name])\n sample_data.add_sample_data(sample_name, 'GL', ['.', '.', '.'])\n self.assertEqual(\n sample_data.get_genotype_likelihoods(sample_name), [\n None, None, None])\n\n def test_gets_none_if_key_is_GL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['GL'], [sample_name])\n sample_data.add_sample_data(sample_name, 'GL', None)\n self.assertEqual(\n sample_data.get_genotype_likelihoods(sample_name), None)\n\n def test_gets_list_of_none_if_key_is_GL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['GL'], [sample_name])\n sample_data.add_sample_data(sample_name, 'GL', [None, None, None])\n self.assertEqual(\n sample_data.get_genotype_likelihoods(sample_name), [\n None, None, None])\n\n def test_gets_dot_if_key_is_PL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['PL'], [sample_name])\n sample_data.add_sample_data(sample_name, 'PL', '.')\n self.assertEqual(\n sample_data.get_genotype_likelihoods(sample_name), '.')\n\n def test_sample_data_copes_with_mixed_dot_missing_values_in_GL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['GL'], [sample_name])\n sample_data.add_sample_data(\n sample_name, 'GL', [-0.1, '.', -0.2, None, -0.3])\n self.assertEqual(sample_data.get_genotype_likelihoods(\n sample_name), [-0.1, None, -0.2, None, -0.3])\n\n def test_sample_data_copes_with_mixed_missing_values_in_PL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['PL'], [sample_name])\n sample_data.add_sample_data(\n sample_name, 'PL', [-0.1, '.', -0.2, None, -0.3])\n self.assertEqual(sample_data.get_genotype_likelihoods(\n sample_name), [0.01, None, 0.02, None, 0.03])\n\n\nclass TestSampleDataSetGenotypeLikelihood(unittest.TestCase):\n def test_gets_exact_values_if_key_is_GL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['GL'], [sample_name])\n sample_data.set_genotype_likelihoods(sample_name, [-0.1, -0.2, -0.3])\n self.assertEqual(sample_data.get_field(\n sample_name, 'GL'), [-0.1, -0.2, -0.3])\n\n def test_gets_exact_values_if_key_is_PL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['PL'], [sample_name])\n sample_data.set_genotype_likelihoods(sample_name, [-0.1, -0.2, -0.3])\n self.assertEqual(sample_data.get_field(sample_name, 'PL'), [1, 2, 3])\n\n def test_gets_dot_if_key_is_GL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['GL'], [sample_name])\n sample_data.set_genotype_likelihoods(sample_name, '.')\n self.assertEqual(sample_data.get_field(sample_name, 'GL'), '.')\n\n def test_gets_dot_if_key_is_PL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['PL'], [sample_name])\n 
sample_data.set_genotype_likelihoods(sample_name, '.')\n self.assertEqual(sample_data.get_field(sample_name, 'PL'), '.')\n\n def test_sample_data_copes_with_mixed_dot_missing_values_in_GL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['GL'], [sample_name])\n sample_data.set_genotype_likelihoods(\n sample_name, [-0.1, '.', -0.2, None, -0.3])\n self.assertEqual(sample_data.get_field(\n sample_name, 'GL'), [-0.1, None, -0.2, None, -0.3])\n\n def test_sample_data_copes_with_mixed_missing_values_in_PL(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['PL'], [sample_name])\n sample_data.set_genotype_likelihoods(\n sample_name, [-0.1, '.', -0.2, None, -0.3])\n self.assertEqual(\n sample_data.get_field(\n sample_name, 'PL'), [\n 1.0, None, 2.0, None, 3.0])\n\n\nclass TestSampleDataGetReadDepth(unittest.TestCase):\n def test_gets_exact_values_if_key_is_DP(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['DP'], [sample_name])\n sample_data.add_sample_data(sample_name, 'DP', [100])\n self.assertEqual(sample_data.get_read_depth(sample_name), [100])\n\n def test_gets_exact_values_if_key_is_NR(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['NR'], [sample_name])\n sample_data.add_sample_data(sample_name, 'NR', [100])\n self.assertEqual(sample_data.get_read_depth(sample_name), [100])\n\n\nclass TestSampleDataGetVariantSupport(unittest.TestCase):\n def test_gets_exact_values_if_key_is_AD(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['AD'], [sample_name])\n sample_data.add_sample_data(sample_name, 'AD', [None, 100])\n self.assertEqual(sample_data.get_variant_support(sample_name), [100])\n\n def test_gets_exact_values_if_key_is_NV(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['NV'], [sample_name])\n sample_data.add_sample_data(sample_name, 'NV', [100])\n self.assertEqual(sample_data.get_variant_support(sample_name), [100])\n\n\nclass TestSampleDataGetGenotypeQuality(unittest.TestCase):\n def test_gets_value_for_GQ_key(self):\n sample_name = 'sample_name'\n sample_data = SampleData(['GQ'], [sample_name])\n sample_data.add_sample_data(sample_name, 'GQ', [2.3])\n self.assertEqual(sample_data.get_genotype_quality(sample_name), [2.3])\n\n\nclass TestGenotypeDataView(unittest.TestCase):\n\n def setUp(self):\n self.sample_data = SampleData(\n ['GT', 'key'], ['sample_name1', 'sample_name2'])\n self.sample_data.add_sample_data(\"sample_name1\", \"key\", [1, 2])\n self.sample_data.add_sample_data(\n \"sample_name2\", \"GT\", GenotypeCall(\"0/1\"))\n\n def test_contains_method_returns_expected_value_sample1(self):\n genotype_data = self.sample_data.get_genotype_data(\"sample_name1\")\n self.assertNotIn(\"cheesecake\", genotype_data)\n self.assertNotIn(\"sample_name1\", genotype_data)\n self.assertIn(\"GT\", genotype_data)\n self.assertIn(\"key\", genotype_data)\n\n def test_contains_method_returns_expected_value_sample2(self):\n genotype_data = self.sample_data.get_genotype_data(\"sample_name2\")\n self.assertIn(\"GT\", genotype_data)\n self.assertIn(\"key\", genotype_data)\n\n def test_getitem_method_returns_expected_value(self):\n genotype_data = self.sample_data.get_genotype_data(\"sample_name1\")\n self.assertEqual(genotype_data[\"GT\"], GenotypeCall(\"./.\"))\n self.assertEqual(genotype_data[\"key\"], [1, 2])\n genotype_data = self.sample_data.get_genotype_data(\"sample_name2\")\n self.assertEqual(genotype_data[\"GT\"], GenotypeCall(\"0/1\"))\n self.assertEqual(genotype_data[\"key\"], [])\n\n def 
test_keys_method_returns_expected_data(self):\n genotype_data = self.sample_data.get_genotype_data(\"sample_name1\")\n self.assertEqual(list(genotype_data.keys()), [\"GT\", \"key\"])\n genotype_data = self.sample_data.get_genotype_data(\"sample_name2\")\n self.assertEqual(list(genotype_data.keys()), [\"GT\", \"key\"])\n\n def test_values_method_returns_expected_data(self):\n genotype_data = self.sample_data.get_genotype_data(\"sample_name1\")\n self.assertEqual(list(genotype_data.values()),\n [GenotypeCall(\"./.\"), [1, 2]])\n genotype_data = self.sample_data.get_genotype_data(\"sample_name2\")\n self.assertEqual(list(genotype_data.values()),\n [GenotypeCall(\"0/1\"), []])\n\n\nclass TestGenerateRecords(unittest.TestCase):\n\n def test_should_split_genotype_likelihood_properly(self):\n schema = Schema()\n schema.set_sample_data('GT', '1', 'String', '')\n schema.set_sample_data('GL', 'G', 'Float', '')\n schema.samples = ['foo']\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A,T', '.', 'PASS', '.', 'GT:GL', '0/1:1,2,3,4,5,6'\n ]))\n\n self.assertEqual(\n GenotypeCall(\"0/1\"),\n records[0].sample_info.get_field(\n 'foo',\n 'GT'))\n self.assertEqual(\n [1.0, 2.0, 3.0], records[0].sample_info.get_field('foo', 'GL'))\n self.assertEqual(\n GenotypeCall(\"0/0\"),\n records[1].sample_info.get_field(\n 'foo',\n 'GT'))\n self.assertEqual(\n [1.0, 4.0, 6.0], records[1].sample_info.get_field('foo', 'GL'))\n\n def test_should_drop_genotype_likelihood_with_mismatch_ploidy(self):\n schema = Schema()\n schema.set_sample_data('GT', '1', 'String', '')\n schema.set_sample_data('GL', 'G', 'Float', '')\n schema.samples = ['foo']\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A,T', '.', 'PASS', '.', 'GT:GL', '0/1:1,2,3,4'\n ]))\n\n self.assertEqual(\n GenotypeCall(\"0/1\"),\n records[0].sample_info.get_field(\n 'foo',\n 'GT'))\n self.assertEqual([None, None, None],\n records[0].sample_info.get_field('foo', 'GL'))\n self.assertEqual(\n GenotypeCall(\"0/0\"),\n records[1].sample_info.get_field(\n 'foo',\n 'GT'))\n self.assertEqual([None, None, None],\n records[1].sample_info.get_field('foo', 'GL'))\n\n @testfixtures.log_capture()\n def test_should_warn_when_GT_is_not_present(self, log):\n schema = Schema()\n schema.set_sample_data('GL', 'G', 'Float', '')\n schema.samples = ['foo']\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A,T', '.', 'PASS', '.', 'GL', '1,2,3'\n ]))\n for index, record in enumerate(records):\n self.assertEqual(\n (index, [\n '1', '2', '3']), (index, record.sample_info.get_field(\n 'foo', 'GL')))\n log.check(('wecall.vcfutils.fieldmetadata', 'WARNING',\n 'Unknown ploidy when parsing genotype likelihood'), )\n\n\nclass TestHasNoLikelihoods(unittest.TestCase):\n def test_should_return_true_if_no_GL_or_PL_present(self):\n schema = Schema()\n schema.set_sample_data('GT', '1', 'String', '')\n schema.samples = ['foo']\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A', '.', 'PASS', '.', 'GT', '0/1'\n ]))\n self.assertTrue(records[0].sample_info.has_no_likelihoods())\n\n def test_should_return_true_if_all_likelihoods_are_none_for_GL(self):\n schema = Schema()\n schema.set_sample_data('GT', '1', 'String', '')\n schema.set_sample_data('GL', 'G', 'Float', '')\n schema.samples = ['foo']\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A', '.', 'PASS', '.', 'GT:GL', '0/1:.,.,.'\n ]))\n self.assertTrue(records[0].sample_info.has_no_likelihoods())\n\n def 
test_should_return_true_if_all_likelihoods_are_none_for_PL(self):\n schema = Schema()\n schema.set_sample_data('GT', '1', 'String', '')\n schema.set_sample_data('PL', 'G', 'Float', '')\n schema.samples = ['foo']\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A', '.', 'PASS', '.', 'GT:PL', '0/1:.,.,.'\n ]))\n self.assertTrue(records[0].sample_info.has_no_likelihoods())\n\n def test_should_return_false_if_all_likelihoods_is_not_none_for_PL(self):\n schema = Schema()\n schema.set_sample_data('GT', '1', 'String', '')\n schema.set_sample_data('PL', 'G', 'Float', '')\n schema.samples = ['foo']\n\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A', '.', 'PASS', '.', 'GT:PL', '0/1:90,10,12'\n ]))\n self.assertFalse(records[0].sample_info.has_no_likelihoods())\n\n def test_should_return_false_if_all_likelihoods_is_not_none_for_GL(self):\n schema = Schema()\n schema.set_sample_data('GT', '1', 'String', '')\n schema.set_sample_data('GL', 'G', 'Float', '')\n schema.samples = ['foo']\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A', '.', 'PASS', '.', 'GT:GL', '0/1:90,1,120'\n ]))\n self.assertFalse(records[0].sample_info.has_no_likelihoods())\n\n def test_should_return_false_if_any_likelihoods_is_not_none_for_PL(self):\n schema = Schema()\n schema.set_sample_data('GT', '1', 'String', '')\n schema.set_sample_data('PL', 'G', 'Float', '')\n schema.samples = ['foo']\n\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A', '.', 'PASS', '.', 'GT:PL', '0/1:90,.,.'\n ]))\n self.assertFalse(records[0].sample_info.has_no_likelihoods())\n\n def test_should_return_false_if_any_likelihoods_is_not_none_for_GL(self):\n schema = Schema()\n schema.set_sample_data('GT', '1', 'String', '')\n schema.set_sample_data('GL', 'G', 'Float', '')\n schema.samples = ['foo']\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A', '.', 'PASS', '.', 'GT:GL', '0/1:90,.,.'\n ]))\n self.assertFalse(records[0].sample_info.has_no_likelihoods())\n\n def test_should_return_false_if_one_sample_okay_for_PL(self):\n schema = Schema()\n schema.set_sample_data('GT', '1', 'String', '')\n schema.set_sample_data('PL', 'G', 'Float', '')\n schema.samples = ['foo', 'bar']\n records = list(generate_records(schema,\n ['chrZ',\n '200',\n '.',\n 'C',\n 'A',\n '.',\n 'PASS',\n '.',\n 'GT:PL',\n '0/1:90,1,120',\n '0/1:.,.,.']))\n self.assertFalse(records[0].sample_info.has_no_likelihoods())\n\n def test_should_return_false_if_one_sample_okay_for_GL(self):\n schema = Schema()\n schema.set_sample_data('GT', '1', 'String', '')\n schema.set_sample_data('GL', 'G', 'Float', '')\n schema.samples = ['foo']\n records = list(generate_records(schema,\n ['chrZ',\n '200',\n '.',\n 'C',\n 'A',\n '.',\n 'PASS',\n '.',\n 'GT:GL',\n '0/1:90,1,120',\n '0/1:.,.,.']))\n self.assertFalse(records[0].sample_info.has_no_likelihoods())\n", "id": "2090052", "language": "Python", "matching_score": 3.822323799133301, "max_stars_count": 8, "path": "test/test_utils/vcfutils/test_sample_data.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom collections import OrderedDict\n\nfrom wecall.common.exceptions import weCallException\nfrom wecall.vcfutils.genotype_call import GenotypeCall, merge_genotype_calls\nfrom wecall.vcfutils.stringutils import to_vcf_str\n\n\nGENOTYPE_QUALITY_KEY = 'GQ'\n\nGENOTYPE_KEY = 'GT'\nGENOTYPE_LIKELIHOODS_KEY = 'GL'\nGENOTYPE_PHRED_LIKELIHOODS_KEY = 'PL'\nLIKELIHOOD_SCALING_FACTOR = OrderedDict(\n 
[(GENOTYPE_PHRED_LIKELIHOODS_KEY, -10.0), (GENOTYPE_LIKELIHOODS_KEY, 1.0)])\nGENOTYPE_LIKELIHOODS_KEYS = list(LIKELIHOOD_SCALING_FACTOR.keys())\n\nNUMBER_OF_READ = \"NR\"\nREAD_DEPTH = \"DP\"\nREAD_DEPTH_KEYS = [READ_DEPTH, NUMBER_OF_READ]\n\nALLELIC_DEPTH = \"AD\"\nVARIANT_SUPPORT = \"NV\"\nVARIANT_SUPPORT_MAP = {\n ALLELIC_DEPTH: lambda v: v[1:], VARIANT_SUPPORT: lambda v: v}\n\n\nclass SampleData(object):\n\n __slots__ = (\n '__sample_names',\n '__key_to_sample_values',\n '__merged_genotypes')\n\n def __init__(self, key_names, sample_names):\n self.__sample_names = sample_names\n self.__key_to_sample_values = OrderedDict()\n self.__merged_genotypes = False\n\n for key_name in key_names:\n default_value = []\n\n if key_name == GENOTYPE_KEY:\n default_value = GenotypeCall('./.')\n\n self.__key_to_sample_values[key_name] = [\n default_value for _ in range(len(self.__sample_names))]\n\n def __eq__(self, other):\n return (\n self.__sample_names == other.__sample_names and\n self.__key_to_sample_values == other.__key_to_sample_values\n )\n\n def __repr__(self):\n return \"<SampleData: samples={!r}, keys={!r}>\".format(\n self.__sample_names,\n list(self.__key_to_sample_values.items())\n )\n\n def add_sample_data(self, sample_name, key_name, sample_data_value):\n if key_name not in self.__key_to_sample_values:\n raise weCallException(\n \"Missing key {} when adding sample data.\".format(key_name))\n\n if sample_name not in self.__sample_names:\n raise weCallException(\n \"Missing sample name {} supplied when adding sample data.\".format(sample_name))\n\n if key_name == GENOTYPE_KEY and not isinstance(\n sample_data_value, GenotypeCall):\n raise weCallException(\"Genotype field must be a GenotypeCall.\")\n\n self.__key_to_sample_values[key_name][self.__sample_names.index(\n sample_name)] = sample_data_value\n\n def get_genotype_data(self, sample_name):\n return SampleDataView(self, sample_name)\n\n def get_genotype_quality(self, sample_name):\n return self.get_field(sample_name, GENOTYPE_QUALITY_KEY)\n\n def has_read_depth_key(self):\n return any(self.has_genotype_key(key) for key in READ_DEPTH_KEYS)\n\n def get_read_depth(self, sample_name):\n for key in READ_DEPTH_KEYS:\n if self.has_genotype_key(key):\n return self.get_field(sample_name, key)\n raise weCallException(\n \"Expected one of {} as the depth key.\".format(READ_DEPTH_KEYS))\n\n def has_variant_support_key(self):\n return any(self.has_genotype_key(key)\n for key in list(VARIANT_SUPPORT_MAP.keys()))\n\n def get_variant_support(self, sample_name):\n for key in list(VARIANT_SUPPORT_MAP.keys()):\n if self.has_genotype_key(key):\n return VARIANT_SUPPORT_MAP[key](\n self.get_field(sample_name, key))\n raise weCallException(\n \"Expected one of {} as the variant support key.\".format(\n list(\n VARIANT_SUPPORT_MAP.keys())))\n\n def has_no_likelihoods(self):\n if self.has_genotype_key(GENOTYPE_LIKELIHOODS_KEY) or self.has_genotype_key(\n GENOTYPE_PHRED_LIKELIHOODS_KEY):\n for sample_name in self.__sample_names:\n likelihoods = self.get_genotype_likelihoods(sample_name)\n if any([likeli is not None for likeli in likelihoods]):\n return False\n return True\n\n def get_raw_genotype_likelihoods(self, sample_name):\n for key in list(LIKELIHOOD_SCALING_FACTOR.keys()):\n if self.has_genotype_key(key):\n try:\n return self.get_field(sample_name, key)\n except KeyError:\n pass\n return None\n\n def get_genotype_likelihoods(self, sample_name):\n def convert_likelihoods(likelihoods, factor):\n if likelihoods is None or likelihoods == '.':\n return 
likelihoods\n else:\n return [\n None if value in {\n None,\n '.'} else value /\n factor for value in likelihoods]\n\n for key in list(LIKELIHOOD_SCALING_FACTOR.keys()):\n if self.has_genotype_key(key):\n values = self.get_field(sample_name, key)\n return convert_likelihoods(\n values, LIKELIHOOD_SCALING_FACTOR[key])\n raise weCallException(\n \"Expected one of {} as the likelihood key.\".format(\n list(\n LIKELIHOOD_SCALING_FACTOR.keys())))\n\n def set_genotype_likelihoods(self, sample_name, likelihood_values):\n def convert_likelihoods(likelihoods, factor):\n if likelihoods is None or likelihoods == '.':\n return likelihoods\n else:\n return [\n None if value in {\n None,\n '.'} else value *\n factor for value in likelihoods]\n\n for key in list(LIKELIHOOD_SCALING_FACTOR.keys()):\n if self.has_genotype_key(key):\n converted_values = convert_likelihoods(\n likelihood_values, LIKELIHOOD_SCALING_FACTOR[key])\n self.add_sample_data(sample_name, key, converted_values)\n return\n raise weCallException(\n \"Expected one of {} as the likelihood key.\".format(\n list(\n LIKELIHOOD_SCALING_FACTOR.keys())))\n\n def get_field(self, sample_name, key):\n return self.__key_to_sample_values[key][self.__sample_names.index(\n sample_name)]\n\n def get_fields(self, sample_name):\n index = self.__sample_names.index(sample_name)\n return [value[index]\n for key, value in list(self.__key_to_sample_values.items())]\n\n def has_genotype_key(self, key):\n return key in self.__key_to_sample_values\n\n def genotype_keys(self):\n return list(self.__key_to_sample_values.keys())\n\n def get_sample_names(self):\n return self.__sample_names\n\n def has_sample(self, sample_name):\n return sample_name in self.__sample_names\n\n def to_vcf_header_columns(self):\n return [\"FORMAT\"] + self.__sample_names\n\n def to_vcf_columns(self):\n keys_string = [\":\".join(self.genotype_keys())]\n\n sample_strings = [[] for sample_name in self.__sample_names]\n\n for values_per_sample in list(self.__key_to_sample_values.values()):\n for index, value in enumerate(values_per_sample):\n if not isinstance(value, GenotypeCall) and value == []:\n sample_strings[index].append('.')\n else:\n sample_strings[index].append(to_vcf_str(value))\n\n return keys_string + [':'.join(sample_string)\n for sample_string in sample_strings]\n\n def genotypes(self):\n genotypes = OrderedDict()\n for sample_name in self.__sample_names:\n genotypes[sample_name] = self.get_field(sample_name, GENOTYPE_KEY)\n return genotypes\n\n @property\n def has_merged_genotypes(self):\n return self.__merged_genotypes\n\n def merge_genotype_calls(self, dictionary_sample_name_to_genotype_call):\n # assert(not self.__merged_genotypes)\n for sample_name in self.__sample_names:\n sample_name_index = self.__sample_names.index(sample_name)\n self.__key_to_sample_values[GENOTYPE_KEY][sample_name_index] = merge_genotype_calls(\n self.__key_to_sample_values[GENOTYPE_KEY][sample_name_index],\n dictionary_sample_name_to_genotype_call[sample_name])\n self.__merged_genotypes = True\n\n\nclass SampleDataView(object):\n\n def __init__(self, sample_data, sample_name):\n self.__sample_data = sample_data\n self.__sample_name = sample_name\n\n def __contains__(self, key):\n return self.__sample_data.has_genotype_key(key)\n\n def __getitem__(self, key):\n return self.__sample_data.get_field(self.__sample_name, key)\n\n def keys(self):\n return self.__sample_data.genotype_keys()\n\n def values(self):\n return self.__sample_data.get_fields(self.__sample_name)\n\n def genotype(self):\n return 
self.__getitem__(GENOTYPE_KEY)\n", "id": "9035860", "language": "Python", "matching_score": 1.0623037815093994, "max_stars_count": 8, "path": "python/wecall/vcfutils/sample_data.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\nfrom wecall.bamutils.bam_builder import BAMBuilder, BAMContigData\nfrom wecall.bamutils.sequence_bank import SequenceBank\nfrom wecall.wecall_utils.wecall_input_data import WecallInputData\nfrom wecall.fastautils.fasta_file_builder import FastaFileBuilder\n\n\nclass WecallInputDataBuilder(object):\n\n def __init__(self, work_dir):\n self.__sample_banks = None\n self.__work_dir = work_dir\n self.__ref_filename = None\n self.__bam_filenames = None\n\n def with_ref_filename(self, filename):\n self.__ref_filename = filename\n return self\n\n def with_bam_filenames(self, filenames):\n self.__bam_filenames = filenames\n return self\n\n def with_sample_bank(self, sample_bank):\n if self.__sample_banks is None:\n self.__sample_banks = []\n self.__sample_banks.append(sample_bank)\n return self\n\n def __build_ref(self):\n if self.__ref_filename is None:\n self.__ref_filename = os.path.join(self.__work_dir, \"bah.fa\")\n\n fasta_file_builder = FastaFileBuilder(self.__ref_filename)\n\n for sample_bank in self.__sample_banks:\n fasta_file_builder.with_chrom(\n sample_bank.reference.chrom,\n sample_bank.reference.fasta_string()\n )\n fasta_file_builder.build().index()\n return fasta_file_builder.filename\n\n def __build_bams(self):\n if len(self.__sample_banks) == 1:\n sample_names = self.__sample_banks[0].sample_names\n else:\n sample_names = set().union(\n *[sample_bank.sample_names for sample_bank in self.__sample_banks])\n\n bam_files = []\n if self.__bam_filenames is None:\n self.__bam_filenames = [\n os.path.join(\n self.__work_dir,\n \"wecall_input_\" +\n sample_name +\n \".bam\") for sample_name in sample_names]\n\n for sample_name, filename in zip(sample_names, self.__bam_filenames):\n bam_file_builder = BAMBuilder(filename)\n for sample_bank in self.__sample_banks:\n sequence_bank = sample_bank.get(sample_name, None)\n if sequence_bank is not None:\n bam_file_builder.with_bam_contig_data(\n sample_bank.reference.chrom,\n sample_bank.reference.length_minus_deletions(),\n sample_name,\n sequence_bank\n )\n bam_file_builder.build()\n bam_files.append(bam_file_builder.filename)\n return bam_files\n\n def build(self):\n fasta_filename = self.__build_ref()\n bam_filenames = self.__build_bams()\n return WecallInputData(bam_filenames, fasta_filename)\n", "id": "3380382", "language": "Python", "matching_score": 2.736769914627075, "max_stars_count": 8, "path": "python/wecall/wecall_utils/wecall_input_data_builder.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport pysam\n\nRG_ID = \"Test\"\n\n\nclass BAMBuilder(object):\n\n def __init__(self, filename, with_read_group=True):\n self.__bam_data = None\n self.__filename = filename\n self.__with_read_group = with_read_group\n\n def with_bam_contig_data(\n self,\n chrom,\n chrom_length,\n sample_name,\n sequence_bank):\n if self.__bam_data is None:\n self.__bam_data = []\n self.__bam_data.append(\n BAMContigData(\n chrom,\n chrom_length,\n sample_name,\n sequence_bank))\n return self\n\n @property\n def filename(self):\n return self.__filename\n\n @property\n def header(self):\n sample_names = {\n contig_data.sample_name for contig_data in self.__bam_data}\n bam_header = {'HD': {'VN': '1.0'}, 'SQ': [\n {'LN': contig.chrom_length, 'SN': contig.chrom} for contig in self.__bam_data], 
}\n if self.__with_read_group:\n bam_header['RG'] = [{\"ID\": RG_ID + \"_\" + sample_name,\n \"SM\": sample_name} for sample_name in sorted(sample_names)]\n return bam_header\n\n def build(self):\n bam_fp = pysam.Samfile(self.__filename, \"wb\", header=self.header)\n\n for contig_data in self.__bam_data:\n chrom_id = bam_fp.gettid(contig_data.chrom)\n read_tags = []\n if self.__with_read_group:\n read_tags.append((\"RG\", RG_ID + \"_\" + contig_data.sample_name))\n\n # Sort sequences before writing to bam file. Indexing will\n # otherwise fail.\n reads = sorted(\n contig_data.sequence_bank.build_reads(chrom_id, read_tags),\n key=lambda x: (x.pos, x.seq, x.qual, x.cigarstring, x.mapq)\n )\n\n for read in reads:\n bam_fp.write(read)\n\n bam_fp.close()\n pysam.index(self.filename)\n\n\nclass BAMContigData(object):\n\n def __init__(self, chrom, chrom_length, sample_name, sequence_bank):\n self.chrom = chrom\n self.chrom_length = chrom_length\n self.sample_name = sample_name\n self.sequence_bank = sequence_bank\n", "id": "5297317", "language": "Python", "matching_score": 2.2814667224884033, "max_stars_count": 8, "path": "python/wecall/bamutils/bam_builder.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom pysam import AlignedRead\n\nDUPLICATE = 1024\nHIGH_QUALITY = 80\nFORWARD_GOOD_READ = 1 + 2 + 32 + 64\nREVERSE_GOOD_READ = 1 + 2 + 16 + 128\n\n\nclass ReadSequence(object):\n\n def __init__(\n self,\n sequence,\n quality,\n mapping_quality,\n insert_size=None,\n read_id=None,\n read_flags=None,\n read_start=None,\n read_mate_start=None\n ):\n self.chrom_id = None\n self.sequence = sequence\n self.quality = quality\n self.mapping_quality = mapping_quality\n self.insert_size = insert_size if insert_size is not None else int(\n 2 * self.sequence.length_minus_deletions())\n self.read_flags = read_flags\n self.read_id = read_id if read_id is not None else \"no_id\"\n self.read_start = read_start if read_start is not None else self.sequence.pos_from\n\n self.read_mate_start = read_mate_start\n if self.read_mate_start is None:\n read_mate_end = self.read_start + self.insert_size\n self.read_mate_start = read_mate_end - self.sequence.length_minus_deletions()\n\n @property\n def variants(self):\n return self.sequence.variants\n\n def build_read(self, read_tags, is_forward):\n read = AlignedRead()\n\n read.seq = self.sequence.sequence_minus_deletions()\n read.rname = self.chrom_id\n read.pos = self.read_start\n read.mapq = self.mapping_quality\n read.cigarstring = self.sequence.cigar\n read.rnext = self.chrom_id\n read.pnext = self.read_mate_start\n read.tlen = self.insert_size\n read.qual = self.quality.ascii_quality\n read.tags = read_tags\n\n read.qname = self.read_id\n\n if self.read_flags is None:\n read.flag = FORWARD_GOOD_READ if is_forward else REVERSE_GOOD_READ\n else:\n read.flag = self.read_flags\n\n return read\n\n\nclass ReadSequenceWithCoverage(object):\n\n def __init__(self, read_sequence, n_fwd, n_rev):\n self.read_sequence = read_sequence\n self.n_fwd = n_fwd\n self.n_rev = n_rev\n\n def build_reads(self, chrom_id, read_tags):\n self.read_sequence.chrom_id = chrom_id\n for fwd_index in range(self.n_fwd):\n yield self.read_sequence.build_read(read_tags, True)\n for rev_index in range(self.n_rev):\n yield self.read_sequence.build_read(read_tags, False)\n\n\nclass ReadPairWithCoverage(object):\n\n def __init__(self, fwd, rev, n_fwd, n_rev):\n self.__fwd = fwd\n self.__rev = rev\n if n_fwd is not None and n_rev is not None:\n self.n_fwd = n_fwd\n self.n_rev = n_rev\n 
else:\n self.n_fwd = 1\n self.n_rev = 0\n\n def build_reads(self, chrom_id, read_tags):\n self.__fwd.chrom_id = chrom_id\n self.__rev.chrom_id = chrom_id\n for fwd_index in range(self.n_fwd):\n first = self.__fwd.build_read(read_tags, True)\n second = self.__rev.build_read(read_tags, True)\n first.mpos = second.pos\n second.mpos = first.pos\n yield first\n yield second\n for rev_index in range(self.n_rev):\n first = self.__fwd.build_read(read_tags, False)\n second = self.__rev.build_read(read_tags, False)\n first.mpos = second.pos\n second.mpos = first.pos\n yield first\n yield second\n", "id": "3732160", "language": "Python", "matching_score": 4.01447057723999, "max_stars_count": 8, "path": "python/wecall/bamutils/read_sequence.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.bamutils.read_sequence import HIGH_QUALITY\nfrom wecall.bamutils.sequence_builder import sequence_builder\n\n\nclass SequenceBank(object):\n \"\"\"\n A container to hold annotated DNA sequences in relation to a reference sequence.\n \"\"\"\n\n def __init__(self, reference):\n self.reference = reference\n self._read_sequences_with_coverage = []\n\n def __getitem__(self, item):\n return self._read_sequences_with_coverage[item]\n\n def __len__(self):\n return len(self._read_sequences_with_coverage)\n\n @property\n def chrom(self):\n return self.reference.chrom\n\n @property\n def variants(self):\n variants = set()\n for sequence in self._read_sequences_with_coverage:\n variants.update(sequence.read_sequence.variants)\n return variants\n\n def add_sequence(\n self,\n seq_string,\n quality_string=None,\n n_fwd=None,\n n_rev=None,\n mapping_quality=HIGH_QUALITY,\n insert_size=None,\n read_id=None,\n read_flags=None,\n cigar_string=None,\n read_start=None,\n read_mate_start=None\n ):\n self._read_sequences_with_coverage.extend(\n sequence_builder(\n self.reference,\n seq_string,\n quality_string,\n n_fwd,\n n_rev,\n mapping_quality,\n insert_size,\n read_id,\n read_flags,\n cigar_string,\n read_start,\n read_mate_start\n )\n )\n return self\n\n def build_reads(self, chrom_id, read_tags):\n for read_seq_with_coverage in self._read_sequences_with_coverage:\n for read in read_seq_with_coverage.build_reads(\n chrom_id, read_tags):\n yield read\n\n\nclass AsciiVariantGenerator(object):\n\n def __init__(self, reference):\n self.reference = reference\n\n def get_variants(self, ascii_haplotypes):\n seq_bank = SequenceBank(self.reference)\n for candidate_ascii_haplotype in ascii_haplotypes:\n seq_bank.add_sequence(candidate_ascii_haplotype)\n return seq_bank.variants\n", "id": "5203084", "language": "Python", "matching_score": 4.029596328735352, "max_stars_count": 8, "path": "python/wecall/bamutils/sequence_bank.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.bamutils.read_sequence import ReadSequence, ReadSequenceWithCoverage, HIGH_QUALITY, ReadPairWithCoverage\nfrom wecall.bamutils.raw_string_sequence import RawStringSequences\nfrom wecall.bamutils.sequence import Sequence\nfrom wecall.bamutils.sequence_position import SequencePosition\nfrom wecall.bamutils.sequence_quality import SequenceQuality\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics.reference_chromosome import ReferenceChromosome\n\n\ndef sequence_builder(\n reference,\n seq_string,\n quality_string=None,\n n_fwd=None,\n n_rev=None,\n mapping_quality=HIGH_QUALITY,\n insert_size=None,\n read_id=None,\n read_flags=None,\n cigar_string=None,\n read_start=None,\n 
read_mate_start=None,\n):\n quality_string = \" \" * \\\n len(seq_string) if quality_string is None else quality_string\n if not all(\n i is None for i in [\n n_fwd,\n n_rev]) and any(\n i is None for i in [\n n_fwd,\n n_rev]):\n raise weCallException(\n \"Invalid combination of forward and reverse reads: n_fwd = {}, n_rev = {} \".format(\n n_fwd, n_rev))\n\n if len(seq_string) != reference.length_with_deletions():\n raise weCallException(\n \"Sequence has to be of the same length as reference. seq_length {}, ref_length {}\".format(\n len(seq_string), reference.length_with_deletions()))\n\n if len(quality_string) != reference.length_with_deletions():\n raise weCallException(\n \"Quality string has to be of the same length as reference.\")\n\n ref_pos = reference.pos_from\n current_raw_seq = RawStringSequences()\n sequences = []\n for ref_char, seq_char, qual_char in zip(\n reference.ref_seq, seq_string, quality_string):\n seq_position = SequencePosition(ref_char, seq_char, qual_char)\n\n if seq_position.is_gap and current_raw_seq.is_ongoing:\n sequences.append(current_raw_seq)\n current_raw_seq = RawStringSequences()\n elif not seq_position.is_gap:\n current_raw_seq.add_position(seq_position, ref_pos)\n\n ref_pos = seq_position.update_ref_pos(ref_pos)\n\n if current_raw_seq.is_ongoing:\n sequences.append(current_raw_seq)\n\n annotated_seqs = []\n if (\n len(sequences) %\n 2 == 0 and all(\n (sequences[index].is_forward_seq() for index in range(\n 0,\n len(sequences),\n 2))) and all(\n (sequences[index].is_reverse_seq() for index in range(\n 1,\n len(sequences),\n 2)))):\n # sequence of read pairs\n pairs = list(zip(\n (sequences[index] for index in range(0, len(sequences), 2)),\n (sequences[index] for index in range(1, len(sequences), 2))\n ))\n for fwd, rev in pairs:\n annotated_seqs.extend(\n build_annotated_pair(\n fwd,\n rev,\n n_fwd,\n n_rev,\n mapping_quality,\n insert_size,\n read_id,\n read_flags,\n cigar_string,\n read_start,\n read_mate_start))\n else:\n # unpaired reads\n for seq in sequences:\n annotated_seqs.extend(\n seq.build_annotated_seq(\n n_fwd,\n n_rev,\n mapping_quality,\n insert_size,\n read_id,\n read_flags,\n cigar_string,\n read_start,\n read_mate_start))\n return annotated_seqs\n\n\ndef build_annotated_pair(\n fwd,\n rev,\n n_fwd,\n n_rev,\n mapping_quality,\n insert_size,\n read_id,\n read_flags,\n cigar_string,\n read_start,\n read_mate_start):\n fwd_reference = ReferenceChromosome(fwd.reference_string, fwd.pos_from)\n rev_reference = ReferenceChromosome(rev.reference_string, rev.pos_from)\n fwd_sequence = Sequence(\n fwd_reference, fwd.sequence_string.replace(\n \",\", \".\").upper(), cigar_string)\n rev_sequence = Sequence(\n rev_reference, rev.sequence_string.replace(\n \",\", \".\").upper(), cigar_string)\n fwd_quality = SequenceQuality(fwd.quality_string)\n rev_quality = SequenceQuality(rev.quality_string)\n\n fwd_read_sequence = ReadSequence(\n fwd_sequence,\n fwd_quality,\n mapping_quality,\n insert_size,\n read_id,\n read_flags,\n read_start,\n read_mate_start)\n rev_read_sequence = ReadSequence(\n rev_sequence,\n rev_quality,\n mapping_quality,\n insert_size,\n read_id,\n read_flags,\n read_start,\n read_mate_start)\n return [\n ReadPairWithCoverage(\n fwd_read_sequence,\n rev_read_sequence,\n n_fwd,\n n_rev)]\n", "id": "3620523", "language": "Python", "matching_score": 6.512628078460693, "max_stars_count": 8, "path": "python/wecall/bamutils/sequence_builder.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom 
wecall.bamutils.read_sequence import ReadSequence, ReadSequenceWithCoverage\nfrom wecall.bamutils.sequence import Sequence\nfrom wecall.bamutils.sequence_position import DELETED_BASE\nfrom wecall.bamutils.sequence_quality import SequenceQuality\nfrom wecall.common.exceptions import weCallException\nimport re\n\nfrom wecall.genomics.reference_chromosome import ReferenceChromosome\n\n\nclass RawStringSequences(object):\n\n def __init__(self):\n self.reference_string = \"\"\n self.__sequence_string = \"\"\n self.quality_string = \"\"\n self.pos_from = None\n self.is_ongoing = False\n\n @property\n def sequence_string(self):\n if not self.is_forward_seq() and not self.is_reverse_seq():\n raise weCallException(\n \"Illegal character in sequence {!r}\".format(\n self.__sequence_string))\n\n return self.__sequence_string\n\n def add_position(self, sequence_position, ref_pos):\n self.reference_string += sequence_position.ref_char\n self.__sequence_string += sequence_position.seq_char\n\n if sequence_position.seq_char != DELETED_BASE:\n self.quality_string += sequence_position.qual_char\n\n self.is_ongoing = True\n\n if self.pos_from is None:\n self.pos_from = ref_pos\n\n def is_forward_seq(self):\n return re.match(r'^[ACGTURYKMSWBDHVN\\*\\.]*\\Z', self.__sequence_string)\n\n def is_reverse_seq(self):\n return re.match(r'^[acgturykmswbdhvn\\*\\,]*\\Z', self.__sequence_string)\n\n def build_annotated_seq(\n self, n_fwd, n_rev, mapping_quality, insert_size,\n read_id, read_flags, cigar_string, read_start, read_mate_start\n ):\n reference = ReferenceChromosome(self.reference_string, self.pos_from)\n sequence = Sequence(\n reference, self.sequence_string.replace(\n \",\", \".\").upper(), cigar_string)\n quality = SequenceQuality(self.quality_string)\n\n read_sequence = ReadSequence(\n sequence,\n quality,\n mapping_quality,\n insert_size,\n read_id,\n read_flags,\n read_start,\n read_mate_start)\n if n_fwd is not None:\n return [ReadSequenceWithCoverage(read_sequence, n_fwd, n_rev)]\n elif self.is_reverse_seq():\n return [ReadSequenceWithCoverage(read_sequence, 0, 1)]\n elif self.is_forward_seq():\n return [ReadSequenceWithCoverage(read_sequence, 1, 0)]\n else:\n raise weCallException(\n \"Raw sequence: {} is neither forward or reverse\".format(self))\n", "id": "3798445", "language": "Python", "matching_score": 2.4995648860931396, "max_stars_count": 8, "path": "python/wecall/bamutils/raw_string_sequence.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport re\nfrom wecall.bamutils.cigar import Cigar\nfrom wecall.bamutils.sequence_position import DELETED_BASE, MATCHING_BASE\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics.variant import Variant, TYPE_DEL, TYPE_INS, TYPE_SNP, TYPE_TO_STR, TYPE_REF\n\n\nclass Sequence(object):\n \"\"\"\n Object to represent a sequence of DNA.\n \"\"\"\n\n def __init__(self, reference, seq_string, cigar_string=None):\n self.__validate_input(reference, seq_string)\n\n self._reference = reference\n self._seq = seq_string\n self.pos_from = self._reference.pos_from\n self.pos_to = self._reference.pos_to\n\n self.variants = self._get_variants()\n self.cigar = cigar_string or str(self._get_cigar())\n\n @property\n def cigar_string(self):\n return str(self.cigar)\n\n @property\n def raw_sequence(self):\n return self._seq\n\n def length_minus_deletions(self):\n return len(self.sequence_minus_deletions())\n\n def length_with_deletions(self):\n return len(self._seq)\n\n def sequence_with_deletions(self):\n seq = \"\"\n for ref_char, 
alt_char in zip(self._reference.ref_seq, self._seq):\n if alt_char == MATCHING_BASE:\n seq += ref_char\n else:\n seq += alt_char\n return seq\n\n def sequence_minus_deletions(self):\n return self.sequence_with_deletions().replace(DELETED_BASE, \"\")\n\n def _get_variants(self):\n variants = set()\n\n ref_index = self.pos_from - 1\n current_variant = None\n\n for ref_char, alt_char in zip(self._reference.ref_seq, self._seq):\n if ref_char != DELETED_BASE:\n ref_index += 1\n\n if ref_char == DELETED_BASE and alt_char == MATCHING_BASE:\n raise weCallException(\n \"Invalid sequence at ref position {}\".format(ref_index))\n elif ref_char == DELETED_BASE and alt_char == DELETED_BASE:\n continue\n elif alt_char == MATCHING_BASE:\n current_variant = self.__add_variant_to_set(\n current_variant, None, variants)\n continue\n\n if ref_char == DELETED_BASE:\n # insertion\n var_pos = ref_index\n var_ref = self._reference[var_pos]\n var_alt = var_ref + alt_char\n elif alt_char == DELETED_BASE:\n # deletion\n var_pos = ref_index - 1\n var_ref = self._reference[var_pos] + ref_char\n var_alt = self._reference[var_pos]\n else:\n # SNP\n var_pos = ref_index\n var_ref = ref_char\n var_alt = alt_char\n\n new_variant = Variant(\n self._reference.chrom, var_pos, var_ref, var_alt)\n current_variant = self.__add_variant_to_set(\n current_variant, new_variant, variants)\n\n self.__add_variant_to_set(current_variant, None, variants)\n variants = self.__remove_deletions_from_edges(variants)\n return variants\n\n @staticmethod\n def __add_variant_to_set(current_variant, new_variant, var_set):\n var_1, current_variant = Sequence.__potentially_merge_adjacent_variants(\n current_variant, new_variant)\n if var_1 is not None and var_1.type != TYPE_REF:\n var_set.add(var_1)\n return current_variant\n\n def __remove_deletions_from_edges(self, variants):\n filtered_variants = set()\n for var in variants:\n if var.type == TYPE_DEL and (\n var.pos_from == -1 or var.pos_to == self.pos_to):\n continue\n else:\n filtered_variants.add(var)\n\n return filtered_variants\n\n @staticmethod\n def __potentially_merge_adjacent_variants(var_1, var_2):\n if var_1 is None or var_2 is None or var_1.type != var_2.type:\n return var_1, var_2\n else:\n if var_1.type == TYPE_SNP or var_1.type == TYPE_REF:\n return var_1, var_2\n elif var_1.type == TYPE_DEL:\n merged_variant = Variant(\n var_1.chrom, var_1.pos_from, var_1.ref + var_2.ref[-1], var_1.alt)\n return None, merged_variant\n elif var_1.type == TYPE_INS:\n merged_variant = Variant(\n var_1.chrom, var_1.pos_from, var_1.ref, var_1.alt + var_2.alt[-1])\n return None, merged_variant\n else:\n raise weCallException(\n \"Unexpected variant type: \" + TYPE_TO_STR[var_1.type])\n\n def _get_cigar(self):\n cigar = Cigar()\n for ref_char, alt_char in zip(self._reference.ref_seq, self._seq):\n if alt_char == MATCHING_BASE:\n cigar += Cigar([(Cigar.MATCH, 1)])\n elif alt_char != DELETED_BASE and ref_char != DELETED_BASE:\n # SNP\n cigar += Cigar([(Cigar.MATCH, 1)])\n elif alt_char == DELETED_BASE and ref_char != DELETED_BASE:\n cigar += Cigar([(Cigar.DELETION, 1)])\n elif alt_char != DELETED_BASE and ref_char == DELETED_BASE:\n cigar += Cigar([(Cigar.INSERTION, 1)])\n return cigar\n\n @staticmethod\n def __validate_input(ref, seq):\n if len(seq) != ref.length_with_deletions():\n raise weCallException(\n \"Sequence has to be of the same length as reference.\")\n if not re.match(r'^[ACGTURYKMSWBDHVN\\*\\.]*\\Z', seq):\n raise weCallException(\n \"Illegal character in sequence {!r}\".format(seq))\n", 
"id": "9361513", "language": "Python", "matching_score": 3.0177159309387207, "max_stars_count": 8, "path": "python/wecall/bamutils/sequence.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.utils.interval import ChromInterval\nimport re\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics.chromosome import standardise_chromosome\n\n\nDEFAULT_CHROM = \"1\"\n\n\nclass ReferenceChromosome(object):\n\n def __init__(self, ref_string, pos_from=0, chrom=DEFAULT_CHROM):\n self.__validate_ref_seq(ref_string)\n\n self.chrom = chrom\n self.pos_from = pos_from\n self.ref_seq = ref_string\n self._ref_minus_deletions = self.ref_seq.replace(\"*\", \"\")\n\n def __str__(self):\n return self._ref_minus_deletions\n\n def length_with_deletions(self):\n return len(self.ref_seq)\n\n def length_minus_deletions(self):\n return len(self._ref_minus_deletions)\n\n def __getitem__(self, item):\n return self._ref_minus_deletions[item - self.pos_from]\n\n @property\n def chrom_interval(self):\n return ChromInterval(self.chrom, self.pos_from, self.pos_to)\n\n @property\n def pos_to(self):\n return self.pos_from + len(self._ref_minus_deletions)\n\n def fasta_string(self):\n return self.pos_from * 'N' + self._ref_minus_deletions\n\n def __validate_ref_seq(self, ref_seq):\n if not re.match(r'^[ACGTURYKMSWBDHVN\\*]*\\Z', ref_seq):\n raise weCallException(\n \"Illegal character in reference sequence {!r}\".format(ref_seq))\n", "id": "8853603", "language": "Python", "matching_score": 1.4350645542144775, "max_stars_count": 8, "path": "python/wecall/genomics/reference_chromosome.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom abc import ABCMeta, abstractmethod\nfrom wecall.genomics.chromosome import CHROMOSOME_ORDER\nfrom wecall.genomics.reference_chromosome import ReferenceChromosome\n\n\nclass AbstractReferenceGenome(object, metaclass=ABCMeta):\n\n @abstractmethod\n def chromosomes(self):\n return []\n\n @abstractmethod\n def fetch(self, chrom, start, end):\n return ''\n\n @abstractmethod\n def get_chrom_length(self, chrom):\n return 0\n\n\nclass InMemoryReferenceGenome(AbstractReferenceGenome):\n\n def __init__(self):\n self.__data = {}\n\n def with_chrom(self, name, sequence, pos_from=0):\n reference_chrom = self.__data[name] = ReferenceChromosome(\n sequence, pos_from, name)\n return reference_chrom\n\n def chromosomes(self):\n return sorted(list(self.__data.keys()),\n key=lambda x: CHROMOSOME_ORDER.get(x) or -1)\n\n def get_chrom_length(self, chrom):\n return self.__data[chrom].pos_to\n\n def fetch(self, chrom, start=None, end=None):\n if start is None:\n start = 0\n if end is None:\n end = self.get_chrom_length(chrom)\n\n seq = self.__data[chrom].fasta_string()[start:end]\n if end - start != len(seq):\n raise IndexError\n return seq\n", "id": "12054793", "language": "Python", "matching_score": 3.031806230545044, "max_stars_count": 8, "path": "python/wecall/genomics/reference_genome.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import TestCase\nfrom wecall.genomics.reference_genome import InMemoryReferenceGenome\n\n\nclass TestInMemoryReferenceGenome(TestCase):\n\n def test_should_not_get_duplicate_or_extra_chromosomes(self):\n reference_genome = InMemoryReferenceGenome()\n reference_genome.with_chrom(\"1\", \"\", 0)\n reference_genome.with_chrom(\"1\", \"\", 0)\n reference_genome.with_chrom(\"2\", \"\", 0)\n\n self.assertEqual({\"1\", \"2\"}, set(reference_genome.chromosomes()))\n\n def 
test_should_get_chromosomes_in_correct_order(self):\n reference_genome = InMemoryReferenceGenome()\n reference_genome.with_chrom(\"1\", \"\", 0)\n reference_genome.with_chrom(\"3\", \"\", 0)\n reference_genome.with_chrom(\"2\", \"\", 0)\n\n self.assertEqual(['1', '2', '3'], reference_genome.chromosomes())\n\n def test_should_get_total_chromosome_length(self):\n reference_genome = InMemoryReferenceGenome()\n reference_genome.with_chrom(\"1\", \"ATG\", 11)\n\n self.assertEqual(14, reference_genome.get_chrom_length(\"1\"))\n\n def test_should_fetch_correct_sequence_with_padding(self):\n reference_genome = InMemoryReferenceGenome()\n reference_genome.with_chrom(\"1\", \"ATG\", 11)\n\n self.assertEqual(\"NA\", reference_genome.fetch(\"1\", 10, 12))\n\n def test_should_get_index_error_when_out_of_range(self):\n reference_genome = InMemoryReferenceGenome()\n reference_genome.with_chrom(\"1\", \"ATG\", 11)\n\n with self.assertRaises(IndexError):\n print((reference_genome.fetch(\"1\", 100, 120)))\n", "id": "4558598", "language": "Python", "matching_score": 1.0316543579101562, "max_stars_count": 8, "path": "test/test_utils/genomics/test_reference_genome.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.common.exceptions import weCallException\n\n# Define a standard ordering for chromosomes. N.B. We always use the naming convention \"1\",\n# \"2\", \"3\" etc for internal use, not \"chr1\", \"chr2\", \"chr3\"\n\nCHROMOSOME_LIST = [\n '1',\n '2',\n '3',\n '4',\n '5',\n '6',\n '7',\n '8',\n '9',\n '10',\n '11',\n '12',\n '13',\n '14',\n '15',\n '16',\n '17',\n '18',\n '19',\n '20',\n '21',\n '22',\n 'X',\n 'Y',\n 'MT']\nCHROMOSOME_ORDER = {\n chrom: index for index,\n chrom in enumerate(CHROMOSOME_LIST)}\n\n\ndef chromosome_comp(lhs, rhs):\n p_left = CHROMOSOME_ORDER.get(lhs, len(CHROMOSOME_ORDER))\n p_right = CHROMOSOME_ORDER.get(rhs, len(CHROMOSOME_ORDER))\n if p_left != p_right:\n return p_left < p_right\n else:\n return lhs < rhs\n\n\ndef standardise_chromosome(chrom):\n stripped_chrom = chrom.upper().replace(\"CHR\", \"\").lstrip('0')\n if stripped_chrom == \"M\":\n return \"MT\"\n else:\n return stripped_chrom\n\n\ndef add_chr(chrom):\n return \"chr{}\".format(chrom)\n\n\ndef get_chromosome_index(chrom):\n try:\n return CHROMOSOME_ORDER[standardise_chromosome(chrom)]\n except KeyError:\n raise weCallException(\"Invalid chromosome {}\".format(chrom))\n", "id": "11399080", "language": "Python", "matching_score": 0.7283416390419006, "max_stars_count": 8, "path": "python/wecall/genomics/chromosome.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\nfrom collections import OrderedDict\n\n\nclass WecallConfigFileRunnerTest(AsciiWecallRunnerTest):\n \"\"\"\n Run a set of tests specified in a configuration file\n \"\"\"\n\n def parse_file_into_list(self, input_file):\n config_data = []\n current_key = None\n\n for line in input_file:\n\n if line.strip().startswith(\"#\"):\n continue\n\n line_no_newline = line.rstrip(\"\\n\")\n line_no_whitespace = line.strip()\n\n if not line_no_whitespace:\n continue\n\n if line_no_whitespace.endswith(\":\"):\n current_key = line_no_whitespace[0:-1]\n continue\n\n elif \":\" in line:\n key, value = [x.strip() for x in line_no_whitespace.split(\":\")]\n config_data.append([key, [value]])\n current_key = None\n continue\n\n else:\n if len(config_data) == 0 or current_key is not None:\n if current_key != config_data[-1][0]:\n 
config_data.append([current_key, [line_no_newline]])\n else:\n config_data[-1][1].append(line_no_newline)\n else:\n raise Exception(\"Invalid config file\")\n\n return config_data\n\n def split_config_data(self, data):\n tests = OrderedDict()\n current_key = None\n\n for key, values in data:\n if key == \"TestName\":\n current_key = values[0]\n test_type = current_key.split(\"_\")[0]\n tests[values[0]] = OrderedDict()\n tests[values[0]][\"TestType\"] = test_type\n else:\n if current_key is not None:\n tests[current_key][key] = values\n else:\n raise Exception(\"Invalid config file\")\n return tests\n\n def check_variant_calling(self, test_name, test_data):\n samples = test_data[\"Samples\"][0].split()\n reference_sequences = set()\n sample_bam_data = {}\n sample_variant_calls = {}\n\n for sample in samples:\n this_sample_bam_data = test_data[\"{}_Sequence\".format(sample)]\n reference_sequence = this_sample_bam_data[0]\n reference_sequences.add(reference_sequence)\n reads = this_sample_bam_data[1:]\n sample_bam_data[sample] = reads\n expected_variants = test_data[\"{}_ExpectedVariants\".format(sample)]\n\n for variant in expected_variants:\n chrom, pos, ref, alt, genotype = variant.split()\n this_variant = (int(pos), ref, alt)\n\n if this_variant not in sample_variant_calls:\n sample_variant_calls[this_variant] = {}\n\n sample_variant_calls[this_variant][sample] = genotype\n\n assert len(reference_sequences) == 1\n the_reference = reference_sequences.pop()\n\n self.calls_variants_from_samples(\n the_reference,\n sample_bam_data,\n expected_call_stubs=sample_variant_calls\n )\n\n def check_recalibration(self, test_name, test_data):\n pass\n\n def run_from_config_file(self, fileName):\n \"\"\"\n Load config file and run all specified tests\n \"\"\"\n with open(fileName, 'r') as input_file:\n data = self.parse_file_into_list(input_file)\n test_data = self.split_config_data(data)\n\n for test in test_data:\n this_test_data = test_data[test]\n test_type = test.split(\"_\")[0]\n\n if test_type == \"VariantCalling\":\n self.check_variant_calling(test, this_test_data)\n elif test_type == \"Recalibration\":\n self.check_recalibration(test, this_test_data)\n else:\n raise Exception(\"Unknown test type {}\".format(test_type))\n", "id": "5702320", "language": "Python", "matching_score": 2.9368276596069336, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/wecall_config_file_test_runnner.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\nfrom wecall_test_drivers.wecall_config_file_test_runnner import WecallConfigFileRunnerTest\n\n\nclass TestsFromConfigFiles(WecallConfigFileRunnerTest):\n def test_basic_config(self):\n test_dir = os.path.dirname(__file__)\n fileName = os.path.join(test_dir, \"config\", \"basic_calling_test.config\")\n self.run_from_config_file(fileName)\n", "id": "10481001", "language": "Python", "matching_score": 1.5918172597885132, "max_stars_count": 8, "path": "test/wecall_acceptance/wecall_runner/test_from_config_file.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\nimport unittest\n\nimport pep8\n\n\nclass TestPep8(unittest.TestCase):\n \"\"\"Run PEP8 on all files in this directory and subdirectories.\"\"\"\n\n def setUp(self):\n base_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(\n os.path.abspath(__file__)))))\n self.base_dirs = [os.path.join(base_dir, sub_dir) for sub_dir\n in [\"python\", \"scripts\", \"test\", \"test_drivers\"]]\n\n def test_pep8(self):\n style = pep8.StyleGuide()\n 
style.options.max_line_length = 120 # because it isn't 1928 anymore\n errors = 0\n\n for base_dir in self.base_dirs:\n for root, _, files in os.walk(base_dir):\n python_files = [f for f in files if f.endswith('.py')]\n for pf in python_files:\n check = style.check_files([os.path.join(root, pf)])\n errors += check.file_errors\n self.assertEqual(errors, 0)\n", "id": "9120599", "language": "Python", "matching_score": 0.8102957606315613, "max_stars_count": 8, "path": "test/test_style/test_wecall_pep8.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom logging import FileHandler, StreamHandler, DEBUG, INFO, getLogger, Formatter\nimport shutil\nimport unittest\nimport os\nimport sys\n\n\nclass BaseTest(unittest.TestCase):\n\n def setUp(self):\n self.work_dir = os.path.join(\n os.environ[\"WECALL_TEST_RESULTS\"],\n *self.id().split(\".\"))\n if os.path.exists(self.work_dir):\n shutil.rmtree(self.work_dir)\n os.makedirs(self.work_dir)\n\n logger = getLogger()\n logger.setLevel(DEBUG)\n fh = FileHandler(os.path.join(self.work_dir, \"test.log\"))\n ch = StreamHandler(sys.stdout)\n logger.addHandler(configure_log_handler(fh, DEBUG))\n logger.addHandler(configure_log_handler(ch, INFO))\n\n def tearDown(self):\n logger = getLogger()\n for handler in logger.handlers[:]:\n handler.close()\n logger.removeHandler(handler)\n\n\ndef configure_log_handler(handler, level):\n handler.setLevel(level)\n formatter = Formatter('%(message)s')\n handler.setFormatter(formatter)\n return handler\n", "id": "12374810", "language": "Python", "matching_score": 1.0625898838043213, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/base_test.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport logging\nimport subprocess\nimport os\n\nclass ToolRunner(object):\n\n def __init__(self):\n self.return_code = None\n self.stdout = None\n self.stderr = None\n\n def log_output(self):\n log_output(\"stdout\", self.stdout)\n log_output(\"stderr\", self.stderr)\n logging.info('returncode: {}'.format(self.return_code))\n logging.info('')\n\n def run(self, command, cwd=None):\n # TODO: make this private\n proc = subprocess.Popen(\n command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd\n )\n\n self.stdout, self.stderr = proc.communicate()\n self.return_code = proc.returncode\n\n return self\n\n def start(self, command, cwd=None):\n log_command(command)\n self.run(command, cwd)\n self.log_output()\n return self\n\n\ndef subprocess_expectation_from_tool_runner(test_case, tool_runner):\n return SubprocessExpectation(\n test_case,\n tool_runner.stdout,\n tool_runner.stderr,\n tool_runner.return_code)\n\n\nclass SubprocessExpectation(object):\n def __init__(self, test_case, stdout, stderr, return_code):\n self.__test_case = test_case\n self.__stdout = stdout\n self.__stderr = stderr\n self.__return_code = return_code\n\n def failure(self):\n self.__test_case.assertNotEqual(0, self.__return_code)\n\n def success(self):\n self.__test_case.assertEqual(\n 0, self.__return_code, msg=\"0!={}\\nstderr:\\n{!s}\".format(\n self.__return_code, self.__stderr))\n\n\ndef log_command(command):\n logging.info(\"Running: `{}`\".format(\" \".join(command)))\n logging.info('')\n\n\ndef log_file(filename):\n if filename is None:\n return\n try:\n with open(filename) as fp:\n log_output(filename, fp.read())\n except IOError:\n logging.info('{!r} not found'.format(filename))\n\n\ndef log_output(title, output):\n logging.info(\"{}:\".format(title))\n for line in str(output).split('\\n'):\n 
logging.info('> ' + line)\n logging.info('')\n\n\ndef log_bam_file(reference_filename, bam_filename, chrom=None):\n command = [os.path.join(os.environ['WECALL_BIN'], \"samtools\"), 'tview', '-dT', bam_filename, reference_filename]\n if chrom is not None:\n print(\"Contig: {}\".format(chrom))\n command.extend([\"-p\", chrom])\n log_output(bam_filename, subprocess.check_output(command))\n\n command_2 = [os.path.join(os.environ['WECALL_BIN'], \"samtools\"), 'view', bam_filename]\n if chrom is not None:\n print(\"Contig: {}\".format(chrom))\n log_output(bam_filename, subprocess.check_output(command_2))\n", "id": "11283733", "language": "Python", "matching_score": 2.476294755935669, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/tool_runner.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.fastautils.fasta_file_builder import FastaFileBuilder\nfrom wecall_test_drivers.base_test import BaseTest\nimport os\nimport subprocess\nfrom tempfile import NamedTemporaryFile\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestCmdLineOptions(BaseTest):\n def setUp(self):\n BaseTest.setUp(self)\n\n self.we_call = os.path.join(os.environ[\"WECALL_BIN\"], \"weCall\")\n self.bam_filename = os.path.join(self.work_dir, \"input.bam\")\n self.bam_index_filename = self.bam_filename + \".bai\"\n self.ref_filename = os.path.join(self.work_dir, \"refFile.fa\")\n self.ref_index_filename = self.ref_filename + \".fai\"\n self.output_filename = os.path.join(self.work_dir, \"output.vcf\")\n self.we_call_work_dir = os.path.join(self.work_dir, \"temp_dir\")\n self.log_filename = os.path.join(self.work_dir, \"_.log\")\n self.chrom = \"2\"\n self.chrom_string = \"A\" * 2\n\n def __build_default_fasta_file(self):\n fasta_file_builder = FastaFileBuilder(os.path.join(self.work_dir, \"haha.fa\"))\n fasta_file_builder.filename = self.ref_filename\n fasta_file_builder.with_chrom(self.chrom, self.chrom_string)\n fasta_file_builder.build().index()\n\n @property\n def default_cmd(self):\n return [\n self.we_call,\n \"--inputs\", self.bam_filename,\n \"--refFile\", self.ref_filename,\n \"--output\", self.output_filename,\n \"--verbosity\", \"0\",\n \"--logFilename\", self.log_filename,\n ]\n\n def test_should_not_predict_cmdline_option_for_input(self):\n self.__build_default_fasta_file()\n\n p = subprocess.Popen(\n [os.path.join(os.environ[\"WECALL_BIN\"], \"weCall\"), \"--inp\", \"input/path\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n stdout, stderr = p.communicate()\n\n self.assertEqual(p.returncode, 1)\n self.assertEqual(stderr.decode(), \"FAILED - unrecognised option '--inp'\\n\")\n\n def test_fail_when_input_files_missing(self):\n self.__build_default_fasta_file()\n\n p = subprocess.Popen(\n self.default_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n\n self.assertEqual(p.returncode, 1)\n self.assertRegex(\n stderr.decode(), \"FAILED - File {} does not exist\\n\".format(self.bam_filename))\n\n def test_should_fail_if_input_file_missing_index_file(self):\n with NamedTemporaryFile(prefix=\"_bam\", suffix=\".bam\", dir=self.work_dir) as fake_bam:\n self.bam_filename = fake_bam.name\n self.__build_default_fasta_file()\n\n p = subprocess.Popen(\n self.default_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n\n self.assertEqual(p.returncode, 1)\n self.assertRegex(\n stderr.decode(), \"FAILED - Index file {}.bai does not exist\\n\".format(self.bam_filename))\n\n def 
test_should_fail_if_input_doesnt_have_bam_extension(self):\n with NamedTemporaryFile(prefix=\"_bam\", suffix=\".not_bam\", dir=self.work_dir) as fake_bam:\n self.bam_filename = fake_bam.name\n\n self.__build_default_fasta_file()\n\n p = subprocess.Popen(\n self.default_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n\n self.assertEqual(p.returncode, 1)\n self.assertRegex(\n stderr.decode(), \"FAILED - File {} does not have .bam extension\\n\".format(self.bam_filename)\n )\n\n def test_should_fail_if_reference_file_is_missing(self):\n open(self.bam_filename, \"w\").close()\n open(self.bam_index_filename, \"w\").close()\n\n p = subprocess.Popen(\n self.default_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n\n self.assertEqual(p.returncode, 1)\n self.assertRegex(stderr.decode(), \"FAILED - File {} does not exist\\n\".format(self.ref_filename))\n\n def test_should_fail_if_reference_index_file_is_missing(self):\n open(self.bam_filename, \"w\").close()\n open(self.bam_index_filename, \"w\").close()\n\n with NamedTemporaryFile(prefix=\"_fa\", suffix=\".fa\", dir=self.work_dir) as fake_fa:\n self.ref_filename = fake_fa.name\n p = subprocess.Popen(\n self.default_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n\n self.assertEqual(p.returncode, 1)\n self.assertRegex(stderr.decode(), \"FAILED - Index file {}.fai does not exist\\n\".format(fake_fa.name))\n\n def test_should_fail_if_reference_doesnt_have_fa_extension(self):\n open(self.bam_filename, \"w\").close()\n open(self.bam_index_filename, \"w\").close()\n\n with NamedTemporaryFile(prefix=\"_fa\", suffix=\".not_fa\", dir=self.work_dir) as fake_fa:\n self.ref_filename = fake_fa.name\n p = subprocess.Popen(\n self.default_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = p.communicate()\n\n self.assertEqual(p.returncode, 1)\n self.assertRegex(stderr.decode(), \"FAILED - File {} does not have .fa extension\\n\".format(fake_fa.name))\n\n\nclass TestCmdLineOptionsUsingDriver(BaseTest):\n def setUp(self):\n BaseTest.setUp(self)\n self.output_filename = os.path.join(self.work_dir, \"output.vcf\")\n self.we_call_work_dir = os.path.join(self.work_dir, \"work_der\")\n\n def test_should_fail_if_work_dir_is_not_directory_when_run_in_parallel(self):\n with NamedTemporaryFile(prefix=\"not_a_directory\", dir=self.work_dir) as fake_dir:\n svc_driver = SVCDriver(self)\n svc_driver \\\n .with_ref_sequence(\"A\") \\\n .with_read(\".\") \\\n .with_number_of_jobs(2) \\\n .with_work_dir(fake_dir.name) \\\n .with_output_vcf_filename(self.output_filename)\n\n svc_driver\\\n .call(expected_success=False)\\\n .work_dir_not_a_directory_error(fake_dir.name)\n\n def test_should_fail_if_output_already_exists_serial(self):\n open(self.output_filename, \"w\").close()\n\n svc_driver = SVCDriver(self)\n svc_driver \\\n .with_ref_sequence(\"A\") \\\n .with_read(\".\") \\\n .with_overwrite(False) \\\n .with_output_vcf_filename(self.output_filename)\n\n assert(os.path.exists(self.output_filename))\n svc_driver.call(\n expected_success=False).output_exists_error(\n self.output_filename)\n\n def test_should_not_fail_due_to_existing_output_if_overwrite_specified(self):\n open(self.output_filename, \"w\").close()\n svc_driver = SVCDriver(self)\n svc_driver \\\n .with_ref_sequence(\"A\") \\\n .with_read(\".\") \\\n .with_overwrite(True) \\\n .with_output_vcf_filename(self.output_filename)\n 
svc_driver.call(expected_success=True)\n\n def test_should_not_fail_due_to_existing_output_if_no_option_specified(self):\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\"A\").with_read(\".\")\n svc_driver.call(expected_success=True)\n\n def test_should_fail_if_output_already_exists_parallel(self):\n open(self.output_filename, \"w\").close()\n\n svc_driver = SVCDriver(self)\n svc_driver \\\n .with_ref_sequence(\"A\") \\\n .with_read(\".\") \\\n .with_overwrite(False) \\\n .with_number_of_jobs(2) \\\n .with_work_dir(self.we_call_work_dir) \\\n .with_output_vcf_filename(self.output_filename)\n\n assert(os.path.exists(self.output_filename))\n svc_driver\\\n .call(expected_success=False)\\\n .output_exists_error(self.output_filename)\n\n def test_should_not_fail_due_to_existing_output_if_overwrite_specified_in_parallel(self):\n open(self.output_filename, \"w\").close()\n\n svc_driver = SVCDriver(self)\n svc_driver \\\n .with_ref_sequence(\"A\") \\\n .with_read(\".\") \\\n .with_overwrite(True) \\\n .with_number_of_jobs(2) \\\n .with_work_dir(self.we_call_work_dir) \\\n .with_output_vcf_filename(self.output_filename)\n\n assert(os.path.exists(self.output_filename))\n svc_driver.call(expected_success=True)\n\n def test_should_not_fail_due_to_existing_output_if_no_option_specified_in_parallel(self):\n open(self.output_filename, \"w\").close()\n\n svc_driver = SVCDriver(self)\n svc_driver \\\n .with_ref_sequence(\"A\") \\\n .with_read(\".\") \\\n .with_number_of_jobs(2) \\\n .with_work_dir(self.we_call_work_dir) \\\n .with_output_vcf_filename(self.output_filename)\n\n assert(os.path.exists(self.output_filename))\n svc_driver.call(expected_success=True)\n\n def test_should_fail_if_mem_limit_is_negative(self):\n svc_driver = SVCDriver(self)\n svc_driver \\\n .with_ref_sequence(\"A\") \\\n .with_read(\".\") \\\n .with_mem_limit(-100) \\\n .with_output_vcf_filename(self.output_filename)\n\n svc_driver.call(False).with_mem_limit_range_error()\n\n def test_should_fail_if_mem_limit_is_just_below_acceptable_range(self):\n svc_driver = SVCDriver(self)\n svc_driver \\\n .with_ref_sequence(\"A\") \\\n .with_read(\".\") \\\n .with_mem_limit(49) \\\n .with_output_vcf_filename(self.output_filename)\n\n svc_driver.call(False).with_mem_limit_range_error()\n\n def test_should_fail_if_mem_limit_is_just_above_acceptable_range(self):\n svc_driver = SVCDriver(self)\n svc_driver \\\n .with_ref_sequence(\"A\") \\\n .with_read(\".\") \\\n .with_mem_limit(1024 * 1024 + 1) \\\n .with_output_vcf_filename(self.output_filename)\n\n svc_driver.call(False).with_mem_limit_range_error()\n\n def test_should_fail_if_invalid_output_file_format_provided(self):\n svc_driver = SVCDriver(self)\n svc_driver \\\n .with_ref_sequence(\"A\") \\\n .with_read(\".\") \\\n .with_output_format('bah4.1')\n\n svc_driver.call(False).with_incorrect_output_format_error()\n", "id": "7315140", "language": "Python", "matching_score": 3.473766803741455, "max_stars_count": 8, "path": "test/wecall_acceptance/wecall_runner/test_cmd_line_options.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.vcfutils.parser import VCFReaderContextManager\nfrom wecall_test_drivers.base_test import BaseTest\nfrom shutil import rmtree\nfrom wecall.bamutils.sample_bank import SampleBank\nfrom wecall_test_drivers.ascii_wecall_runner import DEFAULT_SAMPLE_NAME\nfrom wecall_test_drivers.tool_runner import ToolRunner\nfrom wecall_test_drivers.variant_caller_builder import VariantCallerBuilderFromSampleBank\nfrom os import path, environ, 
mkdir\nfrom subprocess import Popen, PIPE\nimport tempfile\n\n\nclass TestWeCallReduceCmdLine(BaseTest):\n def setUp(self):\n BaseTest.setUp(self)\n self.tool_location = path.join(environ[\"WECALL_BIN\"], \"weCall\")\n\n self.input_directory_location = path.join(self.work_dir, \"input_directory\")\n self.log_filename = path.join(self.work_dir, \"_.log\")\n self.final_vcf_location = path.join(self.work_dir, \"_.vcf\")\n self.assertFalse(path.exists(self.final_vcf_location))\n\n def test_should_fail_if_input_directory_doesnt_exist(self):\n self.assertFalse(path.exists(self.input_directory_location))\n\n tool_runner = ToolRunner().start([\n self.tool_location,\n \"reduce\",\n \"--inputDir={}\".format(self.input_directory_location),\n \"--output={}\".format(self.final_vcf_location),\n \"--verbosity={!s}\".format(0),\n \"--logFilename={!s}\".format(self.log_filename),\n ])\n\n self.assertEqual(tool_runner.return_code, 1)\n self.assertRegex(tool_runner.stderr.decode(), \"FAILED - Working dir: .* does not exist\")\n\n def test_should_fail_if_directory_contains_files_with_non_vcf_extension(self):\n if path.exists(self.input_directory_location):\n rmtree(self.input_directory_location)\n mkdir(self.input_directory_location)\n\n self.assertTrue(path.exists(self.input_directory_location))\n tool_runner = ToolRunner()\n with tempfile.NamedTemporaryFile(prefix=\"fake_vcf\", dir=self.input_directory_location):\n tool_runner.start([\n self.tool_location, \"reduce\",\n \"--inputDir={}\".format(self.input_directory_location),\n \"--output={}\".format(self.final_vcf_location),\n \"--logFilename={!s}\".format(self.log_filename),\n ])\n\n self.assertEqual(tool_runner.return_code, 1)\n self.assertRegex(tool_runner.stderr.decode(), \"FAILED - file .* is not a VCF\")\n\n def test_should_fail_if_directory_is_empty(self):\n if path.exists(self.input_directory_location):\n rmtree(self.input_directory_location)\n mkdir(self.input_directory_location)\n\n self.assertTrue(path.exists(self.input_directory_location))\n tool_runner = ToolRunner() \\\n .start([\n self.tool_location, \"reduce\",\n \"--inputDir={}\".format(self.input_directory_location),\n \"--output={}\".format(self.final_vcf_location),\n \"--logFilename={!s}\".format(self.log_filename),\n ])\n\n self.assertEqual(tool_runner.return_code, 1)\n self.assertRegex(tool_runner.stderr.decode(),\n \"FAILED - directory .*{} is empty\".format(self.input_directory_location))\n\n def test_should_fail_if_directory_contains_a_directory(self):\n if path.exists(self.input_directory_location):\n rmtree(self.input_directory_location)\n mkdir(self.input_directory_location)\n\n self.assertTrue(path.exists(self.input_directory_location))\n\n non_vcf_file = tempfile.mkdtemp(\n prefix=\"fake_file\",\n suffix=\".vcf\",\n dir=self.input_directory_location)\n\n tool_runner = ToolRunner().start([\n self.tool_location, \"reduce\",\n \"--inputDir={}\".format(self.input_directory_location),\n \"--output={}\".format(self.final_vcf_location),\n \"--logFilename={}\".format(self.log_filename),\n ])\n\n self.assertEqual(tool_runner.return_code, 1)\n self.assertRegex(\n tool_runner.stderr.decode(),\n \"FAILED - .*{} is not a file\".format(non_vcf_file))\n if path.exists(non_vcf_file):\n rmtree(non_vcf_file)\n\n def test_should_fail_if_directory_contains_files_with_empty_vcfs(self):\n if path.exists(self.input_directory_location):\n rmtree(self.input_directory_location)\n mkdir(self.input_directory_location)\n\n self.assertTrue(path.exists(self.input_directory_location))\n with 
tempfile.NamedTemporaryFile(prefix=\"fake_vcf\", suffix=\".vcf\", dir=self.input_directory_location):\n p = Popen(\n [\n self.tool_location, \"reduce\",\n \"--inputDir={}\".format(self.input_directory_location),\n \"--output={}\".format(self.final_vcf_location),\n \"--logFilename={!s}\".format(self.log_filename),\n ],\n stdout=PIPE,\n stderr=PIPE\n )\n stdout, stderr = p.communicate()\n\n self.assertEqual(p.returncode, 1)\n self.assertRegex(stderr.decode(), \"FAILED - file .* is not a valid VCF\")\n\n def test_should_fail_if_other_params_are_passed(self):\n if path.exists(self.input_directory_location):\n rmtree(self.input_directory_location)\n mkdir(self.input_directory_location)\n\n self.assertTrue(path.exists(self.input_directory_location))\n\n p = Popen(\n [\n self.tool_location, \"reduce\",\n \"--inputDir={}\".format(self.input_directory_location),\n \"--output={}\".format(self.final_vcf_location),\n \"--regions=1\",\n \"--logFilename={!s}\".format(self.log_filename),\n ],\n stdout=PIPE,\n stderr=PIPE\n )\n stdout, stderr = p.communicate()\n\n self.assertEqual(p.returncode, 1)\n self.assertEqual(\n stderr.decode(), \"FAILED - unrecognised option '--regions=1'\\n\")\n\n\nclass TestReduce(BaseTest):\n def setUp(self):\n BaseTest.setUp(self)\n self.tool_location = path.join(environ[\"WECALL_BIN\"], \"weCall\")\n self.intermediate_vcfs_dir = path.join(self.work_dir, \"intermediate_vcfs\")\n self.final_vcf_location = path.join(self.work_dir, \"reduced.vcf\")\n self.log_filename = path.join(self.work_dir, \"_.log\")\n\n mkdir(self.intermediate_vcfs_dir)\n\n self.stdout, self.stderr = None, None\n\n def tearDown(self):\n if path.exists(self.intermediate_vcfs_dir):\n rmtree(self.intermediate_vcfs_dir)\n BaseTest.tearDown(self)\n\n def test_should_reduce_a_wecall_produced_vcf_to_a_valid_vcf(self):\n temp_vcf_filename = self.__run_wecall_variant_caller(\n \"1\",\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\"...................T......................\"],\n )\n\n with VCFReaderContextManager(temp_vcf_filename) as temp_vcf:\n reference_header = temp_vcf.header\n reference_records = list(temp_vcf.read_records())\n\n self.__run_wecall_reduce()\n\n with VCFReaderContextManager(self.final_vcf_location) as final_vcf:\n self.assertEqual(final_vcf.header, reference_header)\n final_records = list(final_vcf.read_records())\n\n self.assertEqual(len(final_records), 1)\n self.assertEqual(final_records, reference_records)\n\n def test_should_derive_use_lexigraphical_order_of_vcfs_for_reduce(self):\n temp_vcf_filename_b = self.__run_wecall_variant_caller(\n \"2\",\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\"............T........................C....\"], vcf_stem=\"ab\"\n )\n temp_vcf_filename_a = self.__run_wecall_variant_caller(\n \"1\",\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\"...................T......................\"], vcf_stem=\"aa\"\n )\n\n with VCFReaderContextManager(temp_vcf_filename_a) as temp_vcf_a:\n with VCFReaderContextManager(temp_vcf_filename_b) as temp_vcf_b:\n # aa is lexicographical less than ab\n reference_records = list(\n temp_vcf_a.read_records()) + list(temp_vcf_b.read_records())\n\n self.__run_wecall_reduce()\n\n with VCFReaderContextManager(self.final_vcf_location) as final_vcf:\n final_records = list(final_vcf.read_records())\n\n self.assertEqual(len(final_records), 3)\n self.assertEqual(final_records, reference_records)\n\n def test_should_obtain_correct_vcf_header_on_reduce(self):\n temp_vcf_filename_b = self.__run_wecall_variant_caller(\n 
\"2\",\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\"............T........................C....\"], vcf_stem=\"ab\"\n )\n temp_vcf_filename_a = self.__run_wecall_variant_caller(\n \"1\",\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\"...................T......................\"], vcf_stem=\"aa\"\n )\n\n with VCFReaderContextManager(temp_vcf_filename_a) as temp_vcf_a:\n with VCFReaderContextManager(temp_vcf_filename_b) as temp_vcf_b:\n temp_vcf_a.read_header()\n header_a = temp_vcf_a.header\n\n temp_vcf_b.read_header()\n header_b = temp_vcf_b.header\n\n self.__run_wecall_reduce()\n\n with VCFReaderContextManager(self.final_vcf_location) as final_vcf:\n final_vcf.read_header()\n\n expected_header = header_a\n\n expected_header.set_contig('2', header_b.get_contig('2').length)\n self.assertEqual(final_vcf.header, expected_header)\n\n def __run_wecall_variant_caller(self, chrom, reference_string, sequence_list, vcf_stem=None):\n if vcf_stem is None:\n vcf_stem = chrom\n sample_bank = SampleBank(reference_string, chrom=chrom)\n sample_bank.add_sample_with_seqs_and_quals(DEFAULT_SAMPLE_NAME, sequence_list, n_fwd=10, n_rev=10)\n vc_builder = VariantCallerBuilderFromSampleBank(sample_bank, self.work_dir)\n vc_wrapper = vc_builder.build()\n vc_wrapper.add_additional_command(\"allowMNPCalls\", False)\n vc_wrapper.output_vcf = path.join(self.intermediate_vcfs_dir, \"{}.vcf\".format(vcf_stem))\n vc_wrapper.run()\n return vc_wrapper.output_vcf\n\n def __run_wecall_reduce(self):\n cmd = [\n self.tool_location,\n \"reduce\",\n \"--inputDir={}\".format(self.intermediate_vcfs_dir),\n \"--output={}\".format(self.final_vcf_location),\n \"--logFilename={!s}\".format(self.log_filename),\n ]\n\n print(\"Running '{}'\".format(\" \".join(cmd)))\n\n p = Popen(cmd, stdout=PIPE, stderr=PIPE)\n self.stdout, self.stderr = p.communicate()\n self.assertEqual(p.returncode, 0, \"stdout = {}\\nstderr={}\".format(self.stdout, self.stderr))\n self.assertTrue(path.exists(self.final_vcf_location))\n", "id": "5840146", "language": "Python", "matching_score": 5.105454444885254, "max_stars_count": 8, "path": "test/wecall_acceptance/wecall_runner/test_wecall_reduce.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.bamutils.sequence_quality import SequenceQuality\nimport pysam\nfrom wecall.bamutils.sample_bank import SampleBank\nfrom wecall_test_drivers.ascii_wecall_runner import DEFAULT_SAMPLE_NAME\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.variant_caller_builder import VariantCallerBuilderFromSampleBank\nimport os\n\n\nclass AsciiQualityRecalibrationTest(BaseTest):\n def setUp(self):\n BaseTest.setUp(self)\n self.sample_name = DEFAULT_SAMPLE_NAME\n self.output_stem = os.path.join(self.work_dir, \"tmp\")\n self.output_sam = \"{}_{}.sam\".format(\n self.output_stem, DEFAULT_SAMPLE_NAME)\n\n def assert_matching_ascii_qualities(self, first, second):\n non_ascii_first = SequenceQuality.parse_ascii_to_quality_string(first)\n non_ascii_second = SequenceQuality.parse_ascii_to_quality_string(\n second)\n self.assertEqual(non_ascii_first, non_ascii_second)\n\n def assert_quality_recalibrated_in_output_bam(\n self, ref_string, input_bam_seqs, output_bam_seqs):\n input_sample_bank = SampleBank(ref_string)\n input_sample_bank.add_sample_with_seqs_and_quals(\n self.sample_name, input_bam_seqs)\n\n output_sample_bank = SampleBank(ref_string)\n output_sample_bank.add_sample_with_seqs_and_quals(\n self.sample_name, output_bam_seqs)\n\n vc_builder = 
VariantCallerBuilderFromSampleBank(\n input_sample_bank, self.work_dir)\n vc_builder.configuration[\"recalibrateBaseQs\"] = \"true\"\n vc_builder.configuration[\"intermediateRecalibFileStem\"] = self.output_stem\n vc_builder.build().run()\n\n self.assertTrue(os.path.exists(self.output_sam))\n\n sam_file = pysam.Samfile(self.output_sam, \"r\")\n reads = list(sam_file.fetch())\n self.assertEqual(len(reads), len(output_sample_bank[self.sample_name]))\n\n # Sort the sam as in sequence bank.\n # output_sample_bank.sort_sequence_banks()\n output_reads = sorted(\n output_sample_bank[self.sample_name].build_reads(0, {}),\n key=lambda x: (x.pos, x.seq, x.qual, x.cigarstring, x.mapq)\n )\n reads.sort(key=lambda x: (x.pos, x.seq, x.qual, x.cigarstring, x.mapq))\n\n for read, expected_sequence in zip(reads, output_reads):\n self.assertEqual(read.pos, expected_sequence.pos)\n self.assertEqual(read.seq, expected_sequence.seq)\n self.assert_matching_ascii_qualities(\n read.qual, expected_sequence.qual)\n self.assertEqual(read.cigarstring, expected_sequence.cigarstring)\n self.assertEqual(read.mapq, expected_sequence.mapq)\n\n sam_file.close()\n", "id": "7892085", "language": "Python", "matching_score": 2.6592812538146973, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/ascii_quality_recalibration_runner.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.common.exceptions import weCallException\nimport unittest\nfrom wecall.bamutils.sample_bank import SampleBank\nfrom wecall.genomics.variant import Variant\n\n\nclass TestSampleBank(unittest.TestCase):\n def setUp(self):\n self.default_char = \"H\"\n\n def test_should_add_sequences_with_same_reference(self):\n sample_bank = SampleBank(\"AAATTTTGGGGG\")\n sample_bank.add_sample_name(\"SAMPLE1\")\n sample_bank.add_sample_name(\"SAMPLE2\")\n\n self.assertEqual(\n sample_bank[\"SAMPLE1\"].reference.ref_seq,\n sample_bank[\"SAMPLE2\"].reference.ref_seq)\n\n def test_should_return_all_variants(self):\n sample_bank = SampleBank(\"AAATTTTGGGAG\")\n sample_bank.add_sample_name(\"SAMPLE1\")\n sample_bank.add_sample_name(\"SAMPLE2\")\n\n sample_bank[\"SAMPLE1\"].add_sequence(\".....G......\")\n sample_bank[\"SAMPLE2\"].add_sequence(\"..........*.\")\n\n exp_variant1 = Variant(sample_bank.reference.chrom, 5, \"T\", \"G\")\n exp_variant2 = Variant(sample_bank.reference.chrom, 9, \"GA\", \"G\")\n self.assertEqual(sample_bank[\"SAMPLE1\"].variants, {exp_variant1})\n self.assertEqual(sample_bank[\"SAMPLE2\"].variants, {exp_variant2})\n self.assertEqual(sample_bank.variants, {exp_variant1, exp_variant2})\n\n def test_should_raise_for_invalid_ref_string(self):\n self.assertRaisesRegex(\n weCallException,\n \"Illegal character in reference sequence .*\",\n SampleBank,\n \"..ATTTTGGGAG\"\n )\n\n def test_should_raise_when_adding_existing_sample(self):\n sample_bank = SampleBank(\"AAA\")\n sample_name = \"SAMPLE1\"\n sample_bank.add_sample_name(sample_name)\n\n self.assertRaisesRegex(\n weCallException,\n \"Sample SAMPLE1 already exists in the SampleBank.\",\n sample_bank.add_sample_with_seqs_and_quals,\n sample_name,\n []\n )\n\n def test_should_add_sequence_with_quality(self):\n sample_bank = SampleBank(\"AAA\")\n sample_name = \"SAMPLE1\"\n sample_bank.add_sample_name(sample_name)\n sample_bank[sample_name].add_sequence(\"...\", quality_string=\"007\")\n read_lists = [builder.build_reads(0, {})\n for builder in sample_bank[\"SAMPLE1\"]]\n reads = [read for read_list in read_lists for read in read_list]\n\n # ascii: \"0\": \"!\", 
\"1\": \"+\", \"2\": \"5\", \"3\": \"?\", \"4\": \"I\", \"5\": \"S\",\n # \"6\": \"]\", \"7\": \"g\", \"8\": \"q\", \"9\": \"{\"\n self.assertEqual(reads[0].qual, \"!!g\")\n\n def test_should_add_short_sequence_and_quality_list(self):\n sample_bank = SampleBank(\"AAA\")\n sample_bank.add_sample_with_seqs_and_quals(\"SAMPLE1\", [\"...\", \"007\"])\n read_lists = [builder.build_reads(0, {})\n for builder in sample_bank[\"SAMPLE1\"]]\n reads = [read for read_list in read_lists for read in read_list]\n\n self.assertEqual(reads[0].qual, \"!!g\")\n\n def test_should_add_seq_and_quals_list_with_deletion(self):\n sample_bank = SampleBank(\"AAA\")\n sample_bank.add_sample_with_seqs_and_quals(\"SAMPLE1\", [\".*C\", \"1 3\"])\n read_lists = [builder.build_reads(0, {})\n for builder in sample_bank[\"SAMPLE1\"]]\n reads = [read for read_list in read_lists for read in read_list]\n\n self.assertEqual(reads[0].qual, \"+?\")\n\n def test_should_add_two_sequence_list(self):\n sample_bank = SampleBank(\"AAA\")\n sample_bank.add_sample_with_seqs_and_quals(\"SAMPLE1\", [\"...\", \" ..\"])\n read_lists = [builder.build_reads(0, {})\n for builder in sample_bank[\"SAMPLE1\"]]\n reads = [read for read_list in read_lists for read in read_list]\n reads.sort(key=lambda x: (x.pos, x.seq, x.qual, x.cigarstring, x.mapq))\n\n self.assertEqual(len(reads), 2)\n self.assertEqual(reads[0].qual, \"HHH\")\n self.assertEqual(reads[1].qual, \"HH\")\n\n def test_should_add_two_seqs_with_one_qual_string(self):\n sample_bank = SampleBank(\"AAA\")\n sample_bank.add_sample_with_seqs_and_quals(\n \"SAMPLE1\", [\"...\", \"007\", \" ..\"])\n read_lists = [builder.build_reads(0, {})\n for builder in sample_bank[\"SAMPLE1\"]]\n reads = [read for read_list in read_lists for read in read_list]\n reads.sort(key=lambda x: (x.pos, x.seq, x.qual, x.cigarstring, x.mapq))\n\n self.assertEqual(len(reads), 2)\n self.assertEqual(reads[0].seq, \"AAA\")\n self.assertEqual(reads[0].qual, \"!!g\")\n self.assertEqual(reads[1].seq, \"AA\")\n self.assertEqual(reads[1].qual, self.default_char * 2)\n\n def test_should_add_complex_seq_and_quals_list(self):\n sample_bank = SampleBank(\"AAA\")\n sample_bank.add_sample_with_seqs_and_quals(\n \"SAMPLE1\", [\"...\", \"007\", \" ..\", \".*C\", \"1 3\"])\n read_lists = [builder.build_reads(0, {})\n for builder in sample_bank[\"SAMPLE1\"]]\n reads = [read for read_list in read_lists for read in read_list]\n reads.sort(key=lambda x: (x.pos, x.seq, x.qual, x.cigarstring, x.mapq))\n\n self.assertEqual(len(reads), 3)\n self.assertEqual(reads[0].qual, \"!!g\")\n self.assertEqual(reads[1].qual, \"+?\")\n self.assertEqual(reads[2].qual, self.default_char * 2)\n\n def test_should_add_seq_and_quals_list_with_fwd_and_rev_reads(self):\n sample_bank = SampleBank(\"AAA\")\n sample_bank.add_sample_with_seqs_and_quals(\n \"SAMPLE1\", [\"...\", \"007\"], n_fwd=1, n_rev=2)\n\n self.assertEqual(len(sample_bank[\"SAMPLE1\"]), 1)\n self.assertEqual(sample_bank[\"SAMPLE1\"][0].n_fwd, 1)\n self.assertEqual(sample_bank[\"SAMPLE1\"][0].n_rev, 2)\n\n def test_should_raise_when_multiple_quality_strings_specified_per_sequence(self):\n sample_bank = SampleBank(\"AAA\")\n\n self.assertRaisesRegex(\n weCallException,\n \"Illegal character in sequence \\'008\\'\",\n sample_bank.add_sample_with_seqs_and_quals,\n \"SAMPLE1\",\n [\"...\", \"007\", \"008\"]\n )\n\n def test_should_place_variants_at_custom_position(self):\n sample_bank = SampleBank(\"AAATTTTGGGAG\", 100)\n sample_bank.add_sample_name(\"SAMPLE1\")\n 
sample_bank.add_sample_name(\"SAMPLE2\")\n\n sample_bank[\"SAMPLE1\"].add_sequence(\".....G......\")\n sample_bank[\"SAMPLE2\"].add_sequence(\"..........*.\")\n\n exp_variant1 = Variant(sample_bank.reference.chrom, 105, \"T\", \"G\")\n exp_variant2 = Variant(sample_bank.reference.chrom, 109, \"GA\", \"G\")\n self.assertEqual(sample_bank[\"SAMPLE1\"].variants, {exp_variant1})\n self.assertEqual(sample_bank[\"SAMPLE2\"].variants, {exp_variant2})\n self.assertEqual(sample_bank.variants, {exp_variant1, exp_variant2})\n", "id": "5010891", "language": "Python", "matching_score": 3.1643128395080566, "max_stars_count": 8, "path": "test/test_utils/bamutils/test_sample_bank.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport unittest\nfrom wecall.bamutils.sequence_builder import sequence_builder\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics.reference_chromosome import ReferenceChromosome\n\n\nclass TestSequenceBuilder(unittest.TestCase):\n\n def test_should_raise_for_invalid_char_in_seq(self):\n with self.assertRaisesRegex(weCallException, \"Illegal character in sequence .*'\"):\n sequence_builder(ReferenceChromosome(\"TAAAA\"), \"..&..\")\n\n def test_should_raise_for_lower_case_char_in_fwd_seq(self):\n with self.assertRaisesRegex(weCallException, \"Illegal character in sequence .*\"):\n sequence_builder(ReferenceChromosome(\"TAAAA\"), \"..c..\")\n\n def test_should_raise_for_dot_in_reverse_seq(self):\n with self.assertRaisesRegex(weCallException, \"Illegal character in sequence .*\"):\n sequence_builder(ReferenceChromosome(\"TAAAA\"), \",,c.,\")\n\n def test_should_raise_when_quality_string_too_short(self):\n with self.assertRaisesRegex(weCallException, \"Quality string has to be of the same length as reference.\"):\n sequence_builder(ReferenceChromosome(\"TAAAA\"), \".....\", \"22 \")\n\n def test_should_raise_when_quality_string_too_short_due_to_insertions(self):\n with self.assertRaisesRegex(weCallException, \"Quality string has to be of the same length as reference.\"):\n sequence_builder(ReferenceChromosome(\"TA**A\"), \"..TT.\", \"1234\")\n\n def test_should_raise_when_quality_string_too_short_multisequence(self):\n with self.assertRaisesRegex(weCallException, \"Quality string has to be of the same length as reference.\"):\n sequence_builder(\n ReferenceChromosome(\"TAAAA*A\"),\n \"... ..\",\n \"12 4\")\n\n def test_should_raise_when_quality_assigned_to_gap(self):\n with self.assertRaisesRegex(weCallException, \"Cannot assign base quality inside a gap.\"):\n sequence_builder(ReferenceChromosome(\n \"TAAAA*A\"), \"... 
..\", \"12 34 \")\n\n def test_should_raise_when_quality_string_too_long_due_to_insertions(self):\n with self.assertRaisesRegex(weCallException, \"Quality string has to be of the same length as reference.\"):\n sequence_builder(ReferenceChromosome(\"TA**A\"), \"..TT.\", \"123\")\n\n def test_should_build_correct_sequence_without_any_whitespace(self):\n ref = ReferenceChromosome(\"C*CC\")\n annotated_seqs = sequence_builder(ref, \".*.T\")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n self.assertEqual(len(reads), 1)\n self.assertEqual(reads[0].pos, 0)\n self.assertEqual(reads[0].rlen, 3)\n self.assertEqual(reads[0].seq, \"CCT\")\n\n def test_should_build_correct_sequence_with_insertion_at_the_end(self):\n ref = ReferenceChromosome(\"CCC**\")\n builders = sequence_builder(ref, \"...TT\")\n read_lists = [builder.build_reads(0, {}) for builder in builders]\n reads = [read for read_list in read_lists for read in read_list]\n self.assertEqual(reads[0].pos, 0)\n self.assertEqual(reads[0].rlen, 5)\n self.assertEqual(reads[0].seq, \"CCCTT\")\n\n def test_should_interpret_leading_whitespace_to_override_pos_from(self):\n ref = ReferenceChromosome(\"CATG\")\n annotated_seqs = sequence_builder(ref, \" .T\")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n self.assertEqual(len(reads), 1)\n self.assertEqual(reads[0].pos, 2)\n self.assertEqual(reads[0].seq, \"TT\")\n\n def test_should_interpret_leading_whitespace_to_override_pos_from_when_ref_has_deletion(self):\n ref = ReferenceChromosome(\"C*TG\")\n annotated_seqs = sequence_builder(ref, \" C.\")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n self.assertEqual(len(reads), 1)\n self.assertEqual(reads[0].pos, 1)\n self.assertEqual(reads[0].seq, \"CG\")\n\n def test_should_interpret_trailing_whitespace_to_override_pos_to(self):\n ref = ReferenceChromosome(\"CATG\")\n annotated_seqs = sequence_builder(ref, \".C \")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n self.assertEqual(len(reads), 1)\n self.assertEqual(reads[0].pos, 0)\n self.assertEqual(reads[0].seq, \"CC\")\n\n def test_should_interpret_trailing_whitespace_to_override_pos_to_when_seq_has_insertion(self):\n ref = ReferenceChromosome(\"C*GA\")\n annotated_seqs = sequence_builder(ref, \".C \")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n self.assertEqual(reads[0].rlen, 2)\n\n def test_should_interpret_trailing_whitespace_to_override_positions_for_complex_ref_and_seq(self):\n ref = ReferenceChromosome(\"ACCC*G*A\")\n annotated_seqs = sequence_builder(ref, \".**.C \")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n self.assertEqual(reads[0].pos, 0)\n self.assertEqual(reads[0].rlen, 3)\n\n def test_should_translate_reverse_seq_into_correct_annotations(self):\n ref = ReferenceChromosome(\"CCTG\")\n annotated_seq = sequence_builder(ref, \",,c,\")[0]\n self.assertEqual(annotated_seq.n_fwd, 0)\n self.assertEqual(annotated_seq.n_rev, 1)\n\n def test_should_translate_reverse_seq_into_correct_sequence(self):\n ref = 
ReferenceChromosome(\"AAACCTG*TAA\")\n builders = sequence_builder(ref, \" ,,c,*, \")\n read_lists = [builder.build_reads(0, {}) for builder in builders]\n reads = [read for read_list in read_lists for read in read_list]\n self.assertEqual(reads[0].seq, 'CCCGT')\n\n\nclass TestQualityBuilding(unittest.TestCase):\n\n def setUp(self):\n self.ascii_codes = {\n \"0\": \"!\",\n \"1\": \"+\",\n \"2\": \"5\",\n \"3\": \"?\",\n \"4\": \"I\",\n \"5\": \"S\",\n \"6\": \"]\",\n \"7\": \"g\",\n \"8\": \"q\",\n \"9\": \"{\"}\n self.default_qual = \"H\"\n\n def test_should_build_with_default_quality_for_None(self):\n ref = ReferenceChromosome(\"AAAAA\")\n annotated_seqs = sequence_builder(ref, \".....\")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n self.assertEqual(reads[0].qual, self.default_qual * 5)\n\n def test_should_build_with_custom_quality_with_del(self):\n ref = ReferenceChromosome(\"AAAAA\")\n annotated_seqs = sequence_builder(ref, \"..*..\", quality_string=\"31 00\")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n reads.sort(key=lambda x: (x.pos, x.seq, x.qual, x.cigarstring, x.mapq))\n self.assertEqual(\n reads[0].qual,\n self.ascii_codes[\"3\"] +\n self.ascii_codes[\"1\"] +\n self.ascii_codes[\"0\"] *\n 2)\n\n def test_should_build_with_custom_quality_with_ins(self):\n ref = ReferenceChromosome(\"AA**A\")\n annotated_seqs = sequence_builder(ref, \"..CC.\", quality_string=\"31220\")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n self.assertEqual(\n reads[0].qual,\n self.ascii_codes[\"3\"] +\n self.ascii_codes[\"1\"] +\n self.ascii_codes[\"2\"] *\n 2 +\n self.ascii_codes[\"0\"])\n\n def test_should_build_with_custom_quality_and_sequence_shorter_than_reference(self):\n ref = ReferenceChromosome(\"AAAAAAAAAAAA\")\n builders = sequence_builder(\n ref, \" ..*.. \", quality_string=\" 31 0 \")\n read_lists = [builder.build_reads(0, {}) for builder in builders]\n reads = [read for read_list in read_lists for read in read_list]\n self.assertEqual(\n reads[0].qual,\n self.ascii_codes[\"3\"] +\n self.ascii_codes[\"1\"] +\n self.ascii_codes[\"0\"] +\n self.default_qual)\n\n def test_should_raise_when_assigning_qual_to_deletion(self):\n with self.assertRaisesRegex(weCallException, \"Cannot assign base quality to a deleted base.\"):\n sequence_builder(ReferenceChromosome(\"AAAA\"), \".*..\", \" 1 \")\n\n\nclass TestMultiSequenceLines(unittest.TestCase):\n def test_should_build_two_seqs_defined_on_single_line(self):\n ref = ReferenceChromosome(\"AAACCTGTAA\")\n annotated_seqs = sequence_builder(ref, \" ... .C. \")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n reads.sort(key=lambda x: (x.pos, x.seq, x.qual, x.cigarstring, x.mapq))\n self.assertEqual(len(reads), 2)\n self.assertEqual(reads[0].pos, 1)\n self.assertEqual(reads[0].seq, \"AAC\")\n self.assertEqual(reads[1].pos, 6)\n self.assertEqual(reads[1].seq, \"GCA\")\n\n def test_should_build_two_complex_seqs_defined_on_single_line(self):\n ref = ReferenceChromosome(\"AA*CC*TGTAAGG\")\n annotated_seqs = sequence_builder(ref, \" .G. 
,c,*, \")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n reads.sort(key=lambda x: (x.pos, x.seq, x.qual, x.cigarstring, x.mapq))\n self.assertEqual(len(reads), 2)\n self.assertEqual(reads[0].pos, 1)\n self.assertEqual(reads[0].seq, \"AGC\")\n self.assertEqual(reads[1].pos, 4)\n self.assertEqual(reads[1].seq, \"TCTA\")\n\n def test_should_build_correct_qualities_for_two_complex_seqs_defined_on_single_line(self):\n ref = ReferenceChromosome(\"AA*CC*TGTAAGG\")\n annotated_seqs = sequence_builder(\n ref, \" .G. ,c,*, \", \" 2 1 0 \")\n read_lists = [builder.build_reads(0, {}) for builder in annotated_seqs]\n reads = [read for read_list in read_lists for read in read_list]\n reads.sort(key=lambda x: (x.pos, x.seq, x.qual, x.cigarstring, x.mapq))\n self.assertEqual(len(reads), 2)\n self.assertEqual(reads[0].seq, \"AGC\")\n self.assertEqual(reads[0].pos, 1)\n self.assertEqual(reads[0].qual, 'H5H')\n self.assertEqual(reads[1].seq, \"TCTA\")\n self.assertEqual(reads[1].pos, 4)\n self.assertEqual(reads[1].qual, '+HH!')\n", "id": "3178880", "language": "Python", "matching_score": 3.6717617511749268, "max_stars_count": 8, "path": "test/test_utils/bamutils/test_sequence_builder.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import TestCase\nfrom wecall.bamutils.sequence_quality import SequenceQuality\nfrom wecall.common.exceptions import weCallException\n\n\nclass TestGetVariantsFromSequence(TestCase):\n\n def setUp(self):\n self.ascii_codes = {\n \"0\": \"!\",\n \"1\": \"+\",\n \"2\": \"5\",\n \"3\": \"?\",\n \"4\": \"I\",\n \"5\": \"S\",\n \"6\": \"]\",\n \"7\": \"g\",\n \"8\": \"q\",\n \"9\": \"{\"}\n self.default_qual = \"H\"\n\n def test_should_raise_on_non_numerical_character(self):\n self.assertRaisesRegex(\n weCallException,\n \"Illegal character in the quality string .*\",\n SequenceQuality,\n \" A \"\n )\n\n def test_should_create_default_string_for_empty_input(self):\n qual = SequenceQuality(\" \")\n self.assertEqual(qual.ascii_quality, self.default_qual * 5)\n\n def test_should_correctly_assign_to_ascii(self):\n self.assertEqual(SequenceQuality.to_ascii(0), '!')\n self.assertEqual(SequenceQuality.to_ascii(20), '5')\n self.assertEqual(SequenceQuality.to_ascii(90), '{')\n\n def test_should_map_a_fully_qualified_string(self):\n qual = SequenceQuality(\"0123456789\")\n for i in range(10):\n self.assertEqual(qual.ascii_quality[i], SequenceQuality.to_ascii(\n SequenceQuality.QUALITY_MAPPING[str(i)]))\n\n def test_should_correctly_map_gaps_to_default(self):\n qual = SequenceQuality(\" 6 1 \")\n expected_qualities = self.default_qual + self.ascii_codes[\"6\"] + self.default_qual \\\n + self.ascii_codes[\"1\"] + self.default_qual\n self.assertEqual(qual.ascii_quality, expected_qualities)\n\n\nclass TestQualityToAndFromAscii(TestCase):\n def test_should_convert_numeral(self):\n self.assertEqual(SequenceQuality.to_ascii(0), '!')\n\n def test_to_and_from_ascii_should_be_one_to_one_up_to_256(self):\n for i in range(0, 256 - ord('!')):\n self.assertEqual(\n i, SequenceQuality.from_ascii(\n SequenceQuality.to_ascii(i)))\n\n\nclass TestUnparsingAsciiQualityString(TestCase):\n def test_should_be_able_to_convert_from_ascii_quality_string(self):\n\n ascii_quality_string = \"!\\\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\" # noqa\n quality_string = \"000000000011111111112222222222333333333\" + \" \" + 
\"444444444455555555556666666666777777777788888888889999\" # noqa\n\n self.assertEqual(SequenceQuality.parse_ascii_to_quality_string(\n ascii_quality_string), quality_string)\n", "id": "3819210", "language": "Python", "matching_score": 2.4471356868743896, "max_stars_count": 8, "path": "test/test_utils/bamutils/test_sequence_quality.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.bamutils.sequence_position import MISSING_BASE\nimport re\nfrom wecall.common.exceptions import weCallException\n\n\nclass SequenceQuality(object):\n QUALITY_MAPPING = {\n \"0\": 0,\n \"1\": 10,\n \"2\": 20,\n \"3\": 30,\n \"4\": 40,\n \"5\": 50,\n \"6\": 60,\n \"7\": 70,\n \"8\": 80,\n \"9\": 90\n }\n\n DEFAULT_QUALITY = 39\n\n def __init__(self, quality_string, quality_mapping=QUALITY_MAPPING):\n if not SequenceQuality.is_valid_qual(quality_string):\n raise weCallException(\n \"Illegal character in the quality string {!r}\".format(quality_string))\n\n self.quality_mapping = quality_mapping\n self.ascii_quality = self.parse_quality_to_ascii(quality_string)\n\n def parse_quality_to_ascii(self, quality_string):\n ascii_quality = \"\"\n for qual_char in quality_string:\n if qual_char == MISSING_BASE:\n ascii_quality += SequenceQuality.to_ascii(\n SequenceQuality.DEFAULT_QUALITY)\n else:\n ascii_quality += SequenceQuality.to_ascii(\n self.quality_mapping[qual_char])\n\n return ascii_quality\n\n @staticmethod\n def parse_ascii_to_quality_string(ascii_quality):\n quality_string = \"\"\n for ascii_char in ascii_quality:\n quality = SequenceQuality.from_ascii(ascii_char)\n if quality == SequenceQuality.DEFAULT_QUALITY:\n quality_string += MISSING_BASE\n else:\n quality_string += str(int(quality / 10))\n return quality_string\n\n @staticmethod\n def to_ascii(quality_score):\n return chr(quality_score + ord('!'))\n\n @staticmethod\n def from_ascii(char_quality):\n return ord(char_quality) - ord('!')\n\n @staticmethod\n def is_valid_qual(quality_string):\n return re.match(r'^[ \\d]*\\Z', quality_string)\n", "id": "10187009", "language": "Python", "matching_score": 1.462973952293396, "max_stars_count": 8, "path": "python/wecall/bamutils/sequence_quality.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.common.exceptions import weCallException\n\nMATCHING_BASE = \".\"\nDELETED_BASE = \"*\"\nMISSING_BASE = \" \"\n\n\nclass SequencePosition(object):\n\n def __init__(self, ref_char, seq_char, qual_char):\n self.__validate_input(ref_char, seq_char, qual_char)\n\n self.ref_char = ref_char\n self.seq_char = seq_char\n self.is_gap = self.__calculate_is_gap()\n self.qual_char = qual_char\n\n self.__validate_character_combination()\n\n def update_ref_pos(self, ref_pos):\n if self.ref_char != DELETED_BASE:\n return ref_pos + 1\n else:\n return ref_pos\n\n def __calculate_is_gap(self):\n return self.seq_char == MISSING_BASE\n\n @staticmethod\n def __validate_input(ref_char, seq_char, qual_char):\n if not all(len(c) == 1 for c in [ref_char, seq_char, qual_char]):\n raise weCallException(\n \"All characters at sequence position has to be of length 1.\")\n\n if ref_char == MISSING_BASE:\n raise weCallException(\"Missing reference character.\")\n\n def __validate_character_combination(self):\n if self.ref_char == DELETED_BASE and self.seq_char == MATCHING_BASE:\n raise weCallException(\n \"Invalid character combination: ref char = {}, sequence char = {}\".format(\n self.ref_char, self.seq_char))\n\n if self.seq_char == DELETED_BASE and self.qual_char != MISSING_BASE:\n raise 
weCallException(\n \"Cannot assign base quality to a deleted base.\")\n if self.is_gap and self.qual_char != MISSING_BASE:\n raise weCallException(\"Cannot assign base quality inside a gap.\")\n", "id": "1811310", "language": "Python", "matching_score": 3.5598089694976807, "max_stars_count": 8, "path": "python/wecall/bamutils/sequence_position.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.common.exceptions import weCallException\nimport unittest\nfrom wecall.bamutils.sequence_position import SequencePosition\n\n\nclass TestSequencePosition(unittest.TestCase):\n def test_should_recognise_gap(self):\n seq_pos = SequencePosition(\"C\", \" \", \" \")\n self.assertTrue(seq_pos.is_gap)\n\n def test_should_recognise_sequence(self):\n seq_pos = SequencePosition(\"A\", \"C\", \" \")\n self.assertFalse(seq_pos.is_gap)\n\n def test_should_fail_at_quality_inside_gap(self):\n self.assertRaisesRegex(\n weCallException,\n \"Cannot assign base quality inside a gap.\",\n SequencePosition, \"A\", \" \", \"2\"\n )\n\n def test_should_fail_at_missing_ref_char(self):\n self.assertRaisesRegex(\n weCallException,\n \"Missing reference character.\",\n SequencePosition, \" \", \"A\", \" \"\n )\n\n def test_should_fail_at_empty_ref_char(self):\n self.assertRaisesRegex(\n weCallException,\n \"All characters at sequence position has to be of length 1.\",\n SequencePosition, \"\", \"C\", \"2\"\n )\n\n def test_should_fail_at_empty_seq_char(self):\n self.assertRaisesRegex(\n weCallException,\n \"All characters at sequence position has to be of length 1.\",\n SequencePosition, \"A\", \"\", \"2\"\n )\n\n def test_should_fail_at_empty_qual_char(self):\n self.assertRaisesRegex(\n weCallException,\n \"All characters at sequence position has to be of length 1.\",\n SequencePosition, \"A\", \"C\", \"\"\n )\n\n def test_should_fail_at_too_log_seq_char(self):\n self.assertRaisesRegex(\n weCallException,\n \"All characters at sequence position has to be of length 1.\",\n SequencePosition, \"A\", \"CT\", \" \"\n )\n\n def test_should_fail_at_qual_assignment_to_deleted_base(self):\n self.assertRaisesRegex(\n weCallException,\n \"Cannot assign base quality to a deleted base.\",\n SequencePosition, \"A\", \"*\", \"2\"\n )\n\n def test_should_increase_ref_position_for_matching_base(self):\n seq_pos = SequencePosition(\"A\", \"C\", \" \")\n ref_pos = seq_pos.update_ref_pos(2)\n self.assertEqual(ref_pos, 3)\n\n def test_should_increase_ref_position_for_deletion(self):\n seq_pos = SequencePosition(\"A\", \"*\", \" \")\n ref_pos = seq_pos.update_ref_pos(2)\n self.assertEqual(ref_pos, 3)\n\n def test_should_not_increase_ref_position_for_insertion(self):\n seq_pos = SequencePosition(\"*\", \"C\", \" \")\n ref_pos = seq_pos.update_ref_pos(2)\n self.assertEqual(ref_pos, 2)\n", "id": "9643854", "language": "Python", "matching_score": 2.0196871757507324, "max_stars_count": 8, "path": "test/test_utils/bamutils/test_sequence_position.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport unittest\nfrom wecall.genomics.chromosome import chromosome_comp\n\n\nclass TestChromosomeSort(unittest.TestCase):\n def test_should_sort_chrom_2_before_chrom_10(self):\n self.assertTrue(chromosome_comp('2', '10'))\n self.assertFalse(chromosome_comp('10', '2'))\n\n def test_should_sort_22_before_X(self):\n self.assertTrue(chromosome_comp('22', 'X'))\n self.assertFalse(chromosome_comp('X', '22'))\n\n def test_should_sort_X_before_Y(self):\n self.assertTrue(chromosome_comp('X', 'Y'))\n 
self.assertFalse(chromosome_comp('Y', 'X'))\n\n def test_should_sort_Y_before_MT(self):\n self.assertTrue(chromosome_comp('Y', 'MT'))\n self.assertFalse(chromosome_comp('MT', 'Y'))\n\n def test_should_sort_non_standard_chroms_after_standard_chroms(self):\n self.assertTrue(chromosome_comp(\"MT\", \"GL000193.1\"))\n self.assertFalse(chromosome_comp(\"GL000193.1\", \"MT\"))\n\n def test_should_sort_non_standard_chroms_lexicographically(self):\n self.assertTrue(chromosome_comp(\"GL000193.1\", \"GL000193.2\"))\n self.assertFalse(chromosome_comp(\"GL000193.2\", \"GL000193.1\"))\n", "id": "5819377", "language": "Python", "matching_score": 0.08905292302370071, "max_stars_count": 8, "path": "test/test_utils/vcfutils/test_chromosome.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import expectedFailure\n\nfrom wecall_test_drivers.ascii_quality_recalibration_runner import AsciiQualityRecalibrationTest\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\n\n\nclass TestQualityRecalibrationDeletion(AsciiQualityRecalibrationTest):\n def test_should_not_recalibrate_good_read_data_for_deletion(self):\n reference = \"ATCTAATAGCTATCAGCAATATCGCGCGTATTATTTATTTAT\"\n bam_spec = [\" ,,,,,,,,,,*,,,,,,,,,,,,,,,,,,,,,, \",\n \"..............*............. \",\n \" ,,,,,,,,*,,,,,,,,,,,,,,,,,,,, \",\n \"..............*....................... \"]\n\n self.assert_quality_recalibrated_in_output_bam(reference, bam_spec, bam_spec)\n\n\nclass TestQualityRecalibrationInsertion(AsciiQualityRecalibrationTest):\n def test_should_not_recalibrate_good_read_data_for_deletion(self):\n reference = \"ATCTAATAGCTATC*GCAATATCGCGCGTATTATTTATTTAT\"\n bam_spec = [\" ,,,,,,,,,,a,,,,,,,,,,,,,,,,,,,,,, \",\n \"..............A............. \",\n \" ,,,,,,,,a,,,,,,,,,,,,,,,,,,,, \",\n \"..............A....................... \"]\n\n self.assert_quality_recalibrated_in_output_bam(reference, bam_spec, bam_spec)\n\n\nclass TestQualityRecalibrationBespokeQualities(AsciiQualityRecalibrationTest):\n def test_should_not_recalibrate_good_read_data_with_snp_1(self):\n reference = \"ATCTAATAGCTATCAGCAATATCGCGCGTATTATTTATTTAT\"\n bam_spec = [\" ,,,,,,,,,,t,,,,,,,,,,,,,,,,,,,,,, \",\n \" 0 \",\n \"..............T............. \",\n \" 0 \",\n \" ,,,,,,,,t,,,,,,,,,,,,,,,,,,,, \",\n \" 0 \",\n \"..............T....................... \",\n \" 0 \"]\n\n self.assert_quality_recalibrated_in_output_bam(reference, bam_spec, bam_spec)\n\n def test_should_not_recalibrate_good_read_data_with_snp_2(self):\n reference = \"ATCTAATAGCTATCAGCAATATCGCGCGTATTATTTATTTAT\"\n bam_spec = [\" ,,,,,,,,,,t,,,,,,,,,,,,,,,,,,,,,, \",\n \" 1 \",\n \"..............T............. \",\n \" 1 \",\n \" ,,,,,,,,t,,,,,,,,,,,,,,,,,,,, \",\n \" 1 \",\n \"..............T....................... 
\",\n \" 1 \"]\n\n self.assert_quality_recalibrated_in_output_bam(reference, bam_spec, bam_spec)\n\n\nclass TestQualityRecalibrationSingleSNP(AsciiQualityRecalibrationTest):\n def test_should_not_recalibrate_region_snp_on_two_forward_strands_out_of_eight(self):\n reference = \"ATCTAATAGCATCTAATAGCTAGCATCCGTAACAGCAATATCGCGCGTATTATTTATTTAT\"\n bam_spec = [\n \"..............................T..............................\",\n \" .....................T..............................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \"]\n\n self.assert_quality_recalibrated_in_output_bam(reference, bam_spec, bam_spec)\n\n def test_should_not_recalibrate_snp_on_one_forward_and_reverse_strand_out_of_eight(self):\n reference = \"ATCTAATAGCATCTAATAGCTAGCATCCGTAACAGCAATATCGCGCGTATTATTTATTTAT\"\n bam_spec = [\n \"..............................T.......................... \",\n \" ....................................................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,t,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \"]\n\n self.assert_quality_recalibrated_in_output_bam(reference, bam_spec, bam_spec)\n\n def test_should_recalibrate_around_snp_on_two_forward_strands_out_of_nine(self):\n reference = \"ATCTAATAGCATCTAATAGCTAGCATCCGTAACAGCAATATCGCGCGTATTATTTATTTAT\"\n input_bam = [\n \"..............................T..............................\",\n \" .....................T..............................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \"]\n\n output_bam = [\n \"..............................T..............................\",\n \" 000000000000000000000000000000000000000000\",\n \" .....................T..............................\",\n \" 0000000000000000000000000000000000000000\",\n \" ....................................................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \"]\n\n self.assert_quality_recalibrated_in_output_bam(reference, input_bam, output_bam)\n\n def test_should_recalibrate_snp_on_one_forward_and_reverse_strand_out_of_nine(self):\n reference = \"ATCTAATAGCATCTAATAGCTAGCATCCGTAACAGCAATATCGCGCGTATTATTTATTTAT\"\n input_bam = [\n \"..............................T..............................\",\n \" 
....................................................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,t,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \"]\n\n output_bam = [\n \"..............................T..............................\",\n \" 000000000000000000000000000000000000000000\",\n \" ....................................................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ....................................................\",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,t,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" 0000000000000000000000000000000000000000 \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \"]\n\n self.assert_quality_recalibrated_in_output_bam(reference, input_bam, output_bam)\n\n\nclass TestRealDataExamplesFromNA12878(AsciiWecallRunnerTest):\n\n def calls_variants_without_recalibration(self, ref, sequence_list, expected_ascii_haplotypes):\n self.calls_variants(\n ref, sequence_list,\n config_dict={\"recalibrateBaseQs\": False, \"overwrite\": True},\n expected_ascii_haplotypes=expected_ascii_haplotypes\n )\n\n def calls_variants_with_recalibration(self, ref, sequence_list, expected_ascii_haplotypes):\n self.calls_variants(\n ref, sequence_list,\n config_dict={\"recalibrateBaseQs\": True, \"overwrite\": True},\n expected_ascii_haplotypes=expected_ascii_haplotypes\n )\n\n def calls_variants_with_and_without_recalibration(self, ref, sequence_list, expected_ascii_haplotypes):\n self.calls_variants_with_recalibration(\n ref, sequence_list, expected_ascii_haplotypes)\n self.calls_variants_without_recalibration(\n ref, sequence_list, expected_ascii_haplotypes)\n\n def test_calls_two_good_snps_with_and_without_recalibration(self):\n self.calls_variants_with_and_without_recalibration(\n \"ACGCCCCCTGCAAAAACTACTAAAAA\",\n [\".T........................\",\n \".T........................\",\n \"...........C..............\",\n \"...........C..............\"],\n [\".T........................\", # Expected calls\n \"...........C..............\"]\n )\n\n @expectedFailure\n def test_calls_false_positive_snp_with_and_without_recalibration(self):\n self.calls_variants_without_recalibration(\n \"AGTGCCTGTTGCAAACTTAAAGTAT**********AA**********TAAAATAAA**********ATAAATAAAAAAAAATAAAAAAAAGAATA\",\n [\",,,,,,,,,,, ..........**********..**********.........**********.............................\",\n \"................. ......**********..**********.........**********.............................\",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,**********,,, ...**********.............................\",\n \".........................**********..**********..... 
..**********.............................\",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,**********,,,,,,,, ,,,,,,,,,,,,,,,,,,,,,,,\",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,**********,,,,,,,,,**********,,,,, ..................\",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,**********,,,,,,,,,**********,,,,,,,, .................\",\n \"...............T.........**********G.**********.........**********.............. ...........\",\n \" 1 1 \",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,**********,,,,,,,,,**********,,,,,,,,,,,,,,,,, ..........\",\n \",,,t,,,,,,,,,,,,,,,,,,,,,**********,,**********,,,,,,,,,**********,,,,, ................\",\n \" 1 \",\n \",,,,,,,,,,,,,,,,,,,,,,,,,aataaaataa,,**********,,,,,,,,,**********,,,,,,,,,,,,, ........\",\n \" 3333333333 \",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,**********,, ,,,,,\",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,**********,,,,,,,,,**********,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \",,,,,,,,,,,,,,,,,,,,,,,,,aataaaataa,,**********,,,,,,,,,**********,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" 3333333333 \",\n \",,,,,,,,,,,,,,,,,,,,,,,,,aataaaataa,,**********,,,,,,,,,**********,,,,,,,,,,,,,,,,,,,,,,,,,,, \",\n \" 3333333333 \",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,**********,,,,,,,,,**********,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \",,aa,,,,,,,,,,,,,,,,,a,,,aataaaataa,,**********,,,,,,,,,**********,,,,,g,,,,,,,,,,,,,,,,,,,,,, \",\n \" 11 1 3333333333 1 \",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,taaaataaac,,,,,,,,,**********,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \" 3333333333 \",\n \",,,,,,,,,,,,,,,,,,,,,,,,,aataaaataa,,**********,,,,,,,,,**********,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \" 3333333333 \",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,**********,,,,,,,,,**********,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,**********,,,,,,,,,**********,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \",,,,,,,,,,,,,,,,,,,,,,,,,**********,,**********,,,,,,,,,ataaaataaa,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \" 3333333333 \",\n \".........................**********..**********.........**********.....AT....T............ 
,,\",\n \" 1 \",\n \".........................**********..**********.........**********.............................\",\n \".........................**********..**********.........**********.....AT....T.................\"],\n\n [\".........................**********..**********.........**********.....AT......................\", # Expected calls # noqa\n \".........................AATAAAATAA..**********.........**********.....AT......................\"] # Expected calls # noqa\n )\n", "id": "655556", "language": "Python", "matching_score": 3.564532995223999, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_quality_recalibration.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\n\n\nclass TestCallingWithNsInReference(AsciiWecallRunnerTest):\n def test_should_not_call_deletion_of_gap_character(self):\n self.calls_variants(\n \"TTTTTTTTTTTNTTTTTTTTTTTTTTTT\",\n [\"...........*................\",\n \"...........*................\",\n \"...........*................\",\n \"...........*................\"],\n\n [\"............................\",\n \"............................\"], # Expected genotype\n )\n\n def test_should_not_deleletion_if_ref_is_N_after_aligning(self):\n self.calls_variants(\n \"TTTTTTTTTNAATCGTAATTTGACACAT\",\n [\"...........*................\",\n \"...........*................\",\n \"...........*................\",\n \"...........*................\"],\n\n [\"............................\",\n \"............................\"], # Expected genotype\n )\n\n def test_should_not_insertion_if_ref_is_N_after_aligning(self):\n self.calls_variants(\n \"TTTTTTTTNTG**ATCGTAATTTGACACAT\",\n [\"...........TG.................\",\n \"...........TG.................\",\n \"...........TG.................\",\n \"...........TG.................\"],\n\n [\"...........**.................\",\n \"...........**.................\"], # Expected genotype\n )\n\n def test_should_not_snp_if_ref_is_N(self):\n self.calls_variants(\n \"CTCNNNNNNNNNNTTTTTTTTTTTTTTT\",\n [\"....A.......................\",\n \"....A.......................\",\n \"....A.......................\",\n \"....A.......................\"],\n\n [\"............................\",\n \"............................\"], # Expected genotype\n )\n\n\nclass TestCallingWithNsInReads(AsciiWecallRunnerTest):\n def test_should_not_insertion_is_N(self):\n self.calls_variants(\n \"CTCT**TTTTTTTTTTCCCCCCCCCCCC\",\n [\"....NN......................\",\n \"....NN......................\",\n \"....NN......................\",\n \"....NN......................\"],\n\n [\"....**......................\",\n \"....**......................\"], # Expected genotype\n )\n\n def test_should_not_snp_if_alt_is_N(self):\n self.calls_variants(\n \"CTCTTTTTTTTTTTTTTTTTTTTTTTTT\",\n [\"....N.......................\",\n \"....N.......................\",\n \"....N.......................\",\n \"....N.......................\",\n \"............................\"],\n\n [\"............................\",\n \"............................\"], # Expected genotype\n )\n", "id": "9615017", "language": "Python", "matching_score": 1.6184170246124268, "max_stars_count": 8, "path": "test/wecall_acceptance/malformed_inputs/test_calling_with_non_standard_bases_in_data.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass 
TestLeftAlignment(AsciiWecallRunnerTest):\n def test_calls_deletion_aligned_to_homopolymer_start(self):\n self.calls_variants(\n \"AAATGAAAAAAAAAACTGTTACGGC\",\n [\".............*...........\",\n \"............*............\",\n \".........*...............\"],\n [\".....*...................\",\n \".....*...................\"]\n )\n\n def test_calls_deletion_aligned_to_complex_polymer_repeat_units(self):\n self.calls_variants(\n \"AAATGATCGTATCGTATCGTATCGTATCGTATCGTATCGTATCGTG\",\n [\"..................................*****.......\",\n \".................................*****........\",\n \"..............................*****...........\"],\n [\".....*****....................................\",\n \".....*****....................................\"]\n )\n\n def test_calls_deletion_correctly_after_a_snp(self):\n svc_driver = SVCDriver(self) \\\n .with_ref_sequence(\n \"AAATGAAAAAAAAAACTGTTACGGC\", chrom=\"1\"\n ).with_read(\n \"......T......*...........\", chrom=\"1\"\n ).with_read(\n \"......T.....*............\", chrom=\"1\"\n ).with_read(\n \"......T..*...............\", chrom=\"1\"\n )\n\n expect = svc_driver.call().with_output_vcf()\n\n expect.record_count(2)\n\n expect.has_record(\"1\", 4, \"GA\", \"G\")\n expect.has_record(\"1\", 7, \"A\", \"T\")\n\n def test_calls_deletion_correctly_around_a_snp(self):\n # The SNP should mean that the indel could be left-aligned further.\n svc_driver = SVCDriver(self) \\\n .with_ref_sequence(\n \"AAAGAATAAAAAAAACTGTTACGGC\", chrom=\"1\"\n ).with_read(\n \"......A......*...........\", chrom=\"1\"\n ).with_read(\n \"......A.....*............\", chrom=\"1\"\n ).with_read(\n \"......A*.................\", chrom=\"1\"\n )\n\n expect = svc_driver.call().with_output_vcf()\n\n expect.record_count(1)\n\n expect \\\n .has_record(\"1\", 5, \"AT\", \"A\")\n\n\nclass TestSingleDeletionCallingInRepetitiveSequence(AsciiWecallRunnerTest):\n\n def test_calls_deletion_to_left_of_A10_homopolymer(self):\n self.calls_variants(\n \"AAATGAAAAAAAAAACTGTTACGGC\",\n [\"....*....................\",\n \"....*....................\"]\n )\n\n def test_calls_deletion_to_left_of_A15_homopolymer(self):\n self.calls_variants(\n \"AAATGAAAAAAAAAAAAAAATCGGC\",\n [\"....*....................\",\n \"....*....................\"]\n )\n\n def test_calls_deletion_to_left_of_A18_homopolymer(self):\n self.calls_variants(\n \"AAATGAAAAAAAAAAAAAAAAAAGC\",\n [\"....*....................\",\n \"....*....................\"]\n )\n\n def test_calls_deletion_to_left_of_A20_homopolymer(self):\n self.calls_variants(\n \"AAATGAAAAAAAAAAAAAAAAAAAACTC\",\n [\"....*.......................\",\n \"....*.......................\",\n \"....*.......................\"]\n )\n\n def test_calls_deletion_to_left_of_A20_homopolymer_with_right_anchor(self):\n self.calls_variants(\n \"AAATGAAAAAAAAAAAAAAAAAAAACT\",\n [\"....*......................\",\n \"....*......................\",\n \"....*......................\"]\n )\n\n\nclass TestSNPAndDeletionCallingInRepetitiveSequence(AsciiWecallRunnerTest):\n\n def test_calls_correct_overlapping_deletion_and_snp_in_non_repetitive_sequence(self):\n self.calls_variants(\n \"TGTCAGGACATGGCATAACAAGATAC\",\n [\"......T...................\",\n \"......T...................\",\n \".....**...................\",\n \".....**...................\"]\n )\n\n def test_calls_correct_overlapping_deletion_and_snp_in_A2_homopolymer(self):\n self.calls_variants(\n \"TGTCGTAACATGGCATAACAAGATAC\",\n [\"......T...................\",\n \"......T...................\",\n \".....**...................\",\n 
\".....**...................\"]\n )\n\n def test_calls_correct_overlapping_deletion_and_snp_in_A3_homopolymer(self):\n self.calls_variants(\n \"TGTCGAAACATGGCATAACAAGATAC\",\n [\"......T...................\",\n \"......T...................\",\n \".....**...................\",\n \".....**...................\"]\n )\n\n def test_calls_correct_overlapping_deletion_and_snp_in_A4_homopolymer(self):\n svc_driver = SVCDriver(self) \\\n .with_ref_sequence(\n \"TGTCAAAACATGGCATAACAAGATAC\", chrom=\"1\"\n ).with_read(\n \"......T...................\", chrom=\"1\", n_rev=1, n_fwd=1\n ).with_read(\n \".....**...................\", chrom=\"1\", n_rev=1, n_fwd=1\n )\n\n expect = svc_driver.call().with_output_vcf()\n\n expect.record_count(2)\n\n expect \\\n .has_record(\"1\", 3, \"CAA\", \"C\")\n\n expect \\\n .has_record(\"1\", 6, \"A\", \"T\")\n\n def test_calls_correct_overlapping_deletion_and_snp_in_A6_homopolymer(self):\n svc_driver = SVCDriver(self) \\\n .with_ref_sequence(\n \"TGAAAAAACATGGCATAACAAGATAC\", chrom=\"1\"\n ).with_read(\n \"......T...................\", chrom=\"1\", n_rev=1, n_fwd=1\n ).with_read(\n \".....**...................\", chrom=\"1\", n_rev=1, n_fwd=1\n )\n\n expect = svc_driver.call().with_output_vcf()\n\n expect.record_count(2)\n\n expect \\\n .has_record(\"1\", 1, \"GAA\", \"G\")\n\n expect \\\n .has_record(\"1\", 6, \"A\", \"T\")\n\n\nclass TestCallingOverlappingDeletions(AsciiWecallRunnerTest):\n\n def test_calls_overlapping_deletions_in_non_repetitive_sequence(self):\n self.calls_variants(\n \"CTAGAATTCCGATACAGATAACAAACCC\",\n [\"....*.......................\",\n \"....*.......................\",\n \"....**......................\",\n \"....**......................\"],\n )\n\n def test_calls_overlapping_deletions_in_A3_homopolymer(self):\n self.calls_variants(\n \"CTAGAAATCCGATACAGATAACAAACCC\",\n [\"....*.......................\",\n \"....*.......................\",\n \"....**......................\",\n \"....**......................\"],\n )\n\n def test_calls_overlapping_deletions_in_A4_homopolymer(self):\n self.calls_variants(\n \"CTAGAAAACCGATACAGATAACAAACCC\",\n [\"....*.......................\",\n \"....*.......................\",\n \"....**......................\",\n \"....**......................\"],\n )\n\n def test_calls_overlapping_deletions_in_A5_homopolymer(self):\n self.calls_variants(\n \"CTAGAAAAACGATACAGATAACAAACCC\",\n [\"....*.......................\",\n \"....*.......................\",\n \"....**......................\",\n \"....**......................\"],\n )\n\n def test_calls_overlapping_deletions_in_A6_homopolymer(self):\n self.calls_variants(\n \"CTAGAAAAAAGATACAGATAACAAACCC\",\n [\"....*.......................\",\n \"....*.......................\",\n \"....**......................\",\n \"....**......................\"],\n )\n\n def test_calls_overlapping_deletions_in_A7_homopolymer(self):\n self.calls_variants(\n \"CTAGAAAAAAATCTCAGATAACAAACCC\",\n [\"....*.......................\",\n \"....*.......................\",\n \"....**......................\",\n \"....**......................\"],\n )\n\n def test_calls_overlapping_deletions_in_A8_homopolymer(self):\n self.calls_variants(\n \"CTAGAAAAAAATCTCAGATAACAAACCC\",\n [\"....*.......................\",\n \"....*.......................\",\n \"....**......................\",\n \"....**......................\"],\n )\n\n def test_calls_overlapping_deletions_in_A9_homopolymer(self):\n self.calls_variants(\n \"CTAGAAAAAAAACTCAGATAACAAACCC\",\n [\"....*.......................\",\n 
\"....*.......................\",\n \"....**......................\",\n \"....**......................\"],\n )\n\n def test_calls_overlapping_deletions_in_A10_homopolymer(self):\n svc_driver = SVCDriver(self) \\\n .with_ref_sequence(\n \"CTAGAAAAAAAAAACAGATAACAAACCC\", chrom=\"1\"\n ).with_read(\n \"....*.......................\", chrom=\"1\", n_rev=2, n_fwd=1\n ).with_read(\n \"....**......................\", chrom=\"1\", n_rev=2, n_fwd=1\n )\n\n expect = svc_driver.call().with_output_vcf()\n\n expect.record_count(2)\n expect.has_record(\"1\", 3, \"GAA\", \"G\")\n expect.has_record(\"1\", 3, \"GA\", \"G\")\n\n def test_should_be_able_to_call_distinct_homopolymer_insertions_in_a_long_homopolymer_region(self):\n self.calls_variants(\n \"ATTCAGATACTTTGCCCATTTTTAAGTTGGATCATTAGATTTTTTTCCTATAGAATTG****TTTTTTTTTTTTATTTCCTGTTATTAATCCCTTGTCAGATTTTTTTTTTGCAAATATTTTTT\", # noqa\n [\" ..................................................TTTT............................................................ \", # noqa\n \" .........................................................TT**................................................... \"], # noqa\n [\"..........................................................TTTT..............................................................\", # noqa\n \"..........................................................TT**..............................................................\"], # noqa\n n_fwd=7, n_rev=0\n )\n", "id": "9925166", "language": "Python", "matching_score": 3.576320171356201, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_calling_in_data_with_repetitive_sequence.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestStrandBiasPValue(BaseTest):\n def test_should_get_unknown_value_if_all_reads_are_forward(self):\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", chrom=\"1\"\n ).with_read(\n \".....................T....................\", chrom=\"1\", n_fwd=10, n_rev=0)\n\n vcf_expect = driver.call().with_output_vcf()\n vcf_expect.has_record_for_variant(Variant(\"1\", 21, \"A\", \"T\")) \\\n .with_info().with_field(\"SBPV\", [None])\n\n def test_should_get_unknown_value_if_all_reads_are_reverse(self):\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", chrom=\"1\"\n ).with_read(\n \".....................T....................\", chrom=\"1\", n_fwd=0, n_rev=10)\n\n vcf_expect = driver.call().with_output_vcf()\n vcf_expect.has_record_for_variant(Variant(\"1\", 21, \"A\", \"T\")) \\\n .with_info().with_field(\"SBPV\", [None])\n\n\nclass TestCallingWithForwardAndReverseReads(AsciiWecallRunnerTest):\n\n def test_calls_snp_on_full_length_forward_reads(self):\n self.calls_variants(\n \"ACGCCCCCTGCAAAAAAAAAA\",\n [\"...........C.........\",\n \",,,,,,,,,,,c,,,,,,,,,\",\n \",,,,,,,,,,,c,,,,,,,,,\",\n \"...........C.........\"],\n\n [\"...........C.........\",\n \"...........C.........\"], # Expected genotype\n )\n\n def test_calls_snp_on_long_forward_reads(self):\n self.calls_variants(\n \"AAAAAAAAAAACGCCCCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" ..............C............. \",\n \" ...................C......... 
\"]\n )\n\n def test_calls_snp_on_forward_and_reverse_reads(self):\n self.calls_variants(\n \"AAAAAAAAAAACGCCCCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" ..............C............. \",\n \" ,,,,,,,,,,,,,,,,,,,c,,,,,,,,, \",\n \" ,,,,,,,,,,t,,,,,,,,,,,,,,,,,,,,,, \",\n \"..............T............. \"]\n )\n\n def test_calls_del_and_snp_on_forward_and_reverse_reads(self):\n self.calls_variants(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\",\n [\" ..............C............. \",\n \" ,,,,,,,,,,,,,,,,,,,c,,,,,,,,, \",\n \" ,,,,,,,,,,*,,,,,,,,,,,,,,,,,,,,,, \",\n \"..............*............. \"]\n )\n", "id": "7268986", "language": "Python", "matching_score": 3.4321987628936768, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_strand_bias.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestInfoAnnotations(BaseTest):\n def test_should_have_standard_set_of_info_keys_for_variant(self):\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"TAGAATTGTTTGAGCTCTTTGTATTTCCTGTTATTAATCCCTTGTCAGAAGGGTCGTTTG\", )\\\n .with_read(\n \"....................A.......................................\", n_fwd=3, n_rev=0)\n\n svc_driver.call()\\\n .with_output_vcf()\\\n .record_count(1)\\\n .has_record_for_variant(Variant(\"1\", 20, \"G\", \"A\"))\\\n .with_info()\\\n .has_keys(\"PP\", \"DP\", \"DPR\", \"DPF\", \"VC\", \"VCR\", \"VCF\", \"ABPV\", \"SBPV\", \"MQ\", \"QD\", \"BR\")\n\n def test_snps_at_same_location_should_have_expected_coverage(self):\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"TAGAATTGTTTGAGCTCTTTGTATTTCCTGTTATTAATCCCTTGTCAGAAGGGTCGTTTG\",)\\\n .with_read(\n \"....................A.......................................\", n_fwd=27, n_rev=12)\\\n .with_read(\n \"....................T.......................................\", n_fwd=0, n_rev=16)\n\n vcf_expect = svc_driver.call().with_output_vcf().record_count(2)\n\n vcf_expect\\\n .has_record_for_variant(Variant(\"1\", 20, \"G\", \"A\"))\\\n .with_info().with_field(\"DP\", [55]).with_field(\"VC\", [39])\n\n vcf_expect\\\n .has_record_for_variant(Variant(\"1\", 20, \"G\", \"T\"))\\\n .with_info().with_field(\"DP\", [55]).with_field(\"VC\", [16])\n\n def test_snp_and_insertion_at_same_location_should_have_expected_coverage(self):\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"TAGAATTGTTTGAGCTCTTTG**TATTTCCTGTTATTAATCCCTTGTCAGAAGGGTCGTTTG\",)\\\n .with_read(\n \"....................A**.......................................\", n_fwd=27, n_rev=12)\\\n .with_read(\n \".....................AT.......................................\", n_fwd=0, n_rev=16)\n\n vcf_expect = svc_driver.call().with_output_vcf().record_count(2)\n\n vcf_expect\\\n .has_record_for_variant(Variant(\"1\", 20, \"G\", \"A\"))\\\n .with_info().with_field(\"DP\", [55]).with_field(\"VC\", [39])\n\n vcf_expect\\\n .has_record_for_variant(Variant(\"1\", 20, \"G\", \"GAT\"))\\\n .with_info().with_field(\"DP\", [55]).with_field(\"VC\", [16])\n\n def test_snp_and_deletion_at_same_location_should_have_expected_coverage(self):\n svc_driver = SVCDriver(self)\\\n .with_ref_sequence(\n \"TAGAATTGTTTGAGCTCTTTGTATTTCCTGTTATTAATCCCTTGTCAGAAGGGTCGTTTG\",)\\\n .with_read(\n \"....................A.......................................\", n_fwd=27, n_rev=12)\\\n .with_read(\n \".....................**.....................................\", n_fwd=0, 
n_rev=16)\n\n vcf_expect = svc_driver.call().with_output_vcf().record_count(2)\n\n vcf_expect\\\n .has_record_for_variant(Variant(\"1\", 20, \"G\", \"A\"))\\\n .with_info().with_field(\"DP\", [55]).with_field(\"VC\", [39])\n\n vcf_expect\\\n .has_record_for_variant(Variant(\"1\", 20, \"GTA\", \"G\"))\\\n .with_info().with_field(\"DP\", [55]).with_field(\"VC\", [16])\n", "id": "6580981", "language": "Python", "matching_score": 3.4632205963134766, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_info_annotation.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestNonCanincalBases(BaseTest):\n def test_should_not_output_snp_with_unknown_character_in_ref(self):\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n \"ATCGATCGATCGATNGATCGATCGATCGATCGATCGATCG\"\n ).with_read(\n \"..............T.........................\", n_fwd=10, n_rev=10\n )\n\n expect = svc_driver.call()\n expect.with_output_vcf().record_count(0)\n\n def test_should_not_output_snp_with_unknown_character_in_alt(self):\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n \"ATCGATCGATCGATGGATCGATCGATCGATCGATCGATCG\"\n ).with_read(\n \"..............N.........................\", n_fwd=10, n_rev=10\n )\n\n expect = svc_driver.call()\n expect.with_output_vcf().record_count(0)\n\n def test_should_not_output_snp_with_unknown_base_before_insertion(self):\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n \"ATCGATCGATCGATN*ATCGATCGATCGATCGATCGATCG\"\n ).with_read(\n \"...............T........................\", n_fwd=10, n_rev=10\n )\n\n expect = svc_driver.call()\n expect.with_output_vcf().record_count(0)\n\n def test_should_not_output_snp_with_unknown_base_before_deletion(self):\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n \"ATCGATCGATCGATNTATCGATCGATCGATCGATCGATCG\"\n ).with_read(\n \"...............*........................\", n_fwd=10, n_rev=10\n )\n\n expect = svc_driver.call()\n expect.with_output_vcf().record_count(0)\n\n def test_should_not_output_snp_with_non_canonical_character_in_ref(self):\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n \"ATCGATCGATCGATKGATCGATCGATCGATCGATCGATCG\"\n ).with_read(\n \"..............T.........................\", n_fwd=10, n_rev=10\n )\n\n expect = svc_driver.call()\n expect.with_output_vcf().record_count(0)\n\n def test_should_not_output_snp_with_non_canonical_character_in_alt(self):\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n \"ATCGATCGATCGATGGATCGATCGATCGATCGATCGATCG\"\n ).with_read(\n \"..............K.........................\", n_fwd=10, n_rev=10\n )\n\n expect = svc_driver.call()\n expect.with_output_vcf().record_count(0)\n\n def test_should_not_output_snp_with_non_canonical_base_before_insertion(self):\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n \"ATCGATCGATCGATK*ATCGATCGATCGATCGATCGATCG\"\n ).with_read(\n \"...............T........................\", n_fwd=10, n_rev=10\n )\n\n expect = svc_driver.call()\n expect.with_output_vcf().record_count(0)\n\n def test_should_not_output_snp_with_non_canonical_base_before_deletion(self):\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n \"ATCGATCGATCGATKTATCGATCGATCGATCGATCGATCG\"\n ).with_read(\n \"...............*........................\", n_fwd=10, n_rev=10\n )\n\n expect = svc_driver.call()\n expect.with_output_vcf().record_count(0)\n", "id": 
"10735564", "language": "Python", "matching_score": 1.9373127222061157, "max_stars_count": 8, "path": "test/wecall_acceptance/malformed_inputs/test_non_canonical_bases.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.bamutils.sequence_bank import SequenceBank\nfrom wecall.genomics.reference_chromosome import ReferenceChromosome\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestBAMWithoutDataOnChromosome(BaseTest):\n def test_should_be_able_to_process_BAM_files_with_missing_data_on_chromosomes(self):\n driver = SVCDriver(self) \\\n .with_ref_sequence(\n 'CGGCGGTCGAACGGAGCCCCAAGCGAAGCTCAAAACATGG', 0, chrom=\"1\") \\\n .with_ref_sequence(\n 'CGGCGGTAAAACGGAGCCCCAAGCTTTTTTCAAAACATGG', 0, chrom=\"2\") \\\n .with_read(\n '..................T.....................', n_fwd=10, n_rev=10, chrom='1')\n\n expect = driver.call(expected_success=True)\n expect.attempt_to_load_invalid_contig_warning(\"2\")\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(1)\n\n\nclass TestSingleSampleBAMWithoutReadGroup(BaseTest):\n def test_should_use_sample_name_if_available(self):\n chrom = '14'\n\n sequence_bank = SequenceBank(ReferenceChromosome(\n 'CGGCGGTCGAACGGAGCCCCAAGCGAAGCTCAAAACATGG', 0, chrom))\n sequence_bank.add_sequence(\n ' ...........A............. ', n_fwd=10, n_rev=10)\n\n driver = SVCDriver(self).with_ref_sequence(\n 'CGGCGGTCGAACGGAGCCCCAAGCGAAGCTCAAAACATGG', chrom=chrom)\\\n .with_bam_data('pi.bam', {'sample': sequence_bank}, True)\n\n expect = driver.call()\n\n expect.with_output_vcf().record_count(1).with_samples(['sample'])\n\n def test_should_use_filename_when_no_sample_name_available(self):\n chrom = '14'\n\n sequence_bank = SequenceBank(ReferenceChromosome(\n 'CGGCGGTCGAACGGAGCCCCAAGCGAAGCTCAAAACATGG', 0, chrom))\n sequence_bank.add_sequence(\n ' ...........A............. 
', n_fwd=10, n_rev=10)\n\n driver = SVCDriver(self).with_ref_sequence(\n 'CGGCGGTCGAACGGAGCCCCAAGCGAAGCTCAAAACATGG', chrom=chrom\n ).with_bam_data('pi.bam', {'sample': sequence_bank}, False)\n\n expect = driver.call()\n\n expect.with_output_vcf().record_count(1).with_samples(['pi'])\n", "id": "6637780", "language": "Python", "matching_score": 1.7860997915267944, "max_stars_count": 8, "path": "test/wecall_acceptance/malformed_inputs/test_malformed_BAM.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestVarFilterIDs(BaseTest):\n def test_should_error_with_not_allowed_var_filter_id(self):\n svc = SVCDriver(self) \\\n .with_var_filters(\"JONNY\", \"SB\") \\\n .with_verbosity(0)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\",\n )\n expect = svc.call(expected_success=False)\n expect.incorrect_var_ids_error(\"JONNY\")\n\n def test_should_error_with_not_allowed_var_filter_ids(self):\n svc = SVCDriver(self) \\\n .with_var_filters(\"JONNY\", \"ANDY\", \"SB\", \"EDWARD\", \"STEFANIE\") \\\n .with_verbosity(0)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\",\n )\n expect = svc.call(expected_success=False)\n expect.incorrect_var_ids_error(\"JONNY\", \"ANDY\", \"EDWARD\", \"STEFANIE\")\n", "id": "7434805", "language": "Python", "matching_score": 0.8596725463867188, "max_stars_count": 8, "path": "test/wecall_acceptance/call_filters/test_var_filter_ids.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.bedutils.bedwriter import BEDWriterContextManager\nfrom wecall.utils.interval import ChromInterval\nfrom wecall.wecall_utils.log_utils import log_timing_parser\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\nimport os\n\n\nclass TestVariantCallerTimings(BaseTest):\n def setUp(self):\n BaseTest.setUp(self)\n self.bed_filename = os.path.join(self.work_dir, \"_.bed\")\n self.log_filename = os.path.join(self.work_dir, \"_.log\")\n\n chrom = \"1\"\n\n with BEDWriterContextManager(self.bed_filename) as bed_file:\n bed_file.write_chrom_interval(ChromInterval(chrom, 0, 42))\n\n self.output_vcf = os.path.join(self.work_dir, \"output.vcf\")\n self.svc_driver = SVCDriver(self)\n self.svc_driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", chrom=chrom\n )\n self.svc_driver.with_read(\n \" ..............C............. 
\", chrom=chrom\n )\n self.svc_driver.with_log_timings(True)\n self.svc_driver.with_region_string(self.bed_filename)\n self.svc_driver.with_log_filename(self.log_filename)\n self.svc_driver.with_output_vcf_filename(self.output_vcf)\n\n def __outputs_timings_for_files(self, expected_files):\n observed_files = set()\n\n with open(self.log_filename, \"r\") as log_file:\n timing_data = log_timing_parser(log_file)\n self.assertGreater(len(timing_data), 0)\n for timing_data_item in timing_data:\n self.assertEqual(\"IO\", timing_data_item.timing_type)\n self.assertEqual(\"us\", timing_data_item.length_units)\n self.assertIn(\"file\", timing_data_item.metadata)\n observed_files.add(timing_data_item.metadata[\"file\"])\n\n for expected in expected_files:\n self.assertIn(expected, observed_files)\n\n def test_should_contain_timings_output_for_bam(self):\n self.svc_driver.with_bam_filenames(\n [os.path.join(self.work_dir, \"ba.bam\")])\n self.svc_driver.call()\n self.__outputs_timings_for_files(\n {os.path.join(self.work_dir, \"ba.bam\")})\n\n def test_should_contain_timings_output_for_fasta_file(self):\n ref = os.path.join(self.work_dir, \"ref.fa\")\n self.svc_driver.with_ref_filename(ref)\n\n self.svc_driver.call()\n self.__outputs_timings_for_files({ref})\n\n def test_should_contain_timings_output_for_fasta_index_file(self):\n ref = os.path.join(self.work_dir, \"ref.fa\")\n self.svc_driver.with_ref_filename(ref)\n\n self.svc_driver.call()\n self.__outputs_timings_for_files({ref + \".fai\"})\n\n def test_should_contain_timings_output_for_vcf(self):\n self.svc_driver.call()\n self.__outputs_timings_for_files({self.output_vcf})\n\n def test_should_contain_timings_output_for_vcf_when_variant_caller_run_in_parallel(self):\n self.svc_driver.with_number_of_jobs(1)\n self.svc_driver.with_work_dir(\n os.path.join(self.work_dir, \"vc_work_dir\"))\n\n self.svc_driver.call()\n self.__outputs_timings_for_files({self.output_vcf})\n\n def test_should_contain_timings_output_for_bed(self):\n self.svc_driver.call()\n self.__outputs_timings_for_files({self.bed_filename})\n", "id": "5478335", "language": "Python", "matching_score": 3.0034778118133545, "max_stars_count": 8, "path": "test/wecall_acceptance/wecall_runner/test_variant_caller_timings.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\nfrom unittest import expectedFailure\n\nfrom wecall.bedutils.bedwriter import BEDWriterContextManager\nfrom wecall.utils.interval import ChromInterval\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestBedFileInput(BaseTest):\n\n @expectedFailure\n def test_bed_extension_is_not_required(self):\n region_filename = os.path.join(self.work_dir, \"bed.txt\")\n\n with BEDWriterContextManager(region_filename) as bed:\n bed.write_chrom_interval(ChromInterval(\"1\", 0, 29))\n\n svc = SVCDriver(self).with_ref_sequence(\n \"ACGTACGTACGTACGTACGTACGT\", chrom=\"1\"\n ).with_read(\n \"....G...................\", n_rev=10, n_fwd=10\n ).with_region_string(region_filename)\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1)\n\n def test_should_run_if_interval_not_contained_in_reference(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"ACGTACGTACGTACGTACGTACGT\", chrom=\"1\"\n ).with_read(\n \"....G...................\", chrom=\"1\"\n ).with_bed_file(\n ['1\\t24\\t28']\n ).with_region_padding(0)\n\n svc.call(True).with_output_vcf().record_count(0)\n\n def 
test_should_warn_user_if_contigs_provided_not_in_reference(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"ACGTACGTACGTACGTACGTACGT\", chrom=\"1\"\n ).with_read(\n \"....G...................\", chrom=\"1\"\n ).with_bed_file([\"42\\t1\\t5\"])\n\n expect = svc.call(True)\n\n expect.with_log().bed_file_contains_contigs_that_are_not_present_in_the_reference_warning(\"42\")\n\n def test_should_warn_even_if_some_contigs_not_in_reference(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"ACGTACGTACGTACGTACGTACGT\", chrom=\"1\"\n ).with_read(\n \"....G...................\", chrom=\"1\"\n ).with_bed_file([\"1\\t0\\t10\", \"42\\t21881\\t22032\"])\n\n expect = svc.call(True)\n\n expect.with_log().bed_file_contains_contigs_that_are_not_present_in_the_reference_warning(\"42\")\n\n def test_ok_if_multiple_regions_not_contained_in_reference(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"ACGTACGTACGTACGTACGTACGT\", chrom=\"1\"\n ).with_read(\n \"....G...................\", chrom=\"1\"\n ).with_bed_file(\n [\"21\\t0\\t10\", \"42\\t21881\\t22032\", \"42\\t22032\\t22033\"]\n )\n\n expect = svc.call(True)\n\n expect.with_log().bed_file_contains_contigs_that_are_not_present_in_the_reference_warning(\"21\", \"42\")\n\n def test_disallow_mixing_bed_files_and_region_strings(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"ACGTACGTACGTACGTACGTACGT\"\n ).with_read(\n \"....G...................\"\n ).with_region_string(\"some_bed_file.bed,1:1-10\")\n\n expect = svc.call(False)\n\n expect.regions_contains_both_bedfile_and_region_string_error()\n\n def test_disallow_mixing_bed_files_and_region_strings2(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"ACGTACGTACGTACGTACGTACGT\"\n ).with_read(\n \"....G...................\"\n ).with_region_string(\"1:1-10,some_bed_file.bed\")\n\n expect = svc.call(False)\n\n expect.regions_contains_both_bedfile_and_region_string_error()\n\n def test_error_if_input_regions_file_does_not_exist(self):\n bed_file_name = \"some_bed_file.bed\"\n\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"ACGTACGTACGTACGTACGTACGT\"\n ).with_read(\n \"....G...................\"\n ).with_region_string(bed_file_name)\n\n expect = svc.call(False)\n\n expect.bedfile_does_not_exist_error(bed_file_name)\n", "id": "3900175", "language": "Python", "matching_score": 3.0043652057647705, "max_stars_count": 8, "path": "test/wecall_acceptance/regions_specification/test_bed_file_format.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import expectedFailure\n\nfrom wecall.bedutils.bedwriter import BEDWriterContextManager, BEDIndexer\nimport os\nfrom wecall.genomics.reference_chromosome import DEFAULT_CHROM\nfrom wecall.utils.interval import Interval, ChromInterval\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\n\n\nclass TestCallingInsideBedRegion(AsciiWecallRunnerTest):\n def calls_variants_in_bed_region(\n self,\n ref,\n sequence_list,\n expected_ascii_haplotypes=None,\n expected_variant_stubs=None,\n bed_regions=[],\n compress=False\n ):\n\n bed_filename = os.path.join(self.work_dir, \"test.bed\")\n with BEDWriterContextManager(bed_filename) as bed_writer:\n # TODO - link chromosomes\n for region in bed_regions:\n bed_writer.write_chrom_interval(ChromInterval(DEFAULT_CHROM, region.start, region.end))\n\n if compress:\n indexer = BEDIndexer(bed_filename)\n indexer.index()\n bed_filename = indexer.compressed_filename\n\n self.calls_variants(\n ref,\n sequence_list,\n 
expected_ascii_haplotypes,\n expected_variant_stubs,\n n_fwd=10,\n n_rev=10,\n config_dict={\n \"regions\": bed_filename,\n \"allowMNPCalls\": \"True\"})\n\n def test_should_call_variants_when_whole_read_within_region(self):\n self.calls_variants_in_bed_region(\n \"ACGCCCCCTGCAAAAAAAAAA\",\n [\".T...................\",\n \"...........C.........\"],\n [\".T...................\",\n \"...........C.........\"],\n\n bed_regions=[Interval(0, 12)]\n )\n\n def test_should_call_variants_when_whole_read_within_region_with_compressed_bed_file(self):\n self.calls_variants_in_bed_region(\n \"ACGCCCCCTGCAAAAAAAAAA\",\n [\".T...................\",\n \"...........C.........\"],\n [\".T...................\",\n \"...........C.........\"],\n\n bed_regions=[Interval(0, 12)], compress=True\n )\n\n @expectedFailure\n def test_should_call_only_variant_that_is_within_region(self):\n self.calls_variants_in_bed_region(\n \"AAAAAAAAACGCCCCCTGCAAAAAAAAAA\",\n [\".........T...................\",\n \"..................T..........\"],\n\n [\".........T...................\",\n \".............................\", ],\n\n bed_regions=[Interval(8, 15)],\n )\n\n def test_should_call_variants_with_touching_and_unsorted_regions(self):\n self.calls_variants_in_bed_region(\n \"ACGCCCCCTGCAAAAAAAAAA\",\n [\"......T...T..........\"],\n expected_variant_stubs=[(6, \"CCTGC\", \"TCTGT\")],\n bed_regions=[Interval(7, 12), Interval(2, 7), Interval(4, 5)]\n )\n", "id": "11218827", "language": "Python", "matching_score": 2.8970303535461426, "max_stars_count": 8, "path": "test/wecall_acceptance/regions_specification/test_calls_inside_bed_file_regions.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\n\n\nclass TestLeftAlignOutOfRead(AsciiWecallRunnerTest):\n\n def test_should_not_call_unleftalignable_insertion(self):\n self.calls_variants(\n \"CTAGAAAAAAAAAAAA*AATTAAAAAAAAAAACAGATAACAAACCC\",\n [\" ....A.......................... \",\n \" ......A....................... \",\n \" ,,,,a,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \" ,,,,,,,,,a,,,,,,,,,,,,,,,,,,,,, \"],\n expected_variant_stubs=[]\n )\n\n def test_should_call_left_aligned_insertion_when_inside_read(self):\n self.calls_variants(\n \"CTAGAAAAAAAAAAAA*AATTAAAAAAAAAAACAGATAACAAACCC\",\n [\"................A.......................... \",\n \"................A....................... \",\n \",,,,,,,,,,,,,,,,a,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \",,,,,,,,,,,,,,,,a,,,,,,,,,,,,,,,,,,,,, \"],\n expected_variant_stubs=[(3, \"G\", \"GA\")]\n )\n\n def test_should_not_call_unleftalignable_deletion(self):\n self.calls_variants(\n \"CTAGAAAAAAAAAAAAAAATTAAAAAAAAAAACAGATAACAAACCC\",\n [\" ....*.......................... \",\n \" ......*....................... \",\n \" ,,,,*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \" ,,,,,,,,,*,,,,,,,,,,,,,,,,,,,,, \"],\n expected_variant_stubs=[]\n )\n\n def test_should_call_left_aligned_deletion_when_inside_read(self):\n self.calls_variants(\n \"CTAGAAAAAAAAAAAAAAATTAAAAAAAAAAACAGATAACAAACCC\",\n [\"................*.......................... \",\n \"................*....................... \",\n \",,,,,,,,,,,,,,,,*,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \",,,,,,,,,,,,,,,,*,,,,,,,,,,,,,,,,,,,,, \"],\n expected_variant_stubs=[(3, \"GA\", \"G\")]\n )\n\n def test_should_not_call_insertion_at_start_of_reads(self):\n self.calls_variants(\n \"CTAGCTGACA*ACTATTAAAAAAAAAAAAACAGATAACAAACCCCCCC\",\n [\" T.................................. \",\n \" T............................... 
\",\n \" t,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \" t,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \"],\n expected_variant_stubs=[]\n )\n\n def test_should_not_call_insertion_at_end_of_reads(self):\n self.calls_variants(\n \"CTAGCTGACAACTATTAAAAAAAAAAAAACAGATAACAA*ACCCCCCC\",\n [\" .................................T \",\n \" ...............................T \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,t \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,t \"],\n expected_variant_stubs=[]\n )\n\n def test_should_not_call_deletion_at_start_of_reads(self):\n self.calls_variants(\n \"CTAGCTGACATACTATTAAAAAAAAAAAAACAGATAACAAACCCCCCC\",\n [\" *.................................. \",\n \" *............................... \",\n \" *,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \" *,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \"],\n expected_variant_stubs=[]\n )\n\n def test_should_not_call_deletion_at_end_of_reads(self):\n self.calls_variants(\n \"CTAGCTGACAACTATTAAAAAAAAAAAAACAGATAACAATACCCCCCC\",\n [\" .................................* \",\n \" ...............................* \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,* \",\n \" ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,* \"],\n expected_variant_stubs=[]\n )\n\n def test_should_call_snp_at_start_of_reads(self):\n self.calls_variants(\n \"CTAGCTGACAGACTATTAAAAAAAAAAAAACAGATAACAAACCCCCCC\",\n [\" T.................................. \",\n \" T............................... \",\n \" t,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\",\n \" t,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \"],\n expected_variant_stubs=[(10, \"G\", \"T\")]\n )\n", "id": "5171670", "language": "Python", "matching_score": 1.664785623550415, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_indels_at_edge_of_reads.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\n\n\nclass TestBlockBoundariesCluster(AsciiWecallRunnerTest):\n def calls_variants_with_defined_block_size(self, ref, sequence_list, expected_variants=None):\n self.calls_variants(\n ref,\n sequence_list,\n config_dict={\n \"maxBlockSize\": self.max_block_size,\n \"maxClusterDist\": 3,\n \"minClusterDist\": 3},\n expected_ascii_haplotypes=None,\n expected_variant_stubs=expected_variants,\n n_fwd=20,\n n_rev=20)\n\n def test_should_call_del_and_snp_either_side_of_border(self):\n self.max_block_size = 20\n self.calls_variants_with_defined_block_size(\n # 0123456789012345678901234567890123456789\"\n \"ACGCTCACTGACGCTCACTGATACTGACTGATCGCTGGTT\",\n [\"T................*G*T*C.................\"], # input\n\n [\n (0, \"A\", \"T\"), (16, \"ACT\", \"A\"), (19, \"GA\", \"G\"), (22, \"A\", \"C\"),\n ]\n )\n\n def test_should_call_snp_at_the_end_of_the_reference(self):\n self.max_block_size = 10\n self.calls_variants_with_defined_block_size(\n # 012345678901234567890123456789\"\n \"ACGCTCACTGATACTGACTGATCGCTGGTT\",\n [\".............................C\"], # input\n\n [(29, \"T\", \"C\"), ]\n )\n\n def test_should_call_snp_at_the_begin_of_the_reference(self):\n self.max_block_size = 10\n self.calls_variants_with_defined_block_size(\n # 012345678901234567890123456789\"\n \"ACGCTCACTGATACTGACTGATCGCTGGTT\",\n [\"T.............................\"], # input\n\n [(0, \"A\", \"T\"), ]\n )\n\n def test_should_left_align_isolated_del_across_boundary_when_there_are_nearby_variants_on_left(self):\n self.max_block_size = 20\n self.calls_variants_with_defined_block_size(\n # 0123456789012345678901234567890123456789\"\n \"ACGCTCACTTACGCTCACTTTTTCTGACTGATCGCTGGTT\",\n 
[\"T...............T.....*.................\"], # input\n\n [(0, \"A\", \"T\"), (16, \"A\", \"T\"), (17, \"CT\", \"C\")]\n )\n", "id": "12512668", "language": "Python", "matching_score": 2.24535870552063, "max_stars_count": 8, "path": "test/wecall_acceptance/regions_specification/test_block_boundaries.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestVariantClustering(AsciiWecallRunnerTest):\n\n def calls_variants_with_coverage_20(\n self, ref, sequence_list, expected_variants=None):\n self.calls_variants(\n ref, sequence_list, expected_ascii_haplotypes=None,\n expected_variant_stubs=expected_variants,\n n_fwd=20, n_rev=20\n )\n\n def test_calls_eight_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAAAAAAAAAAATT\",\n [\".T.*.A.*.A..*.G.*.................\"]\n )\n\n def test_calls_nine_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAAAAAAAAAATT\",\n [\".T.*.A.*.A..*.G.*.C..............\"]\n )\n\n def test_calls_ten_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAAAAAAAAATT\",\n [\".T.*.A.*.A..*.G.*.C.*...........\"]\n )\n\n def test_calls_eleven_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAAAAAAAATT\",\n [\".T.*.A.*.A..*.G.*.C.*.T........\"]\n )\n\n def test_calls_twelve_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAATAAAAATT\",\n [\".T.*.A.*.A..*.G.*.C.*.T.*......\"]\n )\n\n def test_calls_thirteen_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAATAAAAAAAAAATT\",\n [\".T.*.A.*.A..*.G.*.C.*.T.*.T.........\"]\n )\n\n def test_calls_fourteen_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAATAAATAAAAAATT\",\n [\".T.*.A.*.A..*.G.*.C.*.T.*.T.*.......\"]\n )\n\n def test_calls_fifteen_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAATAAATAAAAAAAAATT\",\n [\".T.*.A.*.A..*.G.*.C.*.T.*.T.*.T........\"]\n )\n\n def test_calls_sixteen_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAATAAATAAATAACAATT\",\n [\".T.*.A.*.A..*.G.*.C.*.T.*.T.*.T.*......\"]\n )\n\n def test_calls_seventeen_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAATAAATAAATAACAATT\",\n [\".T.*.A.*.A..*.G.*.C.*.T.*.T.*.T.*.T....\"]\n )\n\n def test_calls_eighteen_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAATAAATAAATAAATAACAAATT\",\n [\".T.*.A.*.A..*.G.*.C.*.T.*.T.*.T.*.T.*.......\"]\n )\n\n def test_calls_nineteen_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAATAAATAAATAAATAAACAAAATT\",\n [\".T.*.A.*.A..*.G.*.C.*.T.*.T.*.T.*.T.*.T.......\"]\n )\n\n def test_calls_twenty_variants(self):\n self.calls_variants_with_coverage_20(\n \"ACGCTCACTGCAGTCGTTAGAAATAAATAAATAAATAAATACAAATT\",\n [\".T.*.A.*.A..*.G.*.C.*.T.*.T.*.T.*.T.*.T.*......\"]\n )\n\n\nclass TestSNPClustering(AsciiWecallRunnerTest):\n\n def calls_variants_with_coverage_20_no_MNPs(\n self, ref, sequence_list, expected_variants=None):\n self.calls_variants(\n ref, sequence_list,\n config_dict={\"allowMNPCalls\": False},\n expected_ascii_haplotypes=None,\n expected_variant_stubs=expected_variants,\n n_fwd=20, 
n_rev=20\n )\n\n def test_calls_eight_SNPs(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTCGTTAGAAAAAAAAAAAAAAAAA\",\n [\".T.T.A.G.A..C.G.A....................\"]\n )\n\n def test_calls_eight_adjecent_SNPs(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTCGTTAGAAAAAAAAAAAAAAAAA\",\n [\".TCTAAGGC............................\"]\n )\n\n def test_calls_nine_SNPs(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTCGTTAGAAAAAAAAAAAAAAAAA\",\n [\".T.T.A.G.A..C.G.A..C.................\"]\n )\n\n def test_calls_nine_adjacent_SNPs(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTCGTTAGAAAAAAAAAAAAAAAAA\",\n [\"............CGGTACGCT................\"]\n )\n\n def test_calls_twenty_adjacent_SNPs(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTCGTTAGTGCAATGAAAAAAAAAA\",\n [\"........ACTCCGTACGCTACGTGCTC.........\"]\n )\n\n\nclass TestCallingTwoNearbyClusters(AsciiWecallRunnerTest):\n\n def calls_variants_with_coverage_20_no_MNPs(\n self, ref, sequence_list, expected_variants=None):\n self.calls_variants(\n ref, sequence_list,\n config_dict={\"maxClusterDist\": 5, \"allowMNPCalls\": False},\n expected_ascii_haplotypes=None,\n expected_variant_stubs=expected_variants,\n n_fwd=20, n_rev=20\n )\n\n def test_calls_two_sets_of_eight_SNPs_in_separate_clusters(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTTCCGTCGTTAGAAAAAAAAAAAAAAAAA\",\n [\".TGTGATGG.............GCTTCGTC............\"]\n )\n\n def test_calls_two_sets_of_nine_SNPs_in_separate_clusters(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTTCCGTCGTTAGAAAAAAAAAAAAAAAAA\",\n [\".TGTGATGGT............GCTTCGTCT...........\"]\n )\n\n def test_calls_two_sets_of_ten_SNPs_in_separate_clusters(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTTCCGTCGTTAGAAAAAAAAAAAAAAAAA\",\n [\".TGTGATGGTG...........GCTTCGTCTG..........\"]\n )\n\n def test_calls_two_sets_of_eleven_SNPs_in_separate_clusters(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTCCGTAGTTAGAAAAAAAAAAAAAAAAA\",\n [\".TGTGATGGTGT.........GCTTCGTCTGT.........\"]\n )\n\n def test_calls_two_sets_of_twelve_SNPs_in_separate_clusters(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTCGCTGGTTAGAAAAAAAAAAAAAAAAA\",\n [\".TGTGATGGTGTC........GCTTCGTCTGTC........\"]\n )\n\n\nclass TestCallingThreeNearbyClusters(AsciiWecallRunnerTest):\n\n def calls_variants_with_coverage_20_no_MNPs(\n self, ref, sequence_list, expected_variants=None):\n self.calls_variants(\n ref, sequence_list,\n config_dict={\"maxClusterDist\": 5, \"allowMNPCalls\": False},\n expected_ascii_haplotypes=None,\n expected_variant_stubs=expected_variants,\n n_fwd=20, n_rev=20\n )\n\n def test_calls_three_sets_of_eight_SNPs_in_separate_clusters(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGCTGACTGATCGCTGGTTAGAAAAAAAACCTGACTGAGTACAAAAAAAAACGTAGTACGTA\",\n [\".TGTGATGG....................GCTTCGTC..................GGTCTCGT..........\"])\n\n def test_calls_three_sets_of_nine_SNPs_in_separate_clusters(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTCGCCTGACTGATGGTTAGAAAAAAAACGCTGACTGATACAAAAAAAAACGTAGTACGTA\",\n [\".TGTGATGGT...................GCTTCGTCT.................GGTCTCGTT.........\"])\n\n def test_calls_three_sets_of_ten_SNPs_in_separate_clusters(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n 
\"AAAAAAAACGCTCACTGCAGTCGCCTGACTGATGGTTAGAAAAAAAACGTCTGACTGAACAAAAAAAAACGTAGTACGTA\",\n [\" .TGTGATGGTG..................GCTTCGTCTG................GGTCTCGTTG........\"])\n\n def test_calls_three_sets_of_eleven_SNPs_in_separate_clusters(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTCGCCTGACTGATGGTTAGAAAAAAAACGTACCTGACTGAAAAAAAAAACGTAGTACGTA\",\n [\".TGTGATGGTGG.................GCTTCGTCTGG...............GGTCTCGTTGG.......\"])\n\n def test_calls_three_sets_of_twelve_SNPs_in_separate_clusters(self):\n self.calls_variants_with_coverage_20_no_MNPs(\n \"ACGCTCACTGCAGTCGCTCTGACTGAGGTTAGAAAAAAAACGTACTGACTGACAAAAAAAAACGTAGTACGTA\",\n [\".TGTGATGGTGGT................GCTTCGTCTGGT..............GGTCTCGTTGGT......\"])\n\n\nclass TestCallingOnVeryLongVariantClusters(BaseTest):\n\n def test_call_on_ridiculous_snp_cluster(self):\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AGCTAGCGCTAGCGCTCGACAGATCGAGATAGCCGGGCTAAGATTAGATCGCGATGCGATGCACGTACGCATGCATACGA\"\n ).with_read(\n \"CAGGCATATGCATATGTACTCACGTACACGCATTAAATGCCACGGCACGTATACGATACGATCTAGCTATCGATCGCTAC\", n_fwd=20, n_rev=20\n ).with_allow_MNP_calls(False)\n\n expect = driver.call()\n\n expect \\\n .with_output_vcf() \\\n .record_count(80)\n\n\nclass TestCallingWithIndelsInRepetitiveSequence(BaseTest):\n def test_indel_that_gets_left_aligned_over_snp_gets_correct_genotype(self):\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"ATCGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAGATTACAATCG\"\n ).with_read(\n \"............................................T*******....\", n_fwd=10, n_rev=10, sample_name=\"sample\"\n ).with_read(\n \"..G******...............................................\", n_fwd=10, n_rev=10, sample_name=\"sample\")\n\n expect = driver.call()\n expect\\\n .with_output_vcf()\\\n .has_record_for_variants(\n Variant(\"1\", 1, \"TCGATTA\", \"T\"),\n Variant(\"1\", 2, \"CGATTACA\", \"C\"),\n Variant(\"1\", 8, \"C\", \"G\"),\n Variant(\"1\", 51, \"A\", \"T\"),\n ).with_sample(\"sample\").has_phased_genotypes('1|.', '.|1', '1|.', '0|1')\n\n\nclass TestAllowMNPCallsParameter(BaseTest):\n\n def test_calls_SNPs_when_not_allowed_to_call_MNPs(self):\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"ACGCTCACTGCAGTCGTTAGAAAAAAAAAAAAAAAAA\"\n ).with_read(\n \"............CGGTACGCT................\", n_fwd=20, n_rev=20\n ).with_allow_MNP_calls(False)\n\n expect = driver.call()\n\n expect \\\n .with_output_vcf() \\\n .record_count(9)\n\n def test_calls_MNPs_when_allowed_to_call_MNPs(self):\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"ACGCTCACTGCAGTCGTTAGAAAAAAAAAAAAAAAAA\"\n ).with_read(\n \"............CGGTACGCT................\", n_fwd=20, n_rev=20\n ).with_allow_MNP_calls(True)\n\n expect = driver.call()\n\n expect \\\n .with_output_vcf() \\\n .record_count(1)\n", "id": "11342558", "language": "Python", "matching_score": 5.457340240478516, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_variant_clustering.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import expectedFailure\n\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestMNPCalling(AsciiWecallRunnerTest):\n @expectedFailure\n def test_should_call_mnp_at_the_end_of_read(self):\n expected_variant_stubs = {\n (3, \"CTT\", \"C\"),\n (18, \"GT\", \"G\"),\n (25, \"C\", \"T\"),\n # this MNP is not called and neither are the composing SNPs\n 
(37, \"GCCG\", \"ACCT\"),\n (45, \"CTT\", \"C\"),\n (53, \"T\", \"TCTG\")\n }\n\n self.calls_variants(\n \"AACCTTGGACGTTATTCTGTCAATGCATCCCATTGCCGCCGCAACCTTGGACGT***TATTCTGTC\",\n [\" ...**.............*.....T...........A..T. ...**......CTG.........\", ],\n n_fwd=10, n_rev=10,\n expected_variant_stubs=expected_variant_stubs\n )\n\n def test_calls_mnp_formed_by_overlapping_reads(self):\n sn = \"a_sample\"\n\n svc_driver = SVCDriver(self).with_allow_MNP_calls(True)\n svc_driver.with_ref_sequence(\n \"AACCTTGGACGTTATTCTGTCAATGCATCCCATTGCCGCCGCAACCTTGGACGTTATTCTGTC\", chrom=\"1\"\n ).with_read(\n \"..................T.. ....C.......C............................\", sample_name=sn, n_fwd=3, n_rev=3\n ).with_read(\n \"..................T.......C... ...C............................\", sample_name=sn, n_fwd=3, n_rev=3\n ).with_output_phased_genotypes(True)\n\n svc_driver.call().with_output_vcf()\\\n .has_record_for_variant(Variant(\"1\", 18, \"GTCAATGCATCCCATTG\", \"TTCAATGCCTCCCATTC\"))\\\n .with_sample(sn)\\\n .has_genotype(\"1|1\")\n", "id": "2673208", "language": "Python", "matching_score": 3.3016719818115234, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_calls_mnps.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\nref_alt = \"<NON_REF>\"\n\n\nclass TestCallingWithPloidy3(BaseTest):\n def test_should_call_variants(self):\n chrom = 'chr1'\n sample_name = 'sample'\n svc = SVCDriver(self) \\\n .with_ploidy(3)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTC***AACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"......C.........G.....ATG.......***.........\", n_rev=10, n_fwd=10, chrom=chrom, sample_name=sample_name\n ).with_read(\n \"......C...............ATG.......***.........\", n_rev=10, n_fwd=10, chrom=chrom, sample_name=sample_name\n ).with_read(\n \"......................ATG.......***.........\", n_rev=10, n_fwd=10, chrom=chrom, sample_name=sample_name)\n\n expect = svc.call()\n\n vcf = expect \\\n .with_output_vcf() \\\n .record_count(4)\n\n vcf.has_record_for_variant(Variant(chrom, 6, 'T', 'C')).with_sample(sample_name).has_genotype('0/1/1')\n vcf.has_record_for_variant(Variant(chrom, 16, 'T', 'G')).with_sample(sample_name).has_genotype('0/0/1')\n vcf.has_record_for_variant(Variant(chrom, 21, 'C', 'CATG')).with_sample(sample_name).has_genotype('1/1/1')\n vcf.has_record_for_variant(Variant(chrom, 28, 'TTAC', 'T')).with_sample(sample_name).has_genotype('1/1/1')\n\n def test_should_support_refcalls(self):\n chrom = 'chr1'\n sample_name = 'sample'\n svc = SVCDriver(self) \\\n .with_ploidy(3) \\\n .with_output_ref_calls(True)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCTCAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"............................................\", n_rev=10, n_fwd=10, chrom=chrom, sample_name=sample_name\n )\n\n expect = svc.call()\n\n vcf = expect \\\n .with_output_vcf() \\\n .record_count(1)\n\n vcf.has_record_for_variant(Variant(chrom, 0, 'A', ref_alt)).with_sample(sample_name).has_genotype('0/0/0')\n", "id": "4441638", "language": "Python", "matching_score": 4.316399574279785, "max_stars_count": 8, "path": "test/wecall_acceptance/ploidy/test_ploidy_3.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver 
import SVCDriver\n\n\nclass TestCallingVariantWithLowPercentageSupport(BaseTest):\n def test_somatic_call_on_homozygous_ref_background(self):\n chrom = \"1\"\n somatic_sample = \"andy\"\n\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n 'CGGCGGTCGAACGGAGCCCCAAGCGAAGCTCAAAACATGG', chrom=chrom\n ).with_read(\n '........................................', n_rev=85, n_fwd=0, chrom=chrom, sample_name=somatic_sample\n ).with_read(\n '..................T.....................', n_rev=15, n_fwd=0, chrom=chrom, sample_name=somatic_sample\n ).with_ploidy(3)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(1) \\\n .has_record_for_variant(Variant(chrom, 18, \"C\", \"T\")) \\\n .with_sample(somatic_sample)\n\n def test_somatic_call_on_homozygous_alt_background(self):\n chrom = \"1\"\n somatic_sample = \"andy\"\n\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n 'CGGCGGTCGAACGGAGCCCCAAGCGAAGCTCAAAACATGG', chrom=chrom\n ).with_read(\n '..........*.............................', n_rev=85, n_fwd=0, chrom=chrom, sample_name=somatic_sample\n ).with_read(\n '..........*.......T.....................', n_rev=15, n_fwd=0, chrom=chrom, sample_name=somatic_sample\n ).with_ploidy(3)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 18, \"C\", \"T\")) \\\n .with_sample(somatic_sample)\n\n def test_somatic_call_on_het_alt_background(self):\n chrom = \"1\"\n somatic_sample = \"andy\"\n\n svc_driver = SVCDriver(self)\n svc_driver.with_ref_sequence(\n 'CGGCGGTCGAACGGAGCCCCAAGCGAAGCTCAAAACATGG', chrom=chrom\n ).with_read(\n '..........T.............................', n_rev=50, n_fwd=0, chrom=chrom, sample_name=somatic_sample\n ).with_read(\n '..........*.............................', n_rev=35, n_fwd=0, chrom=chrom, sample_name=somatic_sample\n ).with_read(\n '..........*.......T.....................', n_rev=15, n_fwd=0, chrom=chrom, sample_name=somatic_sample\n ).with_ploidy(3)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 18, \"C\", \"T\")) \\\n .with_sample(somatic_sample)\n", "id": "3805027", "language": "Python", "matching_score": 2.86371111869812, "max_stars_count": 8, "path": "test/wecall_acceptance/somatic_variant_calls/test_calling_variant_with_low_percentage_support.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nref_alt = \"<NON_REF>\"\n\n\nclass TestRefCallingMinDepthComputation(BaseTest):\n def test_depth_computation_all_reads_spanning_reference(self):\n sample_name = \"bah\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\", chrom=chrom\n ).with_read(\n \".........................................\", n_rev=5, n_fwd=5, sample_name=sample_name\n ).with_output_ref_calls(True)\n\n expect = driver.call()\n\n expect\\\n .with_output_vcf()\\\n .has_record_for_variant(Variant(chrom, 0, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n def test_depth_computation_all_reads_spanning_reference_with_one_snp(self):\n sample_name = \"bah\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\", chrom=chrom\n ).with_read(\n 
\"................T........................\", n_rev=5, n_fwd=5, sample_name=sample_name\n ).with_output_ref_calls(True).with_allow_MNP_calls(False)\n\n expect = driver.call()\n vcf_expect = expect.with_output_vcf()\n\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 0, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 17, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n def test_depth_computation_all_reads_spanning_reference_with_variants(self):\n sample_name = \"bah\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\", chrom=chrom\n ).with_read(\n \"................T.T......................\", n_rev=5, n_fwd=5, sample_name=sample_name\n ).with_output_ref_calls(True).with_allow_MNP_calls(False)\n\n expect = driver.call()\n vcf_expect = expect.with_output_vcf()\n\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 0, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 17, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 19, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n def test_depth_computation_all_reads_spanning_reference_with_insertion(self):\n sample_name = \"bah\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAAAAAAC*AAAAAAAAAAAAAAAAAAAAAAA\", chrom=chrom\n ).with_read(\n \"................T.......................\", n_rev=5, n_fwd=5, sample_name=sample_name\n ).with_output_ref_calls(True).with_allow_MNP_calls(False)\n\n expect = driver.call()\n vcf_expect = expect.with_output_vcf()\n\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 0, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 15, \"C\", \"CT\"))\\\n .with_sample(sample_name).has_read_depth(10)\n\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 16, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n def test_depth_computation_all_reads_spanning_reference_with_deletion(self):\n sample_name = \"bah\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAAAAAACACAAAAAAAAAAAAAAAAAAAAAA\", chrom=chrom\n ).with_read(\n \"................*.......................\", n_rev=5, n_fwd=5, sample_name=sample_name\n ).with_output_ref_calls(True).with_allow_MNP_calls(False)\n\n expect = driver.call()\n vcf_expect = expect.with_output_vcf()\n\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 0, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 15, \"CA\", \"C\"))\\\n .with_sample(sample_name).has_read_depth(10)\n\n vcf_expect \\\n .has_record_for_variant(Variant(chrom, 17, \"C\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n def test_min_depth_computation_with_mixed_depth_of_reads(self):\n sample_name = \"bah\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\", chrom=chrom\n ).with_read(\n \".............................. 
\", n_rev=5, n_fwd=5, sample_name=sample_name\n ).with_read(\n \" ..............................\", n_rev=3, n_fwd=3, sample_name=sample_name\n ).with_output_ref_calls(True)\n\n expect = driver.call()\n\n expect\\\n .with_output_vcf()\\\n .has_record_for_variant(Variant(chrom, 0, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n expect\\\n .with_output_vcf()\\\n .has_record_for_variant(Variant(chrom, 10, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(16).has_min_read_depth(16)\n\n expect\\\n .with_output_vcf()\\\n .has_record_for_variant(Variant(chrom, 30, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(6).has_min_read_depth(6)\n\n def test_min_depth_computation_start_boundary_conditions(self):\n sample_name = \"bah\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\", chrom=chrom\n ).with_read(\n \" .......................................\", n_rev=5, n_fwd=5, sample_name=sample_name\n ).with_output_ref_calls(True)\n\n expect = driver.call()\n\n expect\\\n .with_output_vcf()\\\n .has_record_for_variant(Variant(chrom, 0, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(0).has_min_read_depth(0)\n\n expect\\\n .with_output_vcf()\\\n .has_record_for_variant(Variant(chrom, 1, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n def test_min_depth_computation_end_boundary_conditions(self):\n sample_name = \"bah\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\", chrom=chrom\n ).with_read(\n \"....................................... \", n_rev=5, n_fwd=5, sample_name=sample_name\n ).with_output_ref_calls(True)\n\n expect = driver.call()\n\n expect\\\n .with_output_vcf()\\\n .has_record_for_variant(Variant(chrom, 0, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(10).has_min_read_depth(10)\n\n expect\\\n .with_output_vcf()\\\n .has_record_for_variant(Variant(chrom, 39, \"A\", ref_alt))\\\n .with_sample(sample_name).has_read_depth(0).has_min_read_depth(0)\n\n def test_min_depth_computation_with_mixed_depth_of_reads_when_no_chunking_occurs(self):\n sample_name = \"bah\"\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\", chrom=chrom\n ).with_read(\n \"........................................\", n_rev=10, n_fwd=10, sample_name=sample_name\n ).with_read(\n \"................................... \", n_rev=2, n_fwd=2, sample_name=sample_name\n ).with_read(\n \" ................................... 
\", n_rev=1, n_fwd=1, sample_name=sample_name\n ).with_output_ref_calls(True)\n\n expect = driver.call()\n\n expect\\\n .with_output_vcf()\\\n .has_record_for_variant(Variant(chrom, 0, \"A\", ref_alt))\\\n .with_sample(sample_name)\\\n .has_read_depth(round(20 + 4 * 35 / 40 + 2 * 35 / 40))\\\n .has_min_read_depth(20)\n", "id": "5981631", "language": "Python", "matching_score": 2.2544772624969482, "max_stars_count": 8, "path": "test/wecall_acceptance/reference_calling/test_annotations.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestCallingUsingSkippedSequenceBasic(BaseTest):\n def setUp(self):\n BaseTest.setUp(self)\n self.scv = SVCDriver(self)\n self.scv.with_turn_on_large_variant_calls(True)\n self.scv.with_verbosity(6)\n\n def test_should_call_variants_minimal_example(self):\n self.scv.with_verbosity(6)\n self.scv.with_ref_sequence(\n \"ATAAAAAATATGTACATAAAAATCAAAATCAAAGAAAGAACATGCAGTAGCTGAAAAAAAATATCTTCTCACCCTAAAACTGCTCTATGTTTTAAACTATTATTGCTAGGATCACTAGGACTTAGTAAAAAGCAATGCCTTACACAGGCAAC\", # noqa\n pos_from=10\n ).with_read(\n \" ..............***********........ \", # noqa\n cigar_string=\"14M8S\", read_mate_start=91, n_rev=2, n_fwd=2, read_start=66\n ).with_read(\n \" .......***********.............. \", # noqa\n cigar_string=\"7S14M\", read_start=91, n_rev=2, n_fwd=2, read_mate_start=66\n )\n expect = self.scv.call()\n expect.with_output_vcf() \\\n .has_record_for_variant(Variant(\"1\", 79, 'CACCCTAAAACT', 'C'))\n\n def test_should_call_variants_simple_example(self):\n l_ref_padding = \"CTTAAAGTGTAATAAAAAATATGTACATAAAAATCAAAATCAAAGAAAGAACATGCAGTAGCTGAAAAAAAATATCTTCTC\"\n r_ref_padding = \"GCTCTATGTTTTAAACTATTATTGCTAGGATCACTAGGACTTAGTAAAAAGCAATGCCTTACACAGGCAACAA\"\n overlap = \"AAAAGCAT\"\n remaining_deleted = \"ACCCTAAAACTTAGAGTATTCTCAATAAAAAAAAAAAAATTAAAAAAAAA\"\n seq_buffer = 4 # amount of soft clipping\n self.call_deletion(l_ref_padding, overlap, remaining_deleted, r_ref_padding, seq_buffer)\n\n def test_should_call_variants_simple_example_no_overlap(self):\n l_ref_padding = \"CTTAAAGTGTAATAAAAAATATGTACATAAAAATCAAAATCAAAGAAAGAACATGCAGTAGCTGAAAAAAAATATCTTCTC\"\n r_ref_padding = \"GCTCTATGTTTTAAACTATTATTGCTAGGATCACTAGGACTTAGTAAAAAGCAATGCCTTACACAGGCAACAA\"\n overlap = \"\"\n remaining_deleted = \"ACCCTAAAACTTAGAGTATTCTCAATAAAAAAAAAAAAATTAAAAAAAAA\"\n seq_buffer = 8 # amount of soft clipping\n self.call_deletion(l_ref_padding, overlap, remaining_deleted, r_ref_padding, seq_buffer)\n\n def test_should_call_variants_simple_example_some_overlap(self):\n l_ref_padding = \"CTTAAAGTGTAATAAAAAATATGTACATAAAAATCAAAATCAAAGAAAGAACATGCAGTAGCTGAAAAAAAATATCTTCTC\"\n r_ref_padding = \"GCTCTATGTTTTAAACTATTATTGCTAGGATCACTAGGACTTAGTAAAAAGCAATGCCTTACACAGGCAACAA\"\n overlap = \"GCAT\"\n remaining_deleted = \"ACCCTAAAACTTAGAGTATTCTCAATAAAAAAAAAAAAATTAAAAAAAAA\"\n seq_buffer = 6 # amount of soft clipping\n self.call_deletion(l_ref_padding, overlap, remaining_deleted, r_ref_padding, seq_buffer)\n\n def call_deletion(self, l_ref_padding, overlap, remaining_deleted, r_ref_padding, seq_buffer):\n chrom = '20'\n ref_start = 9696680\n sample_name = 'sample'\n\n reference_sequence = l_ref_padding + overlap + remaining_deleted + overlap + r_ref_padding\n\n del_str = \"*\" * (len(overlap) + len(remaining_deleted))\n\n alt_read_1 = l_ref_padding + overlap + del_str + \".\" * seq_buffer + \" \" * (len(r_ref_padding) - seq_buffer)\n 
ref_read_1 = l_ref_padding + overlap + \".\" * seq_buffer + \" \" * (len(r_ref_padding) - seq_buffer + len(del_str))\n\n alt_read_2 = \" \" * (len(l_ref_padding) - seq_buffer) + \".\" * seq_buffer + del_str + overlap + r_ref_padding\n ref_read_2 = \" \" * (len(l_ref_padding) - seq_buffer + len(del_str)) + \".\" * seq_buffer + overlap + r_ref_padding\n\n event_start = ref_start + len(l_ref_padding)\n event_end = event_start + len(overlap) + len(remaining_deleted)\n\n self.scv.with_ref_sequence(\n reference_sequence, chrom=chrom, pos_from=ref_start\n ).with_read(\n alt_read_1, n_fwd=2, n_rev=2, chrom=chrom, sample_name=sample_name, read_start=ref_start,\n cigar_string='{}M{}S'.format(len(l_ref_padding) + len(overlap), seq_buffer), read_mate_start=event_end\n ).with_read(\n alt_read_2, n_fwd=2, n_rev=2, chrom=chrom, sample_name=sample_name, read_start=event_end,\n cigar_string='{}S{}M'.format(seq_buffer, len(overlap) + len(r_ref_padding)), read_mate_start=ref_start\n ).with_read(\n ref_read_1, n_fwd=2, n_rev=2, chrom=chrom, sample_name=sample_name, read_start=ref_start,\n cigar_string='{}M'.format(len(l_ref_padding) + len(overlap) + seq_buffer), read_mate_start=event_end\n ).with_read(\n ref_read_2, n_fwd=2, n_rev=2, chrom=chrom, sample_name=sample_name, read_start=event_end - seq_buffer,\n cigar_string='{}M'.format(seq_buffer + len(overlap) + len(r_ref_padding)), read_mate_start=ref_start)\n\n expect = self.scv.call()\n\n base_before = reference_sequence[len(l_ref_padding) - 1:len(l_ref_padding)]\n\n variant = Variant(\n chrom,\n event_start - 1,\n base_before + overlap + remaining_deleted,\n base_before\n )\n\n expect.with_output_vcf()\\\n .has_record_for_variant(variant)\\\n .with_sample(sample_name)\\\n .has_genotype('0/1')\n", "id": "12203933", "language": "Python", "matching_score": 3.1358015537261963, "max_stars_count": 8, "path": "test/wecall_acceptance/calling_using_skipped_sequence/test_calling_with_skipped_sequence_basic.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestCallingForTrio(AsciiWecallRunnerTest):\n\n def test_multi_sample_variant_calling(self):\n reference = \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\"\n samples = {\n \"NA12878\": [\" ..............C............. \",\n \" ,,,,,,,,,,,,,,,,,,,c,,,,,,,,, \",\n \" ,,,,,,,,,,*,,,,,,,,,,,,,,,,,,,,,, \",\n \"..............*............. \"],\n\n \"NA12891\": [\" ..............C............. \",\n \" ,,,,,,,,,,,,,,,,,,,c,,,,,,,,, \",\n \" ,,,,,,,,,,*,,,,,,,,,,,,,,,,,,,,,, \",\n \"..............*............. \"],\n\n \"NA12892\": [\" ..............C............. \",\n \" ,,,,,,,,,,,,,,,,,,,c,,,,,,,,, \",\n \" ,,,,,,,,,,*,,,,,,,,,,,,,,,,,,,,,, \",\n \"..............*............. 
\"]\n }\n\n expected_haplotypes = {\n \"NA12878\": [\"..............*...........................\",\n \".....................C....................\"],\n\n \"NA12891\": [\"..............*...........................\",\n \".....................C....................\"],\n\n \"NA12892\": [\"..............*...........................\",\n \".....................C....................\"]\n }\n\n self.calls_variants_from_samples(\n reference, samples, expected_haplotypes)\n\n def test_multi_sample_variant_calling_with_some_regions_covered_in_only_one_sample_new(self):\n chrom = \"1\"\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTTCTCTAATTGGGTCACGTATGCATGACGTTGTGGGGAACCCCTGG\", chrom=chrom\n ).with_read(\n \" ..............C............. \", sample_name=\"NA12878\" # noqa\n ).with_read(\n \" ,,,,,,,,,,,,,,,,,,,c,,,,,,,,, \", sample_name=\"NA12878\" # noqa\n ).with_read(\n \" ,,,,,,,,,,*,,,,,,,,,,,,,,,,,,,,,, \", sample_name=\"NA12878\" # noqa\n ).with_read(\n \"..............*............. \", sample_name=\"NA12878\" # noqa\n ).with_read( # noqa\n \" ..............A................\", sample_name=\"NA12878\" # noqa\n ).with_read(\n \" ..............A................\", sample_name=\"NA12878\" # noqa\n ).with_read(\n \" ..............A................\", sample_name=\"NA12878\" # noqa\n ).with_read(\n \" ..............A................\", sample_name=\"NA12878\" # noqa\n ).with_read(\n \" ..............C............. \", sample_name=\"NA12891\" # noqa\n ).with_read(\n \" ,,,,,,,,,,,,,,,,,,,c,,,,,,,,, \", sample_name=\"NA12891\" # noqa\n ).with_read(\n \" ,,,,,,,,,,*,,,,,,,,,,,,,,,,,,,,,, \", sample_name=\"NA12891\" # noqa\n ).with_read(\n \"..............*............. \", sample_name=\"NA12891\" # noqa\n ).with_read(\n \" ..............C............. \", sample_name=\"NA12892\" # noqa\n ).with_read(\n \" ,,,,,,,,,,,,,,,,,,,c,,,,,,,,, \", sample_name=\"NA12892\" # noqa\n ).with_read(\n \" ,,,,,,,,,,*,,,,,,,,,,,,,,,,,,,,,, \", sample_name=\"NA12892\" # noqa\n ).with_read(\n \"..............*............. 
\", sample_name=\"NA12892\" # noqa\n )\n\n expect = driver.call()\n\n vcf = expect \\\n .with_output_vcf() \\\n .record_count(3)\n\n C_CA = vcf.has_record_for_variant(Variant(chrom, 13, \"CA\", \"C\"))\n C_CA.with_sample(\"NA12878\").has_genotype(\"0/1\")\n C_CA.with_sample(\"NA12891\").has_genotype(\"0/1\")\n C_CA.with_sample(\"NA12892\").has_genotype(\"0/1\")\n\n A_C = vcf.has_record_for_variant(Variant(chrom, 21, \"A\", \"C\"))\n A_C.with_sample(\"NA12878\").has_genotype(\"0/1\")\n A_C.with_sample(\"NA12891\").has_genotype(\"0/1\")\n A_C.with_sample(\"NA12892\").has_genotype(\"0/1\")\n\n T_A = vcf.has_record_for_variant(Variant(chrom, 69, \"T\", \"A\"))\n T_A.with_sample(\"NA12878\").has_genotype(\"1/1\")\n T_A.with_sample(\"NA12891\").has_genotype(\"./.\")\n T_A.with_sample(\"NA12892\").has_genotype(\"./.\")\n", "id": "7606102", "language": "Python", "matching_score": 3.8182921409606934, "max_stars_count": 8, "path": "test/wecall_acceptance/multi_sample_diploid/test_trio.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nref_alt = \"<NON_REF>\"\n\n\nclass TestRefCallingMaxRefCallSize(AsciiWecallRunnerTest):\n def test_splits_reference_call_into_three_records(self):\n chrom = \"1\"\n sample = \"bah.asdhaslkdghalsdkfq25451c`52980biqweuo8!\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\", chrom=chrom\n ).with_read(\n \" \", sample_name=sample\n ).with_output_ref_calls(True).with_max_ref_call_size(20)\n\n vcf_expect = driver.call().with_output_vcf()\n\n vcf_expect.record_count(3)\n vcf_expect.has_record(chrom, 0, \"A\", ref_alt).with_sample(sample).has_genotype(\"0/0\")\n vcf_expect.has_record(chrom, 20, \"A\", ref_alt).with_sample(sample).has_genotype(\"0/0\")\n vcf_expect.has_record(chrom, 40, \"A\", ref_alt).with_sample(sample).has_genotype(\"0/0\")\n", "id": "2416620", "language": "Python", "matching_score": 3.0300252437591553, "max_stars_count": 8, "path": "test/wecall_acceptance/reference_calling/test_cli.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import expectedFailure\n\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestNormalizeVariantCallsOption(BaseTest):\n def test_should_normalize_to_having_large_indel_only_a_few_other_variants(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self).with_ref_sequence(\n \"GCCCCAGCCTCCCAAAGTGCATTGATTTTGTTGTTGTTGTGCTTATTTGCACTCCAGCCTGGCCTCTCCTTTCTTG\", chrom=chrom\n ).with_read(\n \"....................***********TGGGATTACAAGTGTGAACCATCGT....................\",\n n_fwd=10, n_rev=10, sample_name=sample_name\n ).with_read(\n \"....................*****************.......................................\",\n n_fwd=10, n_rev=10, sample_name=sample_name\n ).with_normalize_variant_calls(True)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(9)\n\n vcf_expect.has_record(chrom, 19, \"CATTGATTTTGTTGTTGT\", \"C\").with_sample(\n sample_name).has_genotype(\"1|1\")\n vcf_expect.has_record(chrom, 39, \"T\", \"G\").with_sample(\n sample_name).has_genotype(\"0|1\")\n vcf_expect.has_record(chrom, 41, \"C\", \"A\").with_sample(\n sample_name).has_genotype(\"0|1\")\n vcf_expect.has_record(chrom, 44, \"A\", \"ACAAG\").with_sample(\n sample_name).has_genotype(\"0|1\")\n 
vcf_expect.has_record(chrom, 46, \"T\", \"G\").with_sample(\n sample_name).has_genotype(\"0|1\")\n vcf_expect.has_record(chrom, 49, \"C\", \"A\").with_sample(\n sample_name).has_genotype(\"0|1\")\n vcf_expect.has_record(chrom, 51, \"C\", \"CCA\").with_sample(\n sample_name).has_genotype(\"0|1\")\n vcf_expect.has_record(chrom, 54, \"C\", \"G\").with_sample(\n sample_name).has_genotype(\"0|1\")\n vcf_expect.has_record(chrom, 55, \"A\", \"T\").with_sample(\n sample_name).has_genotype(\"0|1\")\n\n @expectedFailure\n def test_should_call_long_hom_deletion_separately_from_short_het_deletions(self):\n sample_name = \"NA12878\"\n chrom = \"3\"\n\n svc_driver = SVCDriver(self).with_ref_sequence(\n \"ACTACTTGTCCTTTCCGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACGAGGTCAGGAGATCGAGACCA\"\n \"TCCTGGCTAACACGGTGAAACCCCGTCTCTACTAAAAAAATACAAAAAATTAGCCGGGCGTGGTAGCGGGCGCCTGTAGTCCCAGCTACTCGGGAGGCTGA\"\n \"GGCAGGGGAATGGCGTGAACCCGGGAGGCGGAGCTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAA\"\n \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGAACTACTTGTCCTTTCCTTTGTGCGTGTGTGCGTGTGTGTGTGTGTGT\",\n chrom=chrom\n ).with_read(\n \"ACTACTTGTCCTTTCC*************************************************************************************\"\n \"*****************************************************************************************************\"\n \"*****************************************************************************************************\"\n \"**************************************************************TTTGTGCGT******GTGTGTGTGTGTGTGT\",\n n_fwd=20, n_rev=20, sample_name=sample_name, chrom=chrom\n ).with_read(\n \"ACTACTTGTCCTTTCC*************************************************************************************\"\n \"*****************************************************************************************************\"\n \"*****************************************************************************************************\"\n \"**************************************************************TTTGTGCGTGT****GTGTGTGTGTGTGTGT\",\n n_fwd=20, n_rev=20, sample_name=sample_name, chrom=chrom\n ).with_normalize_variant_calls(True)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(3)\n\n vcf_expect.has_record(\n chrom, 16,\n \"CGGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACGAGGTCAGGAG\"\n \"ATCGAGACCATCCTGGCTAACACGGTGAAACCCCGTCTCTACTAAAAAAATACAAAAAATTAGCCGGGCGTGGTAG\"\n \"CGGGCGCCTGTAGTCCCAGCTACTCGGGAGGCTGAGGCAGGGGAATGGCGTGAACCCGGGAGGCGGAGCTTGCAGT\"\n \"GAGCCGAGATCGCGCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAAAAAAAAAAAAAAAA\"\n \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAGAACTACTTGTCCTTTCC\",\n \"C\"\n ).with_sample(sample_name).has_genotype(\"1|1\")\n\n vcf_expect.has_record(chrom, 374, \"TGTGTGC\", \"T\").with_sample(sample_name).has_genotype(\"0|1\")\n vcf_expect.has_record(chrom, 376, \"TGTGC\", \"T\").with_sample(sample_name).has_genotype(\"1|0\")\n\n def test_should_call_basic_snps(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self).with_ref_sequence(\n \"GCCCCAGCCTCCCAAAGTGCATTGATTTTGTTGTTGTTGTGCTTATTTGCACTCCAGCCTGGCCTCTCCTTTCTTG\", chrom=chrom\n ).with_read(\n \"...............T.........A...............G..................................\",\n n_fwd=10, n_rev=10, sample_name=sample_name\n ).with_read(\n \".........................A..........................A.......................\",\n n_fwd=10, n_rev=10, sample_name=sample_name\n ).with_normalize_variant_calls(True)\n\n expect = 
svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(4)\n\n vcf_expect.has_record(chrom, 15, \"A\", \"T\").with_sample(\n sample_name).has_genotype(\"1|0\")\n vcf_expect.has_record(chrom, 25, \"T\", \"A\").with_sample(\n sample_name).has_genotype(\"1|1\")\n vcf_expect.has_record(chrom, 41, \"C\", \"G\").with_sample(\n sample_name).has_genotype(\"1|0\")\n vcf_expect.has_record(chrom, 52, \"T\", \"A\").with_sample(\n sample_name).has_genotype(\"0|1\")\n", "id": "3840889", "language": "Python", "matching_score": 3.7561252117156982, "max_stars_count": 8, "path": "test/wecall_acceptance/output_representations/test_normalized_variant_calls.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestComplexIndelOutputRepresentations(BaseTest):\n def test_converts_del_plus_ins_to_snps(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self).with_ref_sequence(\n \"ACGCCCCTGC**AAAAAAAAAATCGTCTGTG\", chrom=chrom\n ).with_read(\n \"........**AT...................\", n_fwd=10, n_rev=10, sample_name=sample_name\n ).with_read(\n \"..........**...................\", n_fwd=10, n_rev=10, sample_name=sample_name)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(2)\n\n vcf_expect.has_record(chrom, 8, \"G\", \"A\")\n vcf_expect.has_record(chrom, 9, \"C\", \"T\")\n\n def test_always_puts_snp_on_right_1(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self).with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAATCGTCTGTG\", chrom=chrom\n ).with_read(\n \"........T*...................\", n_fwd=10, n_rev=10, sample_name=sample_name\n ).with_read(\n \".............................\", n_fwd=10, n_rev=10, sample_name=sample_name)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(2)\n\n vcf_expect.has_record(chrom, 7, \"TG\", \"T\")\n vcf_expect.has_record(chrom, 9, \"C\", \"T\")\n\n def test_always_puts_snp_on_right_2(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self).with_ref_sequence(\n \"ACGCCCCTGCAAAAAAAAAATCGTCTGTG\", chrom=chrom\n ).with_read(\n \"........*T...................\", n_fwd=10, n_rev=10, sample_name=sample_name\n ).with_read(\n \".............................\", n_fwd=10, n_rev=10, sample_name=sample_name)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(2)\n\n vcf_expect.has_record(chrom, 7, \"TG\", \"T\")\n vcf_expect.has_record(chrom, 9, \"C\", \"T\")\n\n def test_joins_left_neighbouring_snp_to_deletion_and_simplifies_representation(self):\n sample_name = \"sir_freedom_fries\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self).with_ref_sequence(\n \"ACGCCCCTGTAAAAAAAAAATCGTCTGTG\", chrom=chrom\n ).with_read(\n \"........T*...................\", n_fwd=10, n_rev=10, sample_name=sample_name\n ).with_read(\n \".............................\", n_fwd=10, n_rev=10, sample_name=sample_name)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(1)\n vcf_expect.has_record(chrom, 7, \"TG\", \"T\")\n\n def test_switches_left_neighbouring_snp_to_insertion(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self).with_ref_sequence(\n \"ACGCCCCTG*AAAAAAAAAATCGTCTGTG\", chrom=chrom\n ).with_read(\n \"........TC...................\", n_fwd=10, n_rev=10, 
sample_name=sample_name\n ).with_read(\n \".........*...................\", n_fwd=10, n_rev=10, sample_name=sample_name)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(2)\n\n vcf_expect.has_record(chrom, 6, \"C\", \"CT\")\n vcf_expect.has_record(chrom, 8, \"G\", \"C\")\n\n def test_leaves_insertion_on_left(self):\n sample_name = \"a_sample\"\n chrom = \"1\"\n\n svc_driver = SVCDriver(self).with_ref_sequence(\n \"ACGCCCCT*CAAAAAAAAAATCGTCTGTG\", chrom=chrom\n ).with_read(\n \"........GT...................\", n_fwd=10, n_rev=10, sample_name=sample_name\n ).with_read(\n \"........*....................\", n_fwd=10, n_rev=10, sample_name=sample_name)\n\n expect = svc_driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.record_count(2)\n\n vcf_expect.has_record(chrom, 7, \"T\", \"TG\")\n vcf_expect.has_record(chrom, 8, \"C\", \"T\")\n", "id": "6946414", "language": "Python", "matching_score": 3.3113107681274414, "max_stars_count": 8, "path": "test/wecall_acceptance/output_representations/test_indels_next_to_snps.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestReferenceCallingMultiSample(BaseTest):\n def test_calls_whole_region_as_reference_if_no_variants(self):\n chrom = \"1\"\n sample_1 = \"sample_1\"\n sample_2 = \"sample_2\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ........................... \", chrom=chrom, sample_name=sample_1, n_fwd=5, n_rev=5\n ).with_read(\n \" ................................ \", chrom=chrom, sample_name=sample_2, n_fwd=5, n_rev=5\n ).with_output_ref_calls(True)\n\n vcf_expect = driver.call().with_output_vcf()\n vcf_expect.has_reference_calls_for_region(chrom, 0, 41)\n\n def test_calls_correct_reference_when_one_sample_has_snp_and_tother_has_indel(self):\n chrom = \"1\"\n sample_1 = \"sample_1\"\n sample_2 = \"sample_2\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACG*CCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ..........T................ \", chrom=chrom, sample_name=sample_1\n ).with_read(\n \" ...............T........... \", chrom=chrom, sample_name=sample_1\n ).with_read(\n \" ...........T.*................ \", chrom=chrom, sample_name=sample_2\n ).with_read(\n \" ...........T.*..................... \", chrom=chrom, sample_name=sample_2\n ).with_read(\n \"...............T.*......... \", chrom=chrom, sample_name=sample_2\n ).with_output_ref_calls(True)\n\n vcf_expect = driver.call().with_output_vcf()\n\n # Has only 4 records which are:-\n vcf_expect.has_reference_calls_for_region(chrom, 0, 15)\n vcf_expect.has_record(chrom, 15, \"C\", \"T\")\n vcf_expect.has_record(chrom, 16, \"G\", \"GT\")\n vcf_expect.has_reference_calls_for_region(chrom, 17, 41)\n\n def test_calls_correct_ref_calls_with_one_snp_even_if_reference_for_tother(self):\n chrom = \"1\"\n sample_1 = \"sample_1\"\n sample_2 = \"sample_2\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ..........T................ \", chrom=chrom, sample_name=sample_1\n ).with_read(\n \" ...............T........... \", chrom=chrom, sample_name=sample_1\n ).with_read(\n \" ............................... 
\", chrom=chrom, sample_name=sample_2\n ).with_read(\n \" ................................... \", chrom=chrom, sample_name=sample_2\n ).with_read(\n \"........................... \", chrom=chrom, sample_name=sample_2\n ).with_output_ref_calls(True)\n\n vcf_expect = driver.call().with_output_vcf()\n vcf_expect.has_reference_calls_for_region(chrom, 0, 17)\n vcf_expect.has_record(chrom, 17, \"C\", \"T\").with_sample(sample_2).has_genotype(\"0/0\")\n vcf_expect.has_reference_calls_for_region(chrom, 18, 41)\n", "id": "9441169", "language": "Python", "matching_score": 3.8948802947998047, "max_stars_count": 8, "path": "test/wecall_acceptance/reference_calling/test_multi_sample_ref_calling.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall.utils.interval import ChromInterval\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestRefCalling(BaseTest):\n def test_dont_call_reference_between_variant_and_insertion_due_to_vcf_rep_issues(self):\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACG*CCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ..........T................ \", chrom=chrom\n ).with_read(\n \" ...............T........... \", chrom=chrom\n ).with_read(\n \" ...........T.*................ \", chrom=chrom\n ).with_read(\n \" ...........T.*..................... \", chrom=chrom\n ).with_read(\n \"...............T.*......... \", chrom=chrom\n ).with_output_ref_calls(True)\n\n vcf_expect = driver.call().with_output_vcf()\n\n # Has only 4 records which are:-\n vcf_expect.has_reference_calls_for_region(chrom, 0, 15)\n vcf_expect.has_record(chrom, 15, \"C\", \"T\")\n vcf_expect.has_record(chrom, 16, \"G\", \"GT\")\n vcf_expect.has_reference_calls_for_region(chrom, 17, 41)\n\n def test_calls_whole_region_as_reference_if_no_variants(self):\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ........................... \", chrom=chrom\n ).with_read(\n \" ........................... \", chrom=chrom\n ).with_read(\n \" ............................... \", chrom=chrom\n ).with_read(\n \" ................................... \", chrom=chrom\n ).with_read(\n \"........................... \", chrom=chrom\n ).with_output_ref_calls(True)\n\n vcf_expect = driver.call().with_output_vcf()\n vcf_expect.has_reference_calls_for_region(chrom, 0, 41)\n\n def test_calls_correct_ref_calls_with_one_snp(self):\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ..........T................ \", chrom=chrom\n ).with_read(\n \" ...............T........... \", chrom=chrom\n ).with_read(\n \" ............................... \", chrom=chrom\n ).with_read(\n \" ................................... \", chrom=chrom\n ).with_read(\n \"........................... 
\", chrom=chrom\n ).with_output_ref_calls(True)\n\n vcf_expect = driver.call().with_output_vcf()\n vcf_expect.has_reference_calls_for_region(chrom, 0, 17)\n vcf_expect.has_record(chrom, 17, \"C\", \"T\")\n vcf_expect.has_reference_calls_for_region(chrom, 18, 41)\n\n def test_calls_correct_ref_calls_with_one_mnp(self):\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ..........TTTT............. \", chrom=chrom\n ).with_read(\n \" ...............TTTT........ \", chrom=chrom\n ).with_read(\n \" ............................... \", chrom=chrom\n ).with_read(\n \" ................................... \", chrom=chrom\n ).with_read(\n \"........................... \", chrom=chrom\n ).with_output_ref_calls(True).with_allow_MNP_calls(True)\n\n vcf_expect = driver.call().with_output_vcf()\n vcf_expect.has_reference_calls_for_region(chrom, 0, 17)\n vcf_expect.has_record(chrom, 17, \"CCCC\", \"TTTT\")\n vcf_expect.has_reference_calls_for_region(chrom, 21, 41)\n\n def test_calls_ref_calls_correctly_with_mnp_that_contains_snp(self):\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ..........TTTT............. \", chrom=chrom\n ).with_read(\n \" ...............TTTT........ \", chrom=chrom\n ).with_read(\n \" ..............T................ \", chrom=chrom\n ).with_read(\n \" ..............T.................... \", chrom=chrom\n ).with_read(\n \"..................T........ \", chrom=chrom\n ).with_output_ref_calls(True).with_allow_MNP_calls(True)\n\n vcf_expect = driver.call().with_output_vcf()\n\n vcf_expect.has_reference_calls_for_region(chrom, 0, 17)\n vcf_expect.has_record(chrom, 17, \"CCCC\", \"TTTT\")\n vcf_expect.has_record(chrom, 18, \"C\", \"T\")\n vcf_expect.has_reference_calls_for_region(chrom, 21, 41)\n\n def test_calls_correct_ref_calls_with_one_del(self):\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACTCCCCATAAAAAAATTTTTTTTTTT\",\n ).with_read(\n \" ..........*................ \", chrom=chrom\n ).with_read(\n \" ...............*........... \", chrom=chrom\n ).with_read(\n \" ............................... \", chrom=chrom\n ).with_read(\n \" ................................... \", chrom=chrom\n ).with_read(\n \"........................... \", chrom=chrom\n ).with_output_ref_calls(True)\n\n vcf_expect = driver.call().with_output_vcf()\n vcf_expect.has_reference_calls_for_region(chrom, 0, 16)\n vcf_expect.has_record(chrom, 16, \"TC\", \"T\")\n vcf_expect.has_reference_calls_for_region(chrom, 18, 41)\n\n def test_calls_correct_ref_calls_with_one_ins(self):\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAACGCACG*CCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ..........*................ \", chrom=chrom\n ).with_read(\n \" ...............*........... \", chrom=chrom\n ).with_read(\n \" .............T................. \", chrom=chrom\n ).with_read(\n \" .............T..................... \", chrom=chrom\n ).with_read(\n \".................T......... 
\", chrom=chrom\n ).with_output_ref_calls(True)\n\n vcf_expect = driver.call().with_output_vcf()\n vcf_expect.has_reference_calls_for_region(chrom, 0, 16)\n vcf_expect.has_record(chrom, 16, \"G\", \"GT\")\n vcf_expect.has_reference_calls_for_region(chrom, 17, 41)\n\n def test_calls_correct_ref_calls_with_cluster_of_variants(self):\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAATAACGCACG*CCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ..*.......*................ \", chrom=chrom\n ).with_read(\n \" .......*.......*........... \", chrom=chrom\n ).with_read(\n \" .............T......T.......... \", chrom=chrom\n ).with_read(\n \" .............T......T.............. \", chrom=chrom\n ).with_read(\n \".................T......T.. \", chrom=chrom\n ).with_output_ref_calls(True)\n\n vcf_expect = driver.call().with_output_vcf()\n vcf_expect.has_reference_calls_for_region(chrom, 0, 8)\n vcf_expect.has_record(chrom, 8, \"TA\", \"T\")\n vcf_expect.has_reference_calls_for_region(chrom, 10, 16)\n vcf_expect.has_record(chrom, 16, \"G\", \"GT\")\n vcf_expect.has_reference_calls_for_region(chrom, 17, 23)\n vcf_expect.has_record(chrom, 23, \"A\", \"T\")\n vcf_expect.has_reference_calls_for_region(chrom, 24, 41)\n\n def test_calls_reference_on_location_with_low_quality_variant_support(self):\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAATAACGCACGCCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ..*....................... \", n_fwd=2, n_rev=1\n ).with_read(\n \".................T.....T......... \",\n \" 1 \", n_fwd=1, n_rev=1\n ).with_read(\n \".......................T.. \", n_fwd=1, n_rev=0\n ).with_output_ref_calls(True)\n\n expect = driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.has_reference_calls(ChromInterval(chrom, 0, 8))\n\n vcf_expect.has_record_for_variant(Variant(chrom, 8, \"TA\", \"T\"))\n\n vcf_expect.has_reference_calls(ChromInterval(chrom, 10, 23))\n\n vcf_expect.has_record_for_variant(Variant(chrom, 23, \"A\", \"T\"))\n\n vcf_expect.has_reference_calls(ChromInterval(chrom, 24, 41))\n\n def test_calls_correct_reference_between_clusters_with_uncalled_indel_between(self):\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAATAACGCACGCCCCATAAAAAAATTTTTTTTTTT\", chrom=chrom\n ).with_read(\n \" ..*....................... \", n_fwd=10, n_rev=10\n ).with_read(\n \".......................*............ \", n_fwd=1, n_rev=1\n ).with_read(\n \".......................T.. 
\", n_fwd=10, n_rev=10\n ).with_output_ref_calls(True).with_max_cluster_distance(5)\n\n expect = driver.call()\n vcf_expect = expect.with_output_vcf()\n vcf_expect.has_reference_calls(ChromInterval(chrom, 0, 8))\n\n vcf_expect.has_record_for_variant(Variant(chrom, 8, \"TA\", \"T\"))\n\n vcf_expect.has_reference_calls(ChromInterval(chrom, 10, 23))\n\n vcf_expect.has_record_for_variant(Variant(chrom, 23, \"A\", \"T\"))\n\n vcf_expect.has_reference_calls(ChromInterval(chrom, 24, 41))\n", "id": "5743252", "language": "Python", "matching_score": 4.122104644775391, "max_stars_count": 8, "path": "test/wecall_acceptance/reference_calling/test_basic_calling.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\nfrom wecall_test_drivers.vcf_expectation import ref_alt\n\n\nclass TestRefCallingQuality(BaseTest):\n def test_get_unknown_quality_if_no_reads_span_region(self):\n chrom = \"1\"\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\", chrom=chrom\n ).with_read(\n \" \",\n ).with_output_ref_calls(True)\n\n driver.call().with_output_vcf().has_record(chrom, 0, \"A\", ref_alt).with_quality(None)\n", "id": "3140987", "language": "Python", "matching_score": 1.1128146648406982, "max_stars_count": 8, "path": "test/wecall_acceptance/reference_calling/test_quality.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.bamutils.sample_bank import SampleBank\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.ascii_wecall_runner import AsciiWecallRunnerTest\nfrom wecall.vcfutils.genotype_call import GenotypeCall as GT\nfrom wecall_test_drivers.variant_caller_builder import VariantCallerBuilderFromSampleBank\nfrom wecall_test_drivers.vcf_expectation import VCFExpectation\n\n\nclass TestSNPGenotyping(AsciiWecallRunnerTest):\n\n def test_genotypes_snp_as_heterozygous_when_supported_by_2_reads_out_of_5(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\"..........................................\",\n \"..........................................\",\n \"..........................................\",\n \".....................C....................\",\n \".....................C....................\"],\n\n [(21, \"A\", \"C\", {\"GT\": GT(\"1/0\"),\n \"GQ\": [66],\n \"DP\": [5],\n \"AD\": [3, 2]})]\n )\n\n def test_genotypes_snp_as_heterozygous_when_supported_by_3_reads_out_of_5(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\"..........................................\",\n \"..........................................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\"],\n\n [(21, \"A\", \"C\", {\"GT\": GT(\"1/0\"),\n \"GQ\": [66],\n \"DP\": [5],\n \"AD\": [2, 3]})]\n )\n\n def test_genotypes_snp_as_heterozygous_when_supported_by_4_reads_out_of_5(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\"..........................................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\"],\n\n [(21, \"A\", \"C\", {\"GT\": 
GT(\"1/0\"),\n \"GQ\": [27],\n \"DP\": [5],\n \"AD\": [1, 4]})]\n )\n\n def test_genotypes_snp_as_homozygous_when_supported_by_5_reads_out_of_5(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\"],\n\n [(21, \"A\", \"C\", {\"GT\": GT(\"1/1\"),\n \"GQ\": [12],\n \"DP\": [5],\n \"AD\": [0, 5]})]\n )\n\n def test_genotypes_snp_as_heterozygous_when_supported_by_9_reads_out_of_10(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \"..........................................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\"],\n\n [(21, \"A\", \"C\", {\"GT\": GT(\"0/1\"),\n \"GQ\": [12],\n \"DP\": [10],\n \"AD\": [1, 9]})]\n )\n\n def test_genotypes_snp_as_homozygous_when_supported_by_15_reads_out_of_16(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \"..........................................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\",\n \".....................C....................\"],\n\n [(21, \"A\", \"C\", {\"GT\": GT(\"1/1\"),\n \"GQ\": [7],\n \"DP\": [16],\n \"AD\": [1, 15]})]\n )\n\n\nclass TestDeletionGenotyping(AsciiWecallRunnerTest):\n\n def test_genotypes_one_base_deletion_as_heterozygous_when_supported_by_2_reads_out_of_5(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\"..........................................\",\n \"..........................................\",\n \"..........................................\",\n \".....................*....................\",\n \".....................*....................\"],\n\n [(20, \"CA\", \"C\", {\"GT\": GT(\"1/0\"),\n \"DP\": [5],\n \"AD\": [3, 2]})]\n )\n\n def test_genotypes_one_base_deletion_as_heterozygous_when_supported_by_3_reads_out_of_5(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\"..........................................\",\n \"..........................................\",\n \".....................*....................\",\n \".....................*....................\",\n \".....................*....................\"],\n\n [(20, \"CA\", \"C\", {\"GT\": 
GT(\"1/0\"),\n \"DP\": [5],\n \"AD\": [2, 3]})]\n )\n\n def test_genotypes_one_base_deletion_as_heterozygous_when_supported_by_4_reads_out_of_5(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\"..........................................\",\n \".....................*....................\",\n \".....................*....................\",\n \".....................*....................\",\n \".....................*....................\"],\n\n [(20, \"CA\", \"C\", {\"GT\": GT(\"1/0\"),\n \"DP\": [5],\n \"AD\": [1, 4]})]\n )\n\n def test_genotypes_one_base_deletion_as_homozygous_when_supported_by_5_reads_out_of_5(self):\n self.calls_variants_with_sample_data(\n \"AAAAAAAAAAACGCACCCCCCATAAAAAAAATTTTTTTTTTT\", # input\n [\".....................*....................\",\n \".....................*....................\",\n \".....................*....................\",\n \".....................*....................\",\n \".....................*....................\"],\n\n [(20, \"CA\", \"C\", {\"GT\": GT(\"1/1\"),\n \"DP\": [5],\n \"AD\": [0, 5]})]\n )\n\n\nclass TestGenotypeLikelihoodValues(BaseTest):\n def test_should_have_near_zero_AA_genotype_likelihood_for_hom_alt_call(self):\n chr1 = 'chr1'\n sample_bank = SampleBank(\"TTTTTAAAAAAAAAAAAAAAAAAAA\", chrom=chr1)\n\n sequence_bank = sample_bank.add_sample_name('sample_1')\n\n sequence_bank.add_sequence(\n \"............C............\", n_fwd=20, n_rev=20)\n\n vc_wrapper_builder = VariantCallerBuilderFromSampleBank(\n sample_bank, self.work_dir)\n variant_output = vc_wrapper_builder.build().run().output_vcf\n\n vcf_expectation = VCFExpectation(self, variant_output)\n record_expectation = vcf_expectation.has_record_for_variant(Variant(chr1, 12, \"A\", \"C\"))\n sample_expectation = record_expectation.with_sample(\"sample_1\")\n\n sample_expectation.has_genotype(\"1|1\").has_AA_genotype_likelihood(0)\n\n def test_should_have_near_zero_RA_genotype_likelihood_for_het_call(self):\n chr1 = 'chr1'\n sample_bank = SampleBank(\"TTTTTAAAAAAAAAAAAAAAAAAAA\", chrom=chr1)\n\n sequence_bank = sample_bank.add_sample_name('sample_1')\n\n sequence_bank.add_sequence(\n \"............C............\", n_fwd=20, n_rev=20)\n sequence_bank.add_sequence(\n \".........................\", n_fwd=20, n_rev=20)\n\n vc_wrapper_builder = VariantCallerBuilderFromSampleBank(\n sample_bank, self.work_dir)\n variant_output = vc_wrapper_builder.build().run().output_vcf\n\n vcf_expectation = VCFExpectation(self, variant_output)\n record_expectation = vcf_expectation.has_record_for_variant(Variant(chr1, 12, \"A\", \"C\"))\n sample_expectation = record_expectation.with_sample(\"sample_1\")\n\n sample_expectation.has_genotype(\"1|0\").has_RA_genotype_likelihood(0)\n\n def test_should_have_near_zero_RR_genotype_likelihood_for_hom_alt_call(self):\n chr1 = 'chr1'\n sample_bank = SampleBank(\"TTTTTAAAAAAAAAAAAAAAAAAAA\", chrom=chr1)\n\n sequence_bank_1 = sample_bank.add_sample_name('sample_1')\n sequence_bank_1.add_sequence(\n \".........................\", n_fwd=20, n_rev=20)\n\n sequence_bank_2 = sample_bank.add_sample_name('sample_2')\n sequence_bank_2.add_sequence(\n \"............C............\", n_fwd=20, n_rev=20)\n\n vc_wrapper_builder = VariantCallerBuilderFromSampleBank(sample_bank, self.work_dir)\n variant_output = vc_wrapper_builder.build().run().output_vcf\n\n vcf_expectation = VCFExpectation(self, variant_output)\n record_expectation = vcf_expectation.has_record_for_variant(Variant(chr1, 12, \"A\", \"C\"))\n 
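# sample_1 has only reference-supporting reads at this site, so the hom-ref (0|0)\n        # call checked below should carry a near-zero RR genotype likelihood.\n        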
sample_expectation = record_expectation.with_sample(\"sample_1\")\n\n sample_expectation.has_genotype(\"0|0\").has_RR_genotype_likelihood(0.0)\n", "id": "8004733", "language": "Python", "matching_score": 3.7936601638793945, "max_stars_count": 8, "path": "test/wecall_acceptance/genotyping/test_genotyping_in_clean_data.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom os import path\nfrom unittest import expectedFailure\n\nfrom wecall.bamutils.sample_bank import SampleBank\nfrom wecall.bamutils.sequence_bank import AsciiVariantGenerator\nfrom wecall.genomics.variant import Variant\nfrom wecall.vcfutils.info_data import InfoData\nfrom wecall.vcfutils.vcf_builder import VCFBuilder\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\nfrom wecall_test_drivers.variant_caller_builder import VariantCallerBuilderFromSampleBank\nfrom wecall_test_drivers.variant_caller_wrapper import CANDIDATE_VARIANTS_FILE_KEY\n\n\nclass TestCandidateVariantSpecification(BaseTest):\n def calls_variants(self, ref, sequence_list, candidate_ascii_haplotypes, prior, expected_ascii_haplotypes):\n sample_bank = SampleBank(ref)\n sample_bank.add_sample_with_seqs_and_quals(\"TEST\", sequence_list, 1, 0)\n\n variant_generator = AsciiVariantGenerator(sample_bank.reference)\n candidate_variants = variant_generator.get_variants(candidate_ascii_haplotypes)\n expected_variants = variant_generator.get_variants(expected_ascii_haplotypes)\n\n candidate_variant_list = VCFBuilder(path.join(self.work_dir, \"candiate_variants.vcf\"))\n candidate_variant_list.schema.set_info_data('AF', 'A', 'Float', 'Allele Frequency')\n for var in candidate_variants:\n candidate_variant_list.with_record_from_variant(\n var, info=InfoData(candidate_variant_list.schema, {\"AF\": prior})\n )\n candidate_variant_list.build().index()\n\n vc_wrapper_builder = VariantCallerBuilderFromSampleBank(sample_bank, self.work_dir)\n vc_wrapper_builder.configuration[CANDIDATE_VARIANTS_FILE_KEY] = candidate_variant_list.compressed_filename\n callset = vc_wrapper_builder.build().run().get_variant_callset(self)\n\n self.assertEqual(callset.get_variants(), set(expected_variants))\n\n @expectedFailure\n def test_incorrectly_aligned_reads_support_deletion(self):\n self.calls_variants(\n \"TGTTATTAATCCCTTGTCAGATGTTATTAATCCCTTGTCAGT***CGCAAATATTTT\", # reference\n [\" ......................................GCAA \",\n \" ......................................GCAA \",\n \" ......................................GCAA \",\n \" ......................................GCAA \",\n \" ......................................GCAA \",\n \" ......................................GCAA \",\n ],\n # 01234567890123456789012345678901234567890123456789\n [\"..........................................****...........\"], # candidates\n [0.72],\n [\"..........................................****...........\", # expected output\n \"..........................................****...........\"]\n )\n\n def test_incorrectly_aligned_reads_supporting_snp(self):\n self.calls_variants(\n \"TGTTATTAATCCCTTGCCAGAAACCATATCTTTTTTTTTTGCAAATATTTT\", # reference\n [\" TGTTATTAATCCCTTGCCATAAACCATATC \", # input reads\n \" TAATCCCTTGCCATAAACCATATCTTTTTTTTTTG \"],\n\n # 01234567890123456789012345678901234567890123456789\n [\"...................T...............................\"], # candidates\n [0.00000005],\n [\"...................T...............................\", # expected output\n \"...................T...............................\"]\n )\n\n 
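# The reads below are aligned without the long deletion; the candidate VCF written by\n    # calls_variants above (AF carries the prior) is what lets the caller recover it.\n    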
def test_incorrectly_aligned_reads_supporting_large_deletion_with_homopolymer_region(self):\n self.calls_variants(\n \"TGTTATAAAAAAAAAAAATAAATAAATATATAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATCTCTAAAAAAAAT\",\n [\"...................................TCTCT........T .T............T...T...T.T.T..................\",\n # input reads that ALL support a long deletion\n \"...................................T .T...T.T.T................. \",\n ],\n # 01234567890123456789012345678901234567890123456789\n [\"...............................***********************************************..................\"],\n # candidate\n [0.00064],\n [\"...............................***********************************************..................\",\n # expected output\n \"...............................***********************************************..................\"]\n )\n\n @expectedFailure # \"WIP Need to be able to modify the quality mapping\"\n def test_should_not_call_snp_only_supported_by_low_quality_reads(self):\n self.calls_variants(\n \"TGTTATTAATCCCTTGCCAGAAACCATATCTTTTTTTTTTGCAAATATTTT\", # reference\n [\"........................T..........................\",\n \" 1 \",\n \"........................T..........................\",\n \" 1 \",\n \"........................T..........................\",\n \" 1 \",\n \"........................T..........................\",\n \" 1 \"],\n [\"........................T..........................\"],\n [1e-3],\n [\"...................................................\",\n \"...................................................\"]\n )\n\n def test_should_have_zero_bad_reads_for_candidate_variant_with_no_reads_covering_variant(self):\n chrom = \"1\"\n candidate_variant_list = VCFBuilder(path.join(self.work_dir, \"candiate_variants.vcf\"))\n candidate_variant_list.schema.set_info_data('AF', 'A', 'Float', 'Allele Frequency')\n variant_1 = Variant(chrom, 30, 'T', 'C')\n candidate_variant_list.with_record_from_variant(\n variant_1, info=InfoData(candidate_variant_list.schema, {\"AF\": [0.72]}))\n candidate_variant_list.build().index()\n\n svc_driver = SVCDriver(self)\\\n .with_allow_MNP_calls(True)\\\n .with_ref_sequence(\n \"TGTTATTAATCCCTTGTCAGATGTTATTAATCCCTTGTCAGTCCCTTGTCAGT\", chrom=chrom)\\\n .with_read(\n \"...........................C.. 
......................\", n_fwd=10, n_rev=10, sample_name='sample_1')\\\n .with_read(\n \" \", n_fwd=10, n_rev=10, sample_name='sample_2')\\\n .with_candidate_variants_file(candidate_variant_list.compressed_filename)\n\n expect = svc_driver.call()\n\n vcf_expect = expect.with_output_vcf()\n vcf_expect.missing_record_for_variant(variant_1)\n", "id": "7198459", "language": "Python", "matching_score": 3.2765650749206543, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_candidate_variant_specification.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom collections import OrderedDict\nimport copy\nfrom wecall.bamutils.sample_bank import SampleBank\nfrom wecall.bamutils.sequence import Sequence\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics.variant import Variant\nfrom wecall.vcfutils.genotype_call import GenotypeCall\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.variant_caller_builder import VariantCallerBuilderFromSampleBank\n\nDEFAULT_SAMPLE_NAME = 'TEST_SAMPLE'\n\n\nclass AsciiWecallRunnerTest(BaseTest):\n def calls_variants(\n self,\n ref,\n sequence_list,\n expected_ascii_haplotypes=None,\n expected_variant_stubs=None,\n n_fwd=None,\n n_rev=None,\n config_dict=None, ):\n sample_bank = self.__build_default_sample_bank(\n ref, sequence_list, n_fwd, n_rev)\n variant_callset = self.__run_wecall(sample_bank, config_dict)\n expected_variants = self._get_expected_variants(\n expected_ascii_haplotypes, expected_variant_stubs, sample_bank)\n\n self.assertEqual(variant_callset.get_variants(), expected_variants)\n\n def calls_variants_with_genotype(\n self,\n ref,\n sequence_list,\n expected_haplotypes=None,\n expected_variants_with_genotypes=None,\n config_dict=None):\n self.__validate_expected_calls(\n expected_haplotypes,\n expected_variants_with_genotypes)\n sample_bank = self.__build_default_sample_bank(ref, sequence_list)\n variant_callset = self.__run_wecall(sample_bank, config_dict)\n wecall_calls = variant_callset.get_variants_with_genotypes()\n\n if expected_variants_with_genotypes is None:\n expected_calls_for_default_sample = {\n sample_bank.sample_names[0]: expected_haplotypes}\n expected_calls = self.__get_expected_calls_from_sample_ascii_haplotypes(\n expected_calls_for_default_sample, sample_bank.reference)\n else:\n expected_calls = {}\n sample_name = sample_bank.sample_names[0]\n for expected_stub in expected_variants_with_genotypes:\n variant = self._variant_from_stub(\n sample_bank.reference.chrom, expected_stub)\n expected_calls[variant] = {\n sample_name: GenotypeCall(\n expected_stub[3])}\n\n self.assertDictEqual(expected_calls, wecall_calls)\n\n def calls_variants_with_info_annotation(\n self,\n ref,\n sequence_list,\n expected_variants_and_info_annotation,\n config_dict=None,\n ):\n sample_bank = self.__build_default_sample_bank(ref, sequence_list)\n variant_callset = self.__run_wecall(sample_bank, config_dict)\n expected_calls = {}\n\n for expected_stub in expected_variants_and_info_annotation:\n variant = self._variant_from_stub(\n sample_bank.reference.chrom, expected_stub)\n expected_calls[variant] = {}\n\n for sample_data_key, value in expected_stub[3].items():\n expected_calls[variant][sample_data_key] = value\n\n actual_calls = variant_callset.get_variant_records()\n\n # assert the expected and actual variants called are the same\n self.assertEqual(set(expected_calls.keys()), set(actual_calls.keys()))\n\n for variant in expected_calls:\n 
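# Only the INFO fields named in the expected stub are read from the actual record and compared.\n            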
expected_sample_data = expected_calls[variant]\n actual_sample_data = {}\n\n for field in expected_sample_data:\n value = actual_calls[variant].info[field]\n actual_sample_data[field] = value\n\n self.assertEqual(expected_sample_data, actual_sample_data)\n\n def __run_wecall(self, sample_bank, config_dict):\n vc_builder = VariantCallerBuilderFromSampleBank(\n sample_bank, self.work_dir)\n if config_dict is not None:\n for key, value in config_dict.items():\n vc_builder.configuration[key] = value\n vc_wrapper = vc_builder.build()\n vc_callset = vc_wrapper.run().get_variant_callset(self)\n return vc_callset\n\n def calls_variants_from_samples(self,\n ref,\n sample_seqs,\n expected_haplotypes=None,\n expected_call_stubs=None,\n config_dict=None):\n \"\"\"\n :param expected_haplotypes: dictionary: {sample_name : list of two ascii sequences expressing the genotype}\n :param expected_call_stubs: dictionary: {variant_stub: dictionary {sample_name: str(genotype)} }\n \"\"\"\n self.__validate_expected_calls(\n expected_haplotypes, expected_call_stubs)\n sample_bank = SampleBank(ref)\n\n for sample_name, sequence_list in sample_seqs.items():\n sample_bank.add_sample_with_seqs_and_quals(\n sample_name, sequence_list)\n\n variant_callset = self.__run_wecall(sample_bank, config_dict)\n wecall_calls = variant_callset.get_variants_with_genotypes()\n\n if expected_call_stubs is None:\n self.__filter_none_genotypes(wecall_calls)\n expected_calls = self.__get_expected_calls_from_sample_ascii_haplotypes(\n expected_haplotypes, sample_bank.reference)\n else:\n expected_calls = {}\n for variant_stub, genotypes in expected_call_stubs.items():\n variant = self._variant_from_stub(\n sample_bank.reference.chrom, variant_stub)\n expected_calls[variant] = OrderedDict()\n for sample_name, genotype in genotypes.items():\n expected_calls[variant][sample_name] = GenotypeCall(\n genotype)\n\n self.maxDiff = None # print the whole message if the following assertion fails\n self.assertDictEqual(expected_calls, wecall_calls)\n\n def calls_variants_with_sample_data_and_filters(\n self,\n ref,\n sequence_list,\n expected_variants_and_sample_data,\n config_dict=None):\n\n sample_bank = self.__build_default_sample_bank(ref, sequence_list)\n variant_callset = self.__run_wecall(sample_bank, config_dict)\n expected_calls = {}\n\n for expected_stub in expected_variants_and_sample_data:\n variant = self._variant_from_stub(\n sample_bank.reference.chrom, expected_stub)\n expected_calls[variant] = {}\n\n for sample_data_key, value in expected_stub[3].items():\n expected_calls[variant][sample_data_key] = value\n\n expected_calls[variant][\"FILTERS\"] = set()\n\n for filterValue in expected_stub[4]:\n if filterValue != \"PASS\":\n expected_calls[variant][\"FILTERS\"].add(filterValue)\n\n sample_name = sample_bank.sample_names[0]\n actual_calls = variant_callset.get_variant_records()\n\n # assert the expected and actual variants called are the same\n self.assertEqual(set(expected_calls.keys()), set(actual_calls.keys()))\n\n for variant in expected_calls:\n expected_sample_data = expected_calls[variant]\n actual_sample_data = {}\n\n for field in expected_sample_data:\n if field != \"FILTERS\":\n value = actual_calls[variant].sample_info.get_genotype_data(sample_name)[\n field]\n actual_sample_data[field] = value\n\n actual_sample_data[\"FILTERS\"] = actual_calls[variant].filters\n\n self.assertEqual(expected_sample_data, actual_sample_data)\n\n def calls_variants_with_sample_data(self,\n ref,\n sequence_list,\n 
expected_variants_and_sample_data,\n config_dict=None):\n sample_bank = self.__build_default_sample_bank(ref, sequence_list)\n variant_callset = self.__run_wecall(sample_bank, config_dict)\n expected_calls = {}\n\n for expected_stub in expected_variants_and_sample_data:\n variant = self._variant_from_stub(\n sample_bank.reference.chrom, expected_stub)\n expected_calls[variant] = {}\n\n for sample_data_key, value in expected_stub[3].items():\n expected_calls[variant][sample_data_key] = value\n\n sample_name = sample_bank.sample_names[0]\n actual_calls = variant_callset.get_variant_records()\n\n # assert the expected and actual variants called are the same\n self.assertEqual(set(expected_calls.keys()), set(actual_calls.keys()))\n\n for variant in expected_calls:\n expected_sample_data = expected_calls[variant]\n actual_sample_data = {}\n\n for field in expected_sample_data:\n value = actual_calls[variant].sample_info.get_genotype_data(sample_name)[\n field]\n actual_sample_data[field] = value\n\n self.assertEqual(expected_sample_data, actual_sample_data)\n\n def __build_default_sample_bank(\n self,\n ref,\n sequence_list,\n n_fwd=None,\n n_rev=None):\n sample_bank = SampleBank(ref)\n sample_bank.add_sample_with_seqs_and_quals(\n DEFAULT_SAMPLE_NAME, sequence_list, n_fwd, n_rev)\n\n return sample_bank\n\n @staticmethod\n def __validate_expected_calls(expected_ascii, expected_stubs):\n if expected_ascii is None and expected_stubs is None:\n raise weCallException(\n \"Expected variants have to be provided either in the ascii or variant stub format.\"\n )\n\n @staticmethod\n def _get_expected_variants(\n ascii_haplotypes,\n expected_variant_stubs,\n sample_bank):\n if ascii_haplotypes is None and expected_variant_stubs is None:\n return sample_bank.variants\n elif ascii_haplotypes is None:\n return {\n AsciiWecallRunnerTest._variant_from_stub(\n sample_bank.reference.chrom,\n stub) for stub in expected_variant_stubs}\n else:\n return set(\n AsciiWecallRunnerTest.__get_expected_calls_from_haplotypes(\n ascii_haplotypes,\n sample_bank.reference).keys())\n\n @staticmethod\n def __get_expected_calls_from_sample_ascii_haplotypes(\n ascii_haplotypes, reference):\n calls_per_variant = {}\n for sample_name, ascii_strings in ascii_haplotypes.items():\n calls_for_sample = AsciiWecallRunnerTest.__get_expected_calls_from_haplotypes(\n ascii_strings, reference)\n for variant, genotype in calls_for_sample.items():\n if variant in calls_per_variant and sample_name in calls_per_variant[variant]:\n raise weCallException(\n \"Cannot supply multiple genotypes for \"\n \"sample_name {} and variant {}.\".format(\n sample_name, variant))\n if variant not in calls_per_variant:\n # ordered dict only to comply with what the actual calls\n # look like\n calls_per_variant[variant] = OrderedDict()\n\n calls_per_variant[variant][sample_name] = genotype\n\n return calls_per_variant\n\n @staticmethod\n def __get_expected_calls_from_haplotypes(ascii_strings, reference):\n if len(ascii_strings) != 2:\n raise weCallException(\n \"Expected calls have to be defined as a diploid.\")\n if not all(len(str) == reference.length_with_deletions()\n for str in ascii_strings):\n raise weCallException(\n \"Ascii haplotypes have to be of the same length as the reference\")\n\n vars_from_hap1 = Sequence(reference, ascii_strings[0]).variants\n vars_from_hap2 = Sequence(reference, ascii_strings[1]).variants\n\n calls = {}\n for var in vars_from_hap1.intersection(vars_from_hap2):\n calls[var] = GenotypeCall(\"1/1\")\n for var in 
vars_from_hap1.symmetric_difference(vars_from_hap2):\n calls[var] = GenotypeCall(\"0/1\")\n\n return calls\n\n @staticmethod\n def _variant_from_stub(chrom, stub):\n pos = stub[0]\n ref = stub[1]\n alt = stub[2]\n return Variant(chrom, pos, ref, alt)\n\n @staticmethod\n def __filter_none_genotypes(calls):\n calls_copy = copy.deepcopy(calls)\n for variant, genotypes in calls_copy.items():\n for sample_name, genotype in genotypes.items():\n if genotype == GenotypeCall(\"./.\"):\n calls[variant].popitem(sample_name)\n", "id": "10918364", "language": "Python", "matching_score": 3.6634366512298584, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/ascii_wecall_runner.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom collections import OrderedDict\nfrom wecall.bamutils.read_sequence import HIGH_QUALITY\nfrom wecall.bamutils.sequence_bank import SequenceBank\nfrom wecall.bamutils.sequence_quality import SequenceQuality\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics.reference_chromosome import DEFAULT_CHROM, ReferenceChromosome\n\n\nclass SampleBank(object):\n\n def __init__(self, reference_string, pos_from=0, chrom=DEFAULT_CHROM):\n self.reference = ReferenceChromosome(reference_string, pos_from, chrom)\n self.__samples = OrderedDict()\n\n def __getitem__(self, sample_name):\n return self.__samples[sample_name]\n\n def __len__(self):\n return len(self.__samples)\n\n def get(self, sample_name, default=None):\n try:\n return self[sample_name]\n except KeyError:\n return default\n\n @property\n def sample_names(self):\n return list(self.__samples.keys())\n\n @property\n def variants(self):\n vars = set()\n for seq_bank in list(self.__samples.values()):\n vars.update(seq_bank.variants)\n\n return vars\n\n def add_sample_name(self, sample_name):\n if sample_name in self.__samples:\n raise weCallException(\n \"Sample {} already exists in the SampleBank.\".format(sample_name))\n sequence_bank = SequenceBank(self.reference)\n self.__samples[sample_name] = sequence_bank\n return sequence_bank\n\n def add_sample_with_seqs_and_quals(\n self,\n sample_name,\n seq_qual_list,\n n_fwd=None,\n n_rev=None,\n mapping_quality=HIGH_QUALITY\n ):\n self.add_sample_name(sample_name)\n\n current_seq = None\n for look_ahead in seq_qual_list:\n if current_seq is None:\n current_seq = look_ahead\n continue\n\n if SequenceQuality.is_valid_qual(look_ahead):\n self.__samples[sample_name].add_sequence(\n current_seq,\n quality_string=look_ahead,\n n_fwd=n_fwd,\n n_rev=n_rev,\n mapping_quality=mapping_quality\n )\n current_seq = None\n else:\n self.__samples[sample_name].add_sequence(\n current_seq, n_fwd=n_fwd, n_rev=n_rev, mapping_quality=mapping_quality)\n current_seq = look_ahead\n\n if current_seq is not None:\n self.__samples[sample_name].add_sequence(\n current_seq, n_fwd=n_fwd, n_rev=n_rev, mapping_quality=mapping_quality)\n", "id": "9732345", "language": "Python", "matching_score": 2.844346761703491, "max_stars_count": 8, "path": "python/wecall/bamutils/sample_bank.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import TestCase\nfrom wecall.bamutils.sequence_bank import SequenceBank, AsciiVariantGenerator\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics.reference_chromosome import ReferenceChromosome\nfrom wecall.genomics.variant import Variant\n\n\nclass TestSequenceBank(TestCase):\n\n def test_should_fail_at_seq_with_different_length_to_reference(self):\n # Given\n ref_seq = \"AAAA\"\n seq = 
\"CC\"\n sequence_bank = SequenceBank(ReferenceChromosome(ref_seq))\n # Then\n self.assertRaises(weCallException, sequence_bank.add_sequence, seq)\n\n def test_should_be_able_to_add_snp_using_dsl_syntax(self):\n # Given\n input_ref = \"CCC\"\n snp_input = \".T.\"\n # When\n sequence_bank = SequenceBank(ReferenceChromosome(input_ref))\n sequence_bank.add_sequence(snp_input)\n read_lists = [builder.build_reads(0, {}) for builder in sequence_bank]\n reads = [read for read_list in read_lists for read in read_list]\n # Then\n self.assertEqual(reads[0].pos, 0)\n self.assertEqual(reads[0].seq, 'CTC')\n\n def test_should_be_able_to_add_snp_using_whitespace_dsl_syntax(self):\n # Given\n input_ref = \"CC*AAGG\"\n snp_input = \" .T. \"\n # When\n sequence_bank = SequenceBank(ReferenceChromosome(input_ref))\n sequence_bank.add_sequence(snp_input)\n read_lists = [builder.build_reads(0, {}) for builder in sequence_bank]\n reads = [read for read_list in read_lists for read in read_list]\n # Then\n self.assertEqual(reads[0].pos, 2)\n self.assertEqual(reads[0].seq, 'ATG')\n\n\nclass TestAsciiVariantGenerator(TestCase):\n def test_should_generate_variant_from_ascii_text(self):\n ref = \"ATAAAAAAAAAT\"\n alt_1 = \".A........*.\"\n alt_2 = \".C..........\"\n variant_generator = AsciiVariantGenerator(ReferenceChromosome(ref))\n\n gen_vars = variant_generator.get_variants([alt_1, alt_2])\n\n self.assertEqual(\n gen_vars,\n {\n Variant(variant_generator.reference.chrom, 1, \"T\", \"A\"),\n Variant(variant_generator.reference.chrom, 1, \"T\", \"C\"),\n Variant(variant_generator.reference.chrom, 9, \"AA\", \"A\")\n }\n )\n", "id": "1740478", "language": "Python", "matching_score": 3.2980284690856934, "max_stars_count": 8, "path": "test/test_utils/bamutils/test_sequence_bank.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import TestCase\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics.reference_chromosome import ReferenceChromosome\n\n\nclass TestSequenceReference(TestCase):\n def test_should_raise_when_gap_in_reference(self):\n self.assertRaisesRegex(\n weCallException,\n \"Illegal character in reference sequence \\'TA AA\\'\",\n ReferenceChromosome,\n \"TA AA\"\n )\n\n def test_should_raise_when_unknown_character_in_reference(self):\n self.assertRaisesRegex(\n weCallException,\n \"Illegal character in reference sequence \\'TA&AA\\'\",\n ReferenceChromosome,\n \"TA&AA\"\n )\n\n def test_should_raise_when_dot_in_reference(self):\n self.assertRaisesRegex(\n weCallException,\n \"Illegal character in reference sequence \\'TA.AA\\'\",\n ReferenceChromosome,\n \"TA.AA\"\n )\n\n def test_should_allocate_pos_from_and_pos_to_based_on_reference_size(self):\n # Given\n input_ref_seq = \"ACCCT\"\n # When\n seq_ref = ReferenceChromosome(input_ref_seq)\n # Then\n self.assertEqual(seq_ref.pos_from, 0)\n self.assertEqual(seq_ref.pos_to, len(input_ref_seq))\n\n def test_should_ignore_asterixes_in_reference_sequence_in_computing_pos_to(self):\n # Given\n ref_seq = ReferenceChromosome(\"C*C*C\")\n # Then\n self.assertEqual(ref_seq.pos_to, 3)\n\n def test_should_be_able_to_access_ref_char(self):\n # Given\n seq_ref = ReferenceChromosome(\"AC*T*G\")\n # Then\n self.assertEqual(seq_ref[0], \"A\")\n self.assertEqual(seq_ref[1], \"C\")\n self.assertEqual(seq_ref[2], \"T\")\n self.assertEqual(seq_ref[3], \"G\")\n\n def test_should_correctly_getitem_for_offset_reference(self):\n # Given\n seq_ref = ReferenceChromosome(\"A*T\", 10)\n # Then\n self.assertEqual(seq_ref[10], 
\"A\")\n self.assertEqual(seq_ref[11], \"T\")\n\n def test_should_get_correct_fasta_string(self):\n # Given\n seq_ref = ReferenceChromosome(\"AC*T*G\")\n # Then\n self.assertEqual(seq_ref.fasta_string(), \"ACTG\")\n\n def test_should_get_correct_fasta_string_for_offset_reference(self):\n # Given\n seq_ref = ReferenceChromosome(\"AC*T*G\", 5)\n # Then\n self.assertEqual(seq_ref.fasta_string(), \"NNNNNACTG\")\n", "id": "1432686", "language": "Python", "matching_score": 3.4630563259124756, "max_stars_count": 8, "path": "test/test_utils/genomics/test_reference_chromosome.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import TestCase\nfrom wecall.bamutils.sequence import Sequence\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics.reference_chromosome import ReferenceChromosome\nfrom wecall.genomics.variant import Variant\n\n\nclass TestGetVariantsFromSequence(TestCase):\n\n def test_should_raise_when_whitespace_in_seq(self):\n ref = ReferenceChromosome(\"AAAA\")\n self.assertRaisesRegex(\n weCallException,\n \"Illegal character in sequence .*\",\n Sequence,\n ref,\n \" *..\"\n )\n\n def test_should_raise_when_illegal_char_in_seq(self):\n ref = ReferenceChromosome(\"AAAA\")\n self.assertRaisesRegex(\n weCallException,\n \"Illegal character in sequence \\'..*F\\'\",\n Sequence,\n ref,\n \"..*F\"\n )\n\n def test_should_raise_if_ref_and_seq_have_different_length(self):\n ref = ReferenceChromosome(\"AAA\")\n self.assertRaises(weCallException, Sequence, ref, \"..\")\n\n def test_finds_snp(self):\n ref = ReferenceChromosome(\"AAAAAAAAAAAAA\")\n seq = Sequence(ref, \".C...........\")\n self.assertEqual(seq.variants, {Variant(ref.chrom, 1, \"A\", \"C\")})\n\n def test_should_find_multiple_snps(self):\n ref = ReferenceChromosome(\"AAAAAAAAAAAAA\")\n seq = Sequence(ref, \".C.........T.\")\n self.assertEqual(\n seq.variants, {\n Variant(\n ref.chrom, 1, \"A\", \"C\"), Variant(\n ref.chrom, 11, \"A\", \"T\")})\n\n def test_finds_snp_after_asterix(self):\n ref = ReferenceChromosome(\"T*CATAAAAAAAA\")\n seq = Sequence(ref, \".*.C.........\")\n self.assertEqual(seq.variants, {Variant(ref.chrom, 2, \"A\", \"C\")})\n\n def test_finds_snp_at_the_ref_start(self):\n ref = ReferenceChromosome(\"CATAAAAAAAA\")\n seq = Sequence(ref, \"T..........\")\n self.assertEqual(seq.variants, {Variant(ref.chrom, 0, \"C\", \"T\")})\n\n def test_finds_snp_at_the_ref_end(self):\n ref = ReferenceChromosome(\"CATAAAAAAAT\")\n seq = Sequence(ref, \"..........C\")\n self.assertEqual(seq.variants, {Variant(ref.chrom, 10, \"T\", \"C\")})\n\n def test_find_single_base_deletion(self):\n ref = ReferenceChromosome(\"TTAAAAAAAAAAT\")\n seq = Sequence(ref, \"..*..........\")\n self.assertEqual(seq.variants, {Variant(ref.chrom, 1, \"TA\", \"T\")})\n\n def test_find_multiple_single_base_deletion(self):\n ref = ReferenceChromosome(\"TTAAAAAGAAAAT\")\n seq = Sequence(ref, \"..*.....*....\")\n self.assertEqual(\n seq.variants, {\n Variant(\n ref.chrom, 1, \"TA\", \"T\"), Variant(\n ref.chrom, 7, \"GA\", \"G\")})\n\n def test_find_multi_base_deletion(self):\n ref = ReferenceChromosome(\"TTAGCAAAAAAAT\")\n seq = Sequence(ref, \"..***........\")\n self.assertEqual(seq.variants, {Variant(ref.chrom, 1, \"TAGC\", \"T\")})\n\n def test_find_multi_base_deletion_with_deletion_in_reference(self):\n ref = ReferenceChromosome(\"TTA*AAAAAAAAT\")\n seq = Sequence(ref, \"..**.........\")\n self.assertEqual(seq.variants, {Variant(ref.chrom, 1, \"TA\", \"T\")})\n\n def 
test_should_not_find_deletion_on_left_edge(self):\n ref = ReferenceChromosome(\"TAGCAAAAAAAT\")\n seq = Sequence(ref, \"*...........\")\n print((seq.variants))\n self.assertEqual(len(seq.variants), 0)\n\n def test_should_not_find_deletion_on_right_edge(self):\n ref = ReferenceChromosome(\"TTAGCAAAAAACT\")\n seq = Sequence(ref, \"............*\")\n self.assertEqual(len(seq.variants), 0)\n\n def test_should_not_find_long_deletion_on_left_edge(self):\n ref = ReferenceChromosome(\"TAGCAAAAAAAT\")\n seq = Sequence(ref, \"***.........\")\n self.assertEqual(len(seq.variants), 0)\n\n def test_should_not_find_long_deletion_on_right_edge(self):\n ref = ReferenceChromosome(\"TTAGCAAAAAACT\")\n seq = Sequence(ref, \"..........***\")\n self.assertEqual(len(seq.variants), 0)\n\n def test_find_adjacent_snp_and_deletion(self):\n ref = ReferenceChromosome(\"TTAAAAAAAAAT\")\n seq = Sequence(ref, \".G*.........\")\n self.assertEqual(\n seq.variants, {\n Variant(\n ref.chrom, 1, \"T\", \"G\"), Variant(\n ref.chrom, 1, \"TA\", \"T\")})\n\n def test_find_adjacent_deletion_and_snp(self):\n ref = ReferenceChromosome(\"TCATAAAAAAAT\")\n seq = Sequence(ref, \".*G.........\")\n self.assertEqual(\n seq.variants, {\n Variant(\n ref.chrom, 0, \"TC\", \"T\"), Variant(\n ref.chrom, 2, \"A\", \"G\")})\n\n def test_should_find_single_base_insertion(self):\n ref = ReferenceChromosome(\"CT*AAAAAAAAAT\")\n seq = Sequence(ref, \"..G..........\")\n self.assertEqual(seq.variants, {Variant(ref.chrom, 1, \"T\", \"TG\")})\n\n def test_should_find_multi_base_insertion(self):\n ref = ReferenceChromosome(\"CT**AAAAAAAAT\")\n seq = Sequence(ref, \"..GC.........\")\n self.assertEqual(seq.variants, {Variant(ref.chrom, 1, \"T\", \"TGC\")})\n\n def test_find_adjacent_insertion_and_snp(self):\n ref = ReferenceChromosome(\"T*ATAAAAAAAT\")\n seq = Sequence(ref, \".CG.........\")\n self.assertEqual(\n seq.variants, {\n Variant(\n ref.chrom, 0, \"T\", \"TC\"), Variant(\n ref.chrom, 1, \"A\", \"G\")})\n\n def test_find_adjacent_snp_and_insertion(self):\n ref = ReferenceChromosome(\"TA*AAAAAAAT\")\n seq = Sequence(ref, \".GC........\")\n self.assertEqual(\n seq.variants, {\n Variant(\n ref.chrom, 1, \"A\", \"G\"), Variant(\n ref.chrom, 1, \"A\", \"AC\")})\n\n def test_find_multiple_variants(self):\n ref = ReferenceChromosome(\"TA*AAAGCTAACT\")\n seq = Sequence(ref, \".GC...T...**.\")\n self.assertEqual(seq.variants, {\n Variant(ref.chrom, 1, \"A\", \"G\"),\n Variant(ref.chrom, 1, \"A\", \"AC\"),\n Variant(ref.chrom, 5, \"G\", \"T\"),\n Variant(ref.chrom, 8, \"AAC\", \"A\")\n })\n\n def test_raise_at_dot_overlapping_asterix(self):\n ref = ReferenceChromosome(\"TA*AAAAAAAT\")\n self.assertRaisesRegex(\n weCallException,\n \"Invalid sequence at ref position 1.\",\n Sequence,\n ref,\n \"...........\")\n\n\nclass TestGetCigarFromSequence(TestCase):\n def test_should_get_empty_cigar(self):\n ref = ReferenceChromosome(\"\")\n seq = Sequence(ref, \"\")\n self.assertEqual(str(seq.cigar), \"\")\n\n def test_should_get_correct_cigar_for_dots(self):\n ref = ReferenceChromosome(\"CCAA\")\n seq = Sequence(ref, \"....\")\n self.assertEqual(str(seq.cigar), \"4M\")\n\n def test_should_get_correct_cigar_for_snp(self):\n ref = ReferenceChromosome(\"TTT\")\n seq = Sequence(ref, \".G.\")\n self.assertEqual(str(seq.cigar), \"3M\")\n\n def test_should_get_correct_cigar_for_padding_and_deletion(self):\n ref = ReferenceChromosome(\"TTT\")\n seq = Sequence(ref, \".*.\")\n self.assertEqual(str(seq.cigar), \"1M1D1M\")\n\n def 
test_should_get_correct_cigar_for_padding_and_insertion(self):\n ref = ReferenceChromosome(\"T*T\")\n seq = Sequence(ref, \".C.\")\n self.assertEqual(str(seq.cigar), \"1M1I1M\")\n\n def test_should_get_correct_cigar_for_multiple_events(self):\n ref = ReferenceChromosome(\"CCC***AAATTT\")\n seq = Sequence(ref, \"A*.T*T...**C\")\n self.assertEqual(str(seq.cigar), \"1M1D1M2I3M2D1M\")\n", "id": "3191231", "language": "Python", "matching_score": 2.1136960983276367, "max_stars_count": 8, "path": "test/test_utils/bamutils/test_sequence.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import TestCase\nfrom wecall.bamutils.cigar import Cigar\n\n\nclass TestCigar(TestCase):\n def test_should_be_able_to_create_cigar_for_insertion(self):\n cigar = Cigar([(Cigar.INSERTION, 10)])\n self.assertEqual(str(cigar), \"10I\")\n\n def test_should_be_able_to_create_cigar_for_deletion(self):\n cigar = Cigar([(Cigar.DELETION, 10)])\n self.assertEqual(str(cigar), \"10D\")\n\n def test_should_be_able_to_create_cigar_for_match(self):\n cigar = Cigar([(Cigar.MATCH, 10)])\n self.assertEqual(str(cigar), \"10M\")\n\n def test_should_reduce_cigars_correctly_on_construction(self):\n cigar = Cigar([(Cigar.MATCH, 10), (Cigar.MATCH, 7)])\n self.assertEqual(str(cigar), \"17M\")\n\n def test_should_be_able_to_add_cigars(self):\n cigar_1 = Cigar([(Cigar.DELETION, 10), (Cigar.MATCH, 7)])\n cigar_2 = Cigar([(Cigar.MATCH, 6), (Cigar.INSERTION, 7)])\n\n self.assertEqual(str(cigar_1 + cigar_2), \"10D13M7I\")\n", "id": "10074099", "language": "Python", "matching_score": 1.5204273462295532, "max_stars_count": 8, "path": "test/test_utils/bamutils/test_cigar.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport itertools\n\n\nclass Cigar(object):\n MATCH = object()\n INSERTION = object()\n DELETION = object()\n SKIP = object()\n SOFT_CLIP = object()\n HARD_CLIP = object()\n PADDING = object()\n SEQUENCE_MATCH = object()\n SEQUENCE_MISMATCH = object()\n\n def __init__(self, cigar_sequence=None):\n if cigar_sequence is None:\n cigar_sequence = []\n # sequence of cigar components as tuples of (type_flag, length).\n self.__cigar = Cigar.__reduce(cigar_sequence)\n\n @staticmethod\n def from_string(cigar_string):\n raise NotImplementedError()\n\n def __str__(self):\n return \"\".join(Cigar.__item_to_string(*item) for item in self.__cigar)\n\n def __repr__(self):\n return \"<Cigar: {!s}>\".format(self)\n\n @staticmethod\n def __item_to_string(flag, length):\n return \"{}{}\".format(length, Cigar.__get_flag_character(flag))\n\n @staticmethod\n def __get_flag_character(flag):\n return {\n Cigar.MATCH: \"M\",\n Cigar.INSERTION: \"I\",\n Cigar.DELETION: \"D\",\n Cigar.SKIP: \"N\",\n Cigar.SOFT_CLIP: \"S\",\n Cigar.HARD_CLIP: \"H\",\n Cigar.PADDING: \"P\",\n Cigar.SEQUENCE_MATCH: \"=\",\n Cigar.SEQUENCE_MISMATCH: \"X\"\n }[flag]\n\n def __add__(self, other):\n assert(isinstance(other, Cigar))\n return Cigar(Cigar.__reduce(self.__cigar + other.__cigar))\n\n @staticmethod\n def join(*cigars):\n return Cigar(\n Cigar.__reduce(\n itertools.chain(\n *\n tuple(\n item.__cigar for item in cigars))))\n\n @staticmethod\n def __reduce(cigar_sequence):\n result = []\n iterator = iter(cigar_sequence)\n try:\n prev_item = next(iterator)\n except StopIteration:\n # no items in iterator\n pass\n else:\n for next_item in iterator:\n if prev_item[0] is next_item[0]:\n # combine\n prev_item = (prev_item[0], prev_item[1] + next_item[1])\n else:\n # append\n result.append(prev_item)\n prev_item = next_item\n 
result.append(prev_item)\n\n # Cigar items like 0M are technically correct but ugly.\n result = [item for item in result if item[1] > 0]\n return result\n", "id": "1252581", "language": "Python", "matching_score": 1.1632007360458374, "max_stars_count": 8, "path": "python/wecall/bamutils/cigar.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport ast\nimport re\n\n\nclass TimingDataItem(object):\n\n def __init__(self, timing_type, length, length_units, metadata):\n self.timing_type = timing_type\n self.length = length\n self.length_units = length_units\n self.metadata = metadata\n\n def __eq__(self, other):\n return all((\n self.timing_type == other.timing_type,\n self.length == other.length,\n self.length_units == other.length_units,\n self.metadata == other.metadata,\n ))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\ndef log_timing_parser(fp):\n\n log_timing_regex = re.compile(\n \"^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} -- TIMING -- (?P<payload>.*)\\s*$\"\n )\n timing_log_lines = []\n for line in fp:\n match = log_timing_regex.match(line)\n if match:\n timing_log_lines.append(match.group('payload'))\n\n timing_message_regex = re.compile(\n \"^(?P<type>\\w+)\\s+(?P<length>\\d+)(?P<units>\\w+):\\s*\"\n \"(?P<metadata>(?:[a-zA-z][-_0-9a-zA-Z]*=\\\"(?:(?:\\\\\\\")|[^\\\"])*\\\";\\s*)*)$\")\n errors = []\n timing_data = []\n for timing_message in timing_log_lines:\n match = timing_message_regex.match(timing_message)\n if match:\n metadata_item_regex = re.compile(\n '^(?P<key>[a-zA-z][-_0-9a-zA-Z]*)=(?P<value>\\\"(?:(?:\\\\\\\")|[^\\\"])*\\\");$')\n metadata_item_match = metadata_item_regex.match(\n match.group('metadata'))\n\n timing_data.append(TimingDataItem(\n match.group('type'),\n int(match.group('length')),\n match.group('units'),\n {(metadata_item_match.group('key')): ast.literal_eval(metadata_item_match.group('value'))}\n ))\n else:\n errors.append('failed to parse {!r}'.format(timing_message))\n if errors:\n raise Exception(\n 'failed to parse log timings:\\n{!r}'.format(\n '\\n'.join(errors)))\n\n return timing_data\n", "id": "10611559", "language": "Python", "matching_score": 0.5939496755599976, "max_stars_count": 8, "path": "python/wecall/wecall_utils/log_utils.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport re\nimport sys\n\n\ndef latex_encode(unencoded):\n return unencoded \\\n .replace('\\\\', '\\\\textbackslash{}') \\\n .replace('_', '\\\\_') \\\n .replace('#', '\\\\#') \\\n .replace('$', '\\\\$') \\\n .replace('%', '\\\\%') \\\n .replace('&', '\\\\&') \\\n .replace('{', '\\\\{') \\\n .replace('}', '\\\\}') \\\n .replace('\\\\textbackslash\\\\{\\\\}', '\\\\textbackslash{}') \\\n .replace('^', '\\\\textasciicircum{}') \\\n .replace('~', '\\\\textasciitilde{}') \\\n .replace(': ', ':~')\n\n\nclass Section:\n\n def __init__(self, heading, options=None):\n self.heading = heading\n self.options = [] if options is None else options\n\n def __repr__(self):\n return '<{!s}: heading={!r}, len(options)={!r}>'.format(\n type(self).__name__, self.heading, len(self.options)\n )\n\n def __eq__(self, other):\n return all((\n self.heading == other.heading,\n self.options == other.options,\n ))\n\n\nclass Option:\n\n def __init__(self, name, argspec, desc):\n self.name = name\n self.argspec = argspec\n self.desc = desc\n\n def __repr__(self):\n return '<{!s}: name={!r}, argspec={!r}, desc={!r}>'.format(\n type(self).__name__, self.name, self.argspec, self.desc\n )\n\n def __eq__(self, other):\n return all((\n 
self.name == other.name,\n self.argspec == other.argspec,\n self.desc == other.desc,\n ))\n\n\ndef tokenise_boost_cpp_help(message):\n it = iter(message.split('\\n'))\n next(it) # skip a header line\n tokens = [\n ('empty', re.compile('^$')),\n ('section', re.compile('^(?P<name>.*):$')),\n ('option', re.compile(\n '^ (?P<name>\\S+) (?P<argspec>( ?arg| ?\\[.*?\\]| ?\\(.*?\\))*)(?: +(?P<desc>.*))?$')),\n # intented text will be at least 3 spaces, exact number is\n # data-dependent\n ('indented', re.compile('^ {3,}(?P<text>.*)$')),\n ]\n for line in it:\n for name, regex in tokens:\n match = regex.match(line)\n if match:\n yield name, match\n break\n else:\n raise Exception('failed to match {!r}'.format(line))\n\n\ndef parse_boost_cpp_help(message):\n sections = []\n current_section = None\n current_option = None\n\n for name, match in tokenise_boost_cpp_help(message):\n if name == 'empty':\n continue\n elif name == 'section':\n current_section = Section(match.group('name'))\n sections.append(current_section)\n continue\n elif name == 'option':\n current_option = Option(**match.groupdict())\n current_section.options.append(current_option)\n elif name == 'indented':\n if current_option.desc is None:\n current_option.desc = match.group('text')\n else:\n current_option.desc = ' '.join(\n (current_option.desc, match.group('text')))\n else:\n raise Exception(\n 'unhandled token type {!r}: {!r}'.format(\n name, match.string))\n\n return sections\n\n\ndef section_to_latex(sections):\n section_header = \\\n '\\\\subsection{{{heading}}}\\n' \\\n '\\\\label{{subsection:{label}}}\\n' \\\n '\\\\begin{{tabular}}{{p{{0.3\\\\linewidth}}p{{0.2\\\\linewidth}}p{{0.45\\\\linewidth}}}}\\n' \\\n 'Option & Arguments & Description \\\\\\\\ \\\\hline\\n'\n option_line = '\\\\textbf{{{name}}} & \\\\textit{{{argspec}}} & {desc}\\\\\\\\\\n'\n section_footer = '\\\\end{tabular}'\n return '\\n\\n'.join((\n section_header.format(\n heading=section.heading,\n label=section.heading.lower(),\n ) +\n ''.join((option_line.format(\n name=latex_encode(option.name).replace('-', '{-}'),\n argspec=latex_encode(option.argspec).replace('-', '{-}'),\n desc=latex_encode(option.desc),\n ) for option in section.options)) +\n section_footer\n for section in sections\n ))\n\n\ndef help_to_latex_main(argv):\n message = sys.stdin.read()\n sections = parse_boost_cpp_help(message)\n documentation = section_to_latex(sections)\n print(documentation)\n\n\nif __name__ == '__main__':\n sys.exit(help_to_latex_main(sys.argv[:]))\n", "id": "9484182", "language": "Python", "matching_score": 1.9672995805740356, "max_stars_count": 8, "path": "scripts/help_to_latex.py" }, { "content": "#!/usr/bin/env python\n# All content Copyright (C) 2018 Genomics plc\nfrom __future__ import print_function\nimport sys\nimport re\nimport ast\nimport argparse\nimport os\n\n\nprint('WARNING: {!r} is obsolete.'.format(os.path.abspath(__file__)))\n\n\ndef main(args):\n exe, template_path, target_path, substitutions = parse_args(args)\n with open(template_path, \"r\") as template_fp:\n with open(target_path, \"w\") as target_fp:\n render_template(template_fp, target_fp, substitutions)\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument('input_template', help='Template to be rendered')\n parser.add_argument('output_target', help='Rendered template')\n parser.add_argument(\n 'substitutions',\n nargs=\"+\",\n help=\"Substitutions in the form +Field=Value\")\n exe = args[0]\n parsed_args = parser.parse_args(args[1:])\n substitutions = 
parse_substitutions(parsed_args.substitutions)\n return exe, parsed_args.input_template, parsed_args.output_target, substitutions\n\n\ndef parse_substitutions(args):\n '''python scripts/renderTemplate a b +a=\" \" +version=1.2.3 +hehe[] $(ls /) \\;g \\; +blah=4\n\n and the example template to go with the above\n\n {hehe:\" \"}\n {blah}\n {version}\n '''\n\n single_value_regex = re.compile(\n '^\\\\+(?P<field>[a-zA-Z_][a-zA-Z0-9_]*)=(?P<value>.*)$', re.S)\n\n multiple_value_regex = re.compile(\n '^\\\\+(?P<field>[a-zA-Z_][a-zA-Z0-9_]*)\\\\[\\\\]$', re.S)\n multiple_value_terminator_regex = re.compile('^;$', re.S)\n\n substitutions = {}\n it = iter(args)\n for token in it:\n single_value_match = single_value_regex.match(token)\n if single_value_match:\n substitutions[single_value_match.group(\n 'field')] = single_value_match.group('value')\n continue\n multiple_value_match = multiple_value_regex.match(token)\n if multiple_value_match:\n array = ArraySubstitution()\n substitutions[multiple_value_match.group('field')] = array\n for array_element_token in it:\n if multiple_value_terminator_regex.match(array_element_token):\n break\n else:\n array += array_element_token\n\n continue\n raise Exception(\n 'unexpected extra argument: {token!r}'.format(\n token=token))\n return substitutions\n\n\nclass ArraySubstitution(object):\n\n def __init__(self):\n self.__data = []\n\n def __repr__(self):\n return('<{name!s}: {data!r}>'.format(name=type(self).__name__, data=self.__data))\n\n # replacement = '{field:foo}'; format_spec = \"foo\"\n def __format__(self, format_spec=None):\n maybeMatch = re.match('^(\".*\")$', format_spec, re.S)\n if maybeMatch:\n separator = ast.literal_eval(maybeMatch.groups()[0])\n else:\n raise Exception(\n \"No match while trying to parse separator out of format_spec\")\n\n return separator.join(self.__data)\n\n def __iadd__(self, element):\n self.__data.append(element)\n return self\n\n def __eq__(self, other):\n return self.__data == other.__data\n\n\ndef render_template(template_fp, target_fp, substitutions):\n print(template_fp.read().format(**substitutions), file=target_fp)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "id": "4677939", "language": "Python", "matching_score": 1.301629900932312, "max_stars_count": 8, "path": "scripts/renderTemplate.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport json\nimport sys\n\n\ndef main(filename, property):\n with open(filename, 'r') as fp:\n props = json.load(fp)\n print(props[property])\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n raise Exception(\n \"Usage:\\n\\t{exe} filename property\".format(\n exe=sys.argv[0]))\n filename = sys.argv[1]\n property = sys.argv[2]\n main(filename, property)\n", "id": "11683468", "language": "Python", "matching_score": 0.011451922357082367, "max_stars_count": 8, "path": "scripts/get-property.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\n# -*- coding: utf8 -*-\n\nimport setuptools\n\nsetuptools.setup(\n name=\"wecall\",\n url=\"www.genomicsplc.com\",\n author=\"Genomics\",\n author_email=\"<EMAIL>\",\n description=\"wecall\",\n license=\"Genomics PLC Proprietary License\",\n keywords=\"wecall\",\n packages=setuptools.find_packages(),\n py_modules=['wecall'],\n install_requires=['pysam']\n)\n", "id": "10405501", "language": "Python", "matching_score": 0.3360724151134491, "max_stars_count": 8, "path": "python/setup.py" }, { "content": "#!/cm/local/apps/python3/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import 
absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: bright\nshort_description: Bright cm\ndescription:\n - Manage Bright Cluster Management entities.\nnotes:\n - This was created mainly to manage Slurm configuration in Bright 9.0, and has been tested mainly for those types of entities, but should theoretically work for almost any Bright settings. It uses the C(pythoncm) Bright interface, so see the Bright Developer documentation for naming and typing conventions.\n - You will likely have to set C(ansible_python_interpreter=/cm/local/apps/python3/bin/python3) if running this on a bright node.\n - Since bright is picky about types, if you use templates to set values, you may want to set the ansible configuration C(jinja2_native) to preserve types.\nauthor:\n - <NAME> (@dylex)\nrequirements:\n - pythoncm\noptions:\n name:\n required: false\n description:\n - The name of the entity to be managed.\n - If omitted, just return a list of entities.\n type: str\n key:\n description:\n - The uniqueKey of the entity to be managed.\n type: int\n type:\n description:\n - The type (using the pythoncm CamelCase name) of the entity to be managed, e.g., C(PhysicalNode), C(JobQueue), etc.\n - Required unless key is specified. If both key and type are specified, both must match.\n state:\n description: Intended state\n choices: [ absent, present ]\n default: present\n clone:\n type: str\n description:\n - When creating an entity C(state=present), clone it from existing entity instead of creating from scratch.\n attrs:\n type: dict\n description:\n - Attributes to set on the entity C(state=present).\n - Referenced entities can be specified by name. Contained entities can be specified by nested dicts, including C(childType) to create specific types.\n - Lists are replaced entirely. 
Lists of contained entities can be selectived updated by dicts keyed on the name of the entity.\n default: {}\n\"\"\"\n\nEXAMPLES = \"\"\"\n- bright:\n type: SlurmWlmCluster\n name: slurm\n attrs:\n gresTypes: [gpu]\n cgroups:\n constrainCores: true\n vars:\n ansible_python_interpreter: /cm/local/apps/python3/bin/python3\n\n- bright:\n type: SlurmJobQueue\n name: gen\n attrs:\n maxTime: 7-0\n allowAccounts: ALL\n options: [QoS=gen]\n- bright:\n type: ConfigurationOverlay\n name: slurm-client-category\n clone: slurm-client\n attrs:\n categories:\n - category1\n - category2\n roles:\n slurmclient:\n childType: SlurmClientRole\n wlmCluster: slurm\n realMemory: 256000\n coresPerSocket: 20\n sockets: 2\n features: [skylake,ib]\n queues: [gen]\n genericResources:\n - alias: gpu0\n name: gpu\n count: '1'\n file: /dev/nvidia0\n type: v100\n\"\"\"\n\n\nRETURN = \"\"\"\nname:\n description: resolved name of the entity\n type: str\n returned: when entity exists at any point\nkey:\n description: resolved uniqueKey of the entity\n type: int\n returned: when entity exists at any point\ntype:\n description: specific type of entity\n type: str\n returned: when entity exists at any point\nentity:\n description: full entity\n type: dict\n returned: when entity exists at any point\nentities:\n description: all entries of given type\n type: list\n returned: when name is omitted\n\"\"\"\n\nimport traceback\n\nfrom ansible.module_utils.basic import AnsibleModule, missing_required_lib\nfrom ansible.module_utils._text import to_native\n\nCM_IMP_ERR = None\ntry:\n import pythoncm.cluster\n\n HAS_CM = True\nexcept ImportError:\n CM_IMP_ERR = traceback.format_exc()\n HAS_CM = False\n\ndef getitem(l, i):\n try:\n return l[i]\n except IndexError:\n return None\n\nclass Entity(object):\n types = [m for m in dir(pythoncm.entity) if isinstance(getattr(pythoncm.entity, m), type)] if HAS_CM else []\n\n def __init__(self, module):\n self.module = module\n self.state = module.params['state']\n self.name = module.params['name']\n self.key = module.params['key']\n self.type = module.params['type']\n self.clone = module.params['clone']\n self.attrs = module.params['attrs']\n\n def absent(self):\n if not self.entity:\n return\n\n self.result['changed'] = True\n if self.module.check_mode:\n return\n\n r = self.entity.remove()\n if not r.success:\n self.result['failed'] = True\n\n def gettype(self, typ):\n return getattr(pythoncm.entity, typ)\n\n def getentity(self, name, typ):\n e = self.cluster.get_by_name(name, typ)\n if not e:\n raise KeyError(\"%s:%s\"%(typ, name))\n return e\n\n def makeentity(self, cur, val, field, name=None):\n from pythoncm.entity.meta_data import MetaData\n try:\n MetaData = MetaData.Type\n except AttributeError:\n pass\n\n if val is None:\n return\n\n elif field.kind == MetaData.RESOLVE:\n if type(val) is not str:\n raise TypeError('Expected %s name, not %r'%(field.instance, val))\n return self.getentity(val, field.instance)\n\n elif field.kind == MetaData.ENTITY:\n if type(val) is str:\n val = {'name':val}\n if type(val) is not dict:\n raise TypeError('Expected %s attributes, not %r'%(field.instance, val))\n if not cur:\n cur = self.gettype(val.get('childType', val.get('baseType', field.instance)))(cluster = self.cluster)\n self.changed.add(cur)\n if name and hasattr(cur, 'name'):\n cur.name = name\n self.setentity(cur, val)\n return cur\n\n def setentity(self, ent, src):\n fields = {f.name: f for f in ent.fields()}\n for k, v in src.items():\n c = getattr(ent, k)\n f = fields[k]\n if f.instance:\n if 
f.vector:\n if type(v) is list:\n v = [self.makeentity(getitem(c, i), x, f) for (i, x) in enumerate(v)]\n elif type(v) is dict:\n l = c.copy()\n for n, e in v.items():\n try:\n i = next(i for (i, x) in enumerate(l) if x and x.name == n)\n except StopIteration:\n i = len(l)\n l.append(None)\n l[i] = self.makeentity(l[i], e, f, n)\n v = l\n elif type(v) is str and issubclass(self.gettype(f.instance), pythoncm.entity.Device):\n from pythoncm.device_selection import DeviceSelection\n d = DeviceSelection(self.cluster)\n #d.add_devices_in_text_range(v, True)\n d.add_devices(pythoncm.namerange.expand.Expand.expand(v), True)\n v = d.get_sorted_by_name()\n else:\n raise TypeError('%s: expected %s list'%(k, f.instance))\n else:\n v = self.makeentity(c, v, f)\n if c != v:\n if f.readonly:\n raise PermissionError(\"%s is readonly\"%(k))\n self.changed.add(ent)\n setattr(ent, k, v)\n\n def present(self):\n self.changed = set()\n\n if not self.entity:\n if self.clone:\n clone = self.getentity(self.clone, self.type)\n self.entity = clone.clone()\n else:\n self.entity = self.gettype(self.type)(cluster = self.cluster)\n self.changed.add(self.entity)\n if hasattr(self.entity, 'name'):\n self.entity.name = self.name\n\n self.setentity(self.entity, self.attrs)\n\n if self.changed:\n self.result['changed'] = True\n err = self.entity.check()\n if err:\n self.result['failed'] = True\n self.result['msg'] = err\n elif not self.module.check_mode:\n res = self.entity.commit(wait_for_remote_update=True)\n if not res.good:\n self.result['failed'] = True\n self.result['msg'] = str(res)\n\n def run(self):\n self.cluster = pythoncm.cluster.Cluster() # TODO: settings\n if self.key is not None:\n self.entity = self.cluster.get_by_key(self.key)\n if self.type and self.entity.baseType != self.type and self.entity.childType != self.type:\n return {'failed': True, 'msg': 'key/type mismatch'}\n elif self.name is not None:\n self.entity = self.cluster.get_by_name(self.name, self.type)\n else:\n l = self.cluster.get_by_type(self.gettype(self.type))\n return {'entities': [e.to_dict() for e in l]}\n\n self.result = {}\n try:\n getattr(self, self.state)()\n except Exception as e:\n self.result['failed'] = True\n self.result['msg'] = to_native(e)\n if self.entity:\n self.result['entity'] = self.entity.to_dict()\n self.result['name'] = self.entity.resolve_name\n self.result['key'] = self.entity.uniqueKey\n self.result['type'] = self.entity.childType or self.entity.baseType\n return self.result\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(type='str'),\n key=dict(type='int'),\n type=dict(type='str', choices=Entity.types if HAS_CM else None),\n state=dict(type='str', default='present', choices=['absent','present']),\n clone=dict(type='str'),\n attrs=dict(type='dict', default={}),\n ),\n mutually_exclusive=[('name','key')],\n required_one_of=[('type','key')],\n supports_check_mode=True,\n )\n\n if not HAS_CM:\n module.fail_json(msg=missing_required_lib('pythoncm'),\n exception=CM_IMP_ERR)\n\n result = Entity(module).run()\n module.exit_json(**result)\n\nif __name__ == '__main__':\n main()\n", "id": "1970289", "language": "Python", "matching_score": 4.098174095153809, "max_stars_count": 0, "path": "bright.py" }, { "content": "#!/usr/bin/python\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: slurm\nshort_description: Manage slurm clusters\nauthor: <NAME> (@dylex)\ndescription:\n - Provide an interface to 
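The setentity logic in bright.py above walks a nested dict of desired attributes, recurses into contained entities instead of replacing them wholesale, and only marks an entity as changed when a value actually differs. The sketch below is a hypothetical standalone analogue of that update pattern using plain objects; Node and apply_attrs are illustrative names and no pythoncm is required.

class Node:
    def __init__(self, **attrs):
        self.__dict__.update(attrs)

def apply_attrs(obj, attrs, changed=None):
    changed = set() if changed is None else changed
    for key, desired in attrs.items():
        current = getattr(obj, key)
        if isinstance(desired, dict):
            # Nested "entity": recurse rather than replacing the object.
            apply_attrs(current, desired, changed)
        elif current != desired:
            setattr(obj, key, desired)
            changed.add(obj)
    return changed

cfg = Node(maxTime="7-0", cgroups=Node(constrainCores=False))
changed = apply_attrs(cfg, {"maxTime": "7-0", "cgroups": {"constrainCores": True}})
print(bool(changed), cfg.cgroups.constrainCores)  # True True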
sacctmgr, mostly mirrors the command-line interface\noptions:\n state:\n description:\n - The action to take, either add/modify, delete, or list.\n - Equivalent to the first argument to sacctmgr.\n choices: [\"present\", \"absent\", \"list\"]\n default: present or list\n entity:\n description:\n - The type of entity to list or modify.\n - Equivalent to the second argument to sacctmgr.\n - To manipulate associations, specify \"parent=\" with account, \"account=\" with user, \"cluster=\" with resource, or \"account=\" with transactions.\n required: true\n choices: [\"cluster\", \"qos\", \"resource\", \"account\", \"user\", \"events\", \"reservation\", \"transactions\", \"tres\", \"wckey\"]\n name:\n description:\n - The name of the entity to modify, for cluster, qos, resource, account, user, reservation, tres, or wckey\n required: false\n args:\n type: dict\n description:\n - Other arguments are the same as to sacctmgr, except all are lower-case.\n - Rather than WithClusters or WithAssoc, if you specify \"parent=\", \"account=\", or \"cluster=\" they will be inferred.\n - For some arguments, sacctmgr may report values in a different format than it accepts them. In this case, you can specify a dict with C(set) as the value to set, and C(test) as the value to compare against.\n - For TRES values, like sacctmgr, you must explicitly set C(res=-1) to clear resource contraints. These will be ignored when comparing.\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: create slurm qos\n hosts: slurm\n slurm:\n entity: qos\n state: present\n name: defq\n args:\n priority: 10\n maxwall: 7-00:00:00\n grptres: node=1000,mem=10000000M\n maxtresperuser: cpu=1000,node=-1\n gracetime:\n set: 60\n test: 00:01:00\n\n- name: list all user associations\n hosts: slurm\n slurm:\n entity: user\n state: list\n args:\n account: ''\n\n- name: create slurm user association\n hosts: slurm\n slurm:\n entity: user\n state: present\n name: {{user}}\n args:\n account: {{slurm_account}}\n\"\"\"\n\nRETURN = \"\"\"\nentity_type:\n description: the list of matching entities, before any actions are taken\n type: list\n returned: always\n\"\"\"\n\nfrom ansible.module_utils.basic import AnsibleModule\n\nclass Args(list):\n \"\"\"A special list for slurm command line key=value arguments.\"\"\"\n def add(self, field, value=None):\n self.append(field + ('=' + str(value) if value is not None else ''))\n\nclass Parser(object):\n \"\"\"Generic argument parser\"\"\"\n def editable(self):\n return False\n\n def format(self, sacctmgr):\n pass\n\n def parse(self, sacctmgr):\n pass\n\n def sets(self, sacctmgr):\n pass\n\nclass Param(Parser):\n \"\"\"Single argument parameter\"\"\"\n def __init__(self, name):\n self.name = name.lower()\n\n def parse(self, sacctmgr):\n self.val = sacctmgr.params.pop(self.name, None)\n\n def set(self, sacctmgr, val):\n sacctmgr.sets.add(self.name, val)\n\nclass Fmt(Param):\n \"\"\"Parameter that can also be read\"\"\"\n def __init__(self, name, fmt=None):\n super(Fmt, self).__init__(name)\n self.fmt = fmt.lower() if fmt else self.name\n\n def format(self, sacctmgr):\n sacctmgr.format.append(self.fmt)\n\n def parse(self, sacctmgr):\n super(Fmt, self).parse(sacctmgr)\n\n def cur(self, sacctmgr):\n return [r[self.name] for r in sacctmgr.cur]\n\nclass RO(Fmt):\n \"\"\"Parameter that can only be read\"\"\"\n pass\n\nclass Filt(Param):\n \"\"\"Parameter that can only filter results\"\"\"\n def parse(self, sacctmgr):\n super(Filt, self).parse(sacctmgr)\n if self.val:\n sacctmgr.keys.append(self.fmt)\n 
sacctmgr.filter.add(self.name, self.val)\n\nclass RF(RO, Filt):\n \"\"\"Parameter that can read and filter results\"\"\"\n def parse(self, sacctmgr):\n super(RF, self).parse(sacctmgr)\n\nclass Key(RF):\n \"\"\"Required, primary key parameter\"\"\"\n def editable(self):\n return True\n\n def parse(self, sacctmgr):\n super(Key, self).parse(sacctmgr)\n if not self.val and sacctmgr.state != 'list':\n sacctmgr.fail('missing required argument: %s' % self.name)\n\nclass RW(Fmt):\n \"\"\"Parameter that can be read and modified\"\"\"\n def editable(self):\n return True\n\n def eq(self, val):\n return str(self.val) == val\n\n def parse(self, sacctmgr):\n if sacctmgr.state == 'present':\n super(RW, self).parse(sacctmgr)\n if type(self.val) is dict:\n self.set_val = self.val['set']\n self.val = self.val['test']\n else:\n self.set_val = self.val\n elif sacctmgr.state == 'absent':\n sacctmgr.params.pop(self.name, None)\n\n def sets(self, sacctmgr):\n if self.set_val is None:\n return\n cur = self.cur(sacctmgr)\n if not cur or not all(map(self.eq, cur)):\n self.set(sacctmgr, self.set_val)\n\nclass RWSet(RW):\n def eq(self, val):\n return set(self.val.split(',')) == set(val.split(','))\n\nclass TRES(RWSet):\n def parse(self, sacctmgr):\n super(TRES, self).parse(sacctmgr)\n try:\n self.val = ','.join(v for v in self.val.split(',') if not v.endswith('=-1'))\n except AttributeError:\n pass\n\nclass Act(Param):\n \"\"\"Parameter that causes an action\"\"\"\n def parse(self, sacctmgr):\n if sacctmgr.state == 'present':\n super(Act, self).parse(sacctmgr)\n elif sacctmgr.state == 'absent':\n sacctmgr.params.pop(self.name, None)\n\n def sets(self, sacctmgr):\n if self.val is None:\n return\n if sacctmgr.cur:\n self.set(sacctmgr, self.val)\n\nclass List(Parser):\n \"\"\"Set of parameters\"\"\"\n def __init__(self, *l):\n self.list = l\n\n def editable(self):\n return self.list[0].editable()\n\n def format(self, sacctmgr):\n for p in self.list:\n p.format(sacctmgr)\n\n def parse(self, sacctmgr):\n for p in self.list:\n p.parse(sacctmgr)\n\n def sets(self, sacctmgr):\n for p in self.list:\n p.sets(sacctmgr)\n\nclass With(Parser):\n \"\"\"Parameters dependent on a With* argument, only supplied if the given key parameter is\"\"\"\n def __init__(self, w, k, *l):\n self.args = w\n self.key = k\n self.sub = List(*l)\n\n def parse(self, sacctmgr):\n self.key.format(sacctmgr)\n self.key.parse(sacctmgr)\n if self.key.val is not None:\n sacctmgr.args.append(self.args)\n self.sub.format(sacctmgr)\n self.sub.parse(sacctmgr)\n\n def sets(self, sacctmgr):\n if self.key.val is not None:\n self.sub.sets(sacctmgr)\n\nclass Opt(Param):\n \"\"\"Optional flag controlling list results\"\"\"\n def parse(self, sacctmgr):\n if sacctmgr.state != 'list':\n return\n super(Opt, self).parse(sacctmgr)\n if self.val is None:\n return\n try:\n self.val = sacctmgr.module._check_type_bool(self.val)\n except (TypeError, ValueError):\n sacctmgr.fail(msg=\"argument %s could not be converted to bool\" % self.name)\n if self.val:\n sacctmgr.args.append(self.name)\n\nENTITIES = dict(\n cluster = List(Key('Name', 'Cluster'), RF('Classification'),\n RW('DefaultQOS'), RO('Flags'), RO('RPC'), RW('QosLevel'), RW('Fairshare'), TRES('GrpTRES'), RW('GrpJobs'), RW('GrpMemory'), RW('GrpNodes'), RW('GrpSubmitJob'), RW('MaxTRESMins'), RW('MaxJobs'), RW('MaxNodes'), RW('MaxSubmitJobs'), RW('MaxWall')),\n qos = List(Key('Name'),\n RW('Description'), RO('Id'), RW('PreemptMode'), RW('Flags'), RW('GraceTime'), RW('GrpJobs'), RW('GrpSubmitJob'), RW('GrpTRESMins'), 
TRES('GrpTRES'), RW('GrpWall'), RW('MaxTRESMins'), TRES('MaxTRESPerJob'), TRES('MaxTRESPerNode'), TRES('MaxTRESPerUser'), RW('MaxJobs'), RW('MaxSubmitJobsPerUser'), RW('MaxWall'), RW('Preempt'), RW('Priority'), RW('UsageFactor'), RW('UsageThreshold'),\n Act('RawUsage'), Opt('WithDeleted'),\n TRES('MaxTRES'), RW('MaxJobsPerUser'), TRES('MinTRESPerJob')),\n resource = List(Key('Name'),\n RW('Description'), RW('Count'), RW('Flags'), RO('Id'), RW('ServerType'), RW('Server'), RW('Type'),\n With('WithClusters', RF('Cluster'), RW('PercentAllowed', 'Allocated'))),\n account = List(Key('Name', 'Account'),\n RW('Description'), RW('Organization'),\n With('WithAssoc', RF('Parent', 'ParentName'), RF('Cluster'), RW('DefaultQOS'), RW('QOSLevel'), RW('Fairshare'), RW('GrpTRESMins'), RW('GrpTRESRunMins'), TRES('GrpTRES'), RW('GrpJobs'), RW('GrpMemory'), RW('GrpNodes'), RW('GrpSubmitJob'), RW('GrpWall'), RW('MaxTRESMins'), TRES('MaxTRES'), RW('MaxJobs'), RW('MaxNodes'), RW('MaxSubmitJobs'), RW('MaxWall'),\n Act('RawUsage')),\n Opt('WithDeleted')),\n user = List(Key('Name', 'User'),\n RW('DefaultAccount'), RW('AdminLevel'),\n With('WithAssoc', RF('Account'), RF('Cluster'), RF('Partition'), RW('DefaultQOS'), RW('DefaultWCKey'), RWSet('QosLevel'), RW('Fairshare'), RW('MaxTRESMins'), TRES('MaxTRES'), RW('MaxJobs'), RW('MaxNodes'), RW('MaxSubmitJobs'), RW('MaxWall'),\n Act('RawUsage')),\n Opt('WithDeleted')),\n events = List(RF('Cluster'), RF('Nodes', 'ClusterNodes'), RF('Start'), RF('End'), RF('State'), RF('Reason'), RF('User'), RF('Event'), RO('CPUCount'), RO('Duration'),\n Filt('Start'), Filt('End'), Filt('MaxCPUs'), Filt('MinCPUs'), Opt('All_Clusters'), Opt('All_Time')),\n reservation = List(RF('Name'), RF('Cluster'), RO('TRES'), RF('Start'), RF('End'), RF('ID'),\n Filt('Nodes')),\n transactions = List(RO('Time'), RF('Action'), RF('Actor'), RO('Where'), RO('Info'), RF('Cluster'), RF('ID'),\n With('WithAssoc', RF('Account'), RF('User'))),\n tres = List(RF('Name'), RF('Type'), RF('ID'),\n Opt('WithDeleted')),\n wckey = List(RF('Name'), RF('Cluster'), RF('User'), RF('ID'),\n Filt('End'), Filt('Start'), Opt('WithDeleted')),\n)\n\nclass SAcctMgr(object):\n def __init__(self):\n self.module = AnsibleModule(\n argument_spec = dict(\n state = dict(choices=['present', 'absent', 'list']),\n entity = dict(required=True, choices=ENTITIES.keys()),\n name = dict(type='str'),\n args = dict(type='dict', default={}),\n ),\n supports_check_mode = True\n )\n self.bin = self.module.get_bin_path('sacctmgr', True, ['/opt/slurm/bin', '/cm/shared/apps/slurm/current/bin'])\n self.entity = self.module.params['entity']\n self.params = self.module.params['args']\n try:\n self.params['name'] = self.module.params['name']\n except KeyError:\n pass\n\n self.result = {}\n self.format = []\n self.keys = []\n self.filter = Args()\n self.args = Args()\n self.sets = Args()\n\n def exit(self, **args):\n for (k, v) in args:\n self.result[k] = v\n self.module.exit_json(**self.result)\n\n def fail(self, msg):\n self.result['msg'] = msg\n self.module.fail_json(**self.result)\n\n def change(self):\n \"\"\"Register a change and possibly exit in check mode.\"\"\"\n self.result['changed'] = True\n if self.module.check_mode:\n self.exit()\n \n def cmd(self, readonly, *args):\n cmd = [self.bin]\n if readonly:\n cmd.append('-r')\n else:\n #self.fail(\" \".join(args))\n cmd.append('-i')\n self.change()\n cmd.extend(args)\n (_, o, e) = self.module.run_command(cmd, check_rc=True)\n if e != '':\n self.fail(e)\n return o\n\n def list(self):\n \"\"\"Parse the 
output of a list command.\"\"\"\n cmd = ['-nP', 'list', self.entity, 'format=' + ','.join(self.format)] + self.args + self.filter\n l = [r.split('|') for r in self.cmd(True, *cmd).splitlines()]\n n = len(self.format)\n if any(len(r) != n for r in l):\n self.fail('unexpected list output for %s' % self.entity)\n #return list(filter(lambda d: all(d[k] for k in self.keys), map(lambda r: dict(zip(self.format, r)), l)))\n return [d for d in (dict(zip(self.format, r)) for r in l) if all(d[k] for k in self.keys)]\n\n def create(self):\n cmd = ['add', self.entity] + self.filter + self.sets\n self.cmd(False, *cmd)\n\n def modify(self):\n cmd = ['modify', self.entity] + self.filter + [\"set\"] + self.sets\n self.cmd(False, *cmd)\n\n def delete(self):\n cmd = ['delete', self.entity] + self.filter\n self.cmd(False, *cmd)\n\n def main(self):\n parser = ENTITIES[self.entity]\n editable = parser.editable()\n self.state = self.module.params.get('state') or ('present' if editable else 'list')\n if not editable and self.state != 'list':\n self.fail('cannot set state=%s for %s' % (self.state, self.entity))\n parser.format(self)\n parser.parse(self)\n if self.params:\n self.fail('unhandled arguments: %s' % ','.join(self.params.iterkeys()))\n self.cur = self.list()\n self.result[self.entity] = self.cur\n if self.state == 'list':\n pass\n elif self.state == 'present':\n parser.sets(self)\n if not self.cur:\n self.create()\n elif self.sets:\n self.modify()\n elif self.state == 'absent':\n if self.cur:\n self.delete()\n self.exit()\n\nif __name__ == '__main__':\n SAcctMgr().main()\n", "id": "5349439", "language": "Python", "matching_score": 2.079416513442993, "max_stars_count": 9, "path": "slurm.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\n# -*- coding: utf8 -*-\n\nimport logging\nfrom math import factorial\n\nfrom wecall.common.exceptions import weCallException\nfrom wecall.vcfutils.stringutils import from_vcf_str\n\nlogger = logging.getLogger(__name__)\n\n# Module level constants\nUNKNOWN = '.'\n\n\ndef n_choose_k(n, k):\n return int(factorial(n) / factorial(k) / factorial(n - k))\n\n\ndef _parse_flag(value):\n \"\"\"\n Parses a 'flag' info field. If flag is used as a\n proper flag the value is None and it is assumed that\n that means True. 
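The slurm module above only issues a modify when the value reported by sacctmgr differs from the desired one, and lets a desired value be a {'set': ..., 'test': ...} dict for fields whose reported format differs from the accepted one (e.g. gracetime in the EXAMPLES). This is a hypothetical standalone sketch of that comparison convention; pending_changes is an illustrative name, not part of the module.

def pending_changes(current, desired):
    sets = []
    for field, value in desired.items():
        if isinstance(value, dict):
            test, to_set = value["test"], value["set"]
        else:
            test = to_set = value
        if str(current.get(field, "")) != str(test):
            sets.append("%s=%s" % (field, to_set))
    return sets

current = {"maxwall": "7-00:00:00", "gracetime": "00:01:00"}
desired = {"maxwall": "7-00:00:00", "gracetime": {"set": 60, "test": "00:01:00"}}
print(pending_changes(current, desired))   # [] - already in the desired state
desired["maxwall"] = "5-00:00:00"
print(pending_changes(current, desired))   # ['maxwall=5-00:00:00']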
Missing flag is unclear and hence not parsed.\n \"\"\"\n if value == UNKNOWN:\n return None\n else:\n if value is None:\n return True\n if isinstance(value, bool):\n return value\n value = value.upper()\n if value in {'1', 'YES', 'TRUE'}:\n return True\n elif value in {'0', 'NO', 'FALSE'}:\n return False\n else:\n # For strict VCF parsing configure parser to throw on log warnings.\n # TODO: Work out how to configure logger to do this.\n logging.warning(\"Invalid flag {}\".format(value))\n raise weCallException(\"Invalid flag {}\".format(value))\n\n\ndef make_optional(cls, default):\n def optional(value):\n if value is '.':\n return None\n else:\n try:\n return cls(value)\n except ValueError as ex:\n logger.warn(\n '{} - using {} as default value'.format(ex, default))\n return default\n return optional\n\n\nDATATYPE_MAPPING = {\n \"Integer\": make_optional(int, 0),\n \"Float\": make_optional(float, 0.0),\n \"Flag\": _parse_flag,\n \"Character\": make_optional(str, ''),\n \"String\": make_optional(str, ''),\n}\n\n\ndef parse_cardinality_A(values, number_alts, number_field, number_genotypes):\n if values and len(values) != number_alts:\n logger.warn(\n \"Incorrect num ALT values {} instead of {}\".format(\n len(values), number_alts))\n\n # If data missing for Allele in VCF mark it as unknown.\n extracted_data = [[value] for value in values]\n extracted_data.extend([UNKNOWN] * (number_alts - len(extracted_data)))\n return extracted_data\n\n\ndef parse_cardinality_R(values, number_alts, number_field, number_genotypes):\n assert len(values) == number_alts + 1\n return [[values[0], value] for value in values[1:]]\n\n\ndef parse_cardinality_G(values, number_alts, number_field, number_genotypes):\n assert len(values) == number_genotypes\n return [[value] for value in values]\n\n\ndef parse_unknown_cardinality(\n values,\n number_alts,\n number_field,\n number_genotypes):\n return [values for _ in range(number_alts)]\n\n\ndef parse_cardinality(values, number_alts, number_field, number_genotypes):\n if number_field == '0':\n assert values == [True]\n else:\n number = from_vcf_str(number_field, int)\n if len(values) != number:\n logger.debug(\n \"Incorrect num values {} instead of {}\".format(\n len(values), number))\n\n return parse_unknown_cardinality(\n values, number_alts, number_field, number_genotypes)\n\n\nCARDINALITY_MAPPING = {\n 'A': parse_cardinality_A,\n 'R': parse_cardinality_R,\n 'G': parse_cardinality_G,\n UNKNOWN: parse_unknown_cardinality\n}\n\n\ndef pad_vcf_data(parsed_data, expected_number_items):\n if len(parsed_data) != expected_number_items:\n logger.warn(\n 'expected {} items in {!r}'.format(\n expected_number_items,\n parsed_data))\n parsed_data.extend(\n [None] for index in range(\n expected_number_items -\n len(parsed_data)))\n return parsed_data\n\n\ndef make_split_info_alt_func(cardinality):\n if cardinality == 'A':\n def split_alts(data, n_alts):\n return pad_vcf_data([[value] for value in data], n_alts)\n elif cardinality == 'R':\n def split_alts(data, n_alts):\n ref = data[0]\n return pad_vcf_data([[ref, alt] for alt in data[1:]], n_alts)\n else:\n def split_alts(data, n_alts):\n return [[item for item in data] for _ in range(n_alts)]\n return split_alts\n\n\n# This is to work out the position of genotype likelihood, see VCF spec\ndef choose_gl_position_for_diploid(first_allele_pos, second_allele_pos):\n return int(second_allele_pos * (second_allele_pos + 1) / 2) + \\\n first_allele_pos\n\n\ndef make_split_sample_alt_func(cardinality, parser):\n if cardinality == 'A':\n 
def split_alts(data, n_alts, gt):\n return pad_vcf_data([[parser(value)] for value in data], n_alts)\n elif cardinality == 'R':\n def split_alts(data, n_alts, gt):\n ref = parser(data[0])\n return pad_vcf_data([[ref, parser(alt)]\n for alt in data[1:]], n_alts)\n elif cardinality == 'G':\n def split_alts(data, n_alts, gt):\n if gt is None:\n logger.warn('Unknown ploidy when parsing genotype likelihood')\n return [data for _ in range(n_alts)]\n\n if len(gt) not in {1, 2}:\n logger.warn(\n \"Unable to handle ploidy other than haploid or diploid.\")\n return [data for _ in range(n_alts)]\n\n expected_number_of_genotype_likelihoods = n_choose_k(\n len(gt) + n_alts, n_alts)\n\n if len(data) == expected_number_of_genotype_likelihoods:\n if len(gt) == 1:\n return [[parser(data[0]), parser(data[1 + index])]\n for index in range(n_alts)]\n elif len(gt) == 2:\n return [\n [\n parser(data[0]),\n parser(data[choose_gl_position_for_diploid(0, 1 + index)]),\n parser(data[choose_gl_position_for_diploid(1 + index, 1 + index)])\n ]\n for index in range(int(n_alts))\n ]\n else:\n logger.warn(\n \"Incorrect number of values 'G' cardinality, expected {}, got {}\".format(\n expected_number_of_genotype_likelihoods, len(data)))\n if len(gt) == 1:\n return [[None, None] for _ in range(n_alts)]\n elif len(gt) == 2:\n return [[None, None, None] for _ in range(n_alts)]\n else:\n def split_alts(data, n_alts, gt):\n return [[parser(item) for item in data] for _ in range(n_alts)]\n return split_alts\n\n\nclass InfoMetadata(object):\n\n def __init__(\n self,\n number,\n data_type,\n description,\n source=None,\n version=None):\n self.number = number\n self.data_type = data_type\n self.description = description\n self.source = source\n self.version = version\n self.parser = FieldMetadata(\n None, number, data_type, description, source, version)\n self.split_alts = make_split_info_alt_func(number)\n\n def __repr__(self):\n data_items = (\n \"number={!r}\".format(self.number),\n \"data_type={!r}\".format(self.data_type),\n \"description={!r}\".format(self.description),\n \"source={!r}\".format(self.source),\n \"version={!r}\".format(self.version),\n )\n return \"<{!s}: {!s}>\".format(\n type(self).__name__, \", \".join(data_items))\n\n def __eq__(self, other):\n return all((\n self.number == other.number,\n self.data_type == other.data_type,\n self.description == other.description,\n self.source == other.source,\n self.version == other.version,\n ))\n\n\nclass SampleMetadata(object):\n\n def __init__(self, number, data_type, description):\n self.number = number\n self.data_type = data_type\n self.description = description\n self.parser = FieldMetadata(None, number, data_type, description)\n self.split_alts = make_split_sample_alt_func(\n number, self.parser.parse_func)\n\n def __repr__(self):\n data_items = (\n \"number={!r}\".format(self.number),\n \"data_type={!r}\".format(self.data_type),\n \"description={!r}\".format(self.description),\n )\n return \"<{!s}: {!s}>\".format(\n type(self).__name__, \", \".join(data_items))\n\n def __eq__(self, other):\n return all((\n self.number == other.number,\n self.data_type == other.data_type,\n self.description == other.description,\n ))\n\n\nclass FilterMetadata(object):\n\n def __init__(self, description):\n self.description = description\n\n def __eq__(self, other):\n return all((\n self.description == other.description,\n ))\n\n def __repr__(self):\n return '<{cls}: {val}>'.format(\n cls=type(self).__name__,\n val='{key}={value!r}'.format(\n key='description',\n 
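choose_gl_position_for_diploid above implements the VCF ordering of diploid genotype likelihoods, where genotype j/k (with j <= k) sits at index k*(k+1)/2 + j. A quick worked check for a site with two ALT alleles reproduces the expected order 0/0, 0/1, 1/1, 0/2, 1/2, 2/2.

def gl_index(j, k):
    # VCF spec ordering for diploid genotypes: index = k*(k+1)/2 + j, with j <= k.
    return k * (k + 1) // 2 + j

n_alleles = 3  # REF plus two ALT alleles
pairs = [(j, k) for k in range(n_alleles) for j in range(k + 1)]
print(sorted(pairs, key=lambda jk: gl_index(*jk)))
# [(0, 0), (0, 1), (1, 1), (0, 2), (1, 2), (2, 2)]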
value=self.description))\n\n\nclass ContigMetadata(object):\n\n def __init__(self, length=None):\n self.length = length\n\n def __eq__(self, other):\n return all((\n self.length == other.length,\n ))\n\n def __repr__(self):\n return '<{cls}: {val}>'.format(\n cls=type(self).__name__,\n val='{key}={value!r}'.format(\n key='length',\n value=self.length))\n\n\nclass AdapterMetadata(object):\n\n def __init__(self, adapter, hash, date):\n self.adapter = adapter\n self.hash = hash\n self.date = date\n\n def __eq__(self, other):\n return all((\n self.adapter == other.adapter,\n self.hash == other.hash,\n self.date == other.date,\n ))\n\n def __repr__(self):\n return '<{cls}: {val}>'.format(\n cls=type(self).__name__, val='; '.join((\n '{key}={value!r}'.format(key=key, value=value) for key, value in [\n ('adapter', self.adapter),\n ('hash', self.hash),\n ('date', self.date),\n ]\n ))\n )\n\n\nclass FieldMetadata(object):\n \"\"\"\n A class corresponding to either \"##FORMAT\" or \"##INFO\"\n line in the vcf header\n \"\"\"\n\n # TODO: split into type-specific parsers, improving speed by simplifying\n # code paths\n\n def __init__(\n self,\n name,\n number_field,\n data_type_field,\n description,\n source=None,\n version=None):\n self.name = name\n self.number_field = number_field\n self.cardinality_func = CARDINALITY_MAPPING.get(\n number_field, parse_cardinality)\n self.data_type_field = data_type_field\n self.parse_func = DATATYPE_MAPPING[data_type_field]\n self.description = description\n self.source = source\n self.version = version\n\n def __eq__(self, other):\n return (\n self.name == other.name and\n self.number_field == other.number_field and\n self.cardinality_func == other.cardinality_func and\n self.data_type_field == other.data_type_field and\n self.parse_func == other.parse_func and\n self.description == other.description and\n self.source == other.source and\n self.version == other.version\n )\n\n def __str__(self):\n remainder = \"\"\n if self.source:\n remainder += \",Source=\\\"{}\\\"\".format(self.source)\n if self.version:\n remainder += \",Version=\\\"{}\\\"\".format(self.version)\n return \"<ID={},Number={},Type={},Description=\\\"{}\\\"{}>\".format(\n self.name,\n self.number_field,\n self.data_type_field,\n self.description,\n remainder\n )\n\n def __call__(self, value):\n return self.parse_func(value)\n\n def extract_data(self, field, number_alts, number_genotypes):\n if field is None:\n return [self.parse_func(field)]\n elif field == '.':\n return []\n else:\n values = []\n for value in field.split(\",\"):\n if value:\n try:\n values.append(\n self.parse_func(value) if value != UNKNOWN else None)\n # Tolerate basic faults from badly-formatted header/VCF\n except ValueError as e:\n logging.warn(e.message)\n values.append(None)\n return self.cardinality_func(\n values, number_alts, self.number_field, number_genotypes)\n", "id": "3785599", "language": "Python", "matching_score": 3.7312355041503906, "max_stars_count": 8, "path": "python/wecall/vcfutils/fieldmetadata.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport unittest\n\nimport testfixtures\n\nfrom wecall.vcfutils import fieldmetadata\nfrom wecall.common.exceptions import weCallException\nfrom wecall.vcfutils.fieldmetadata import make_split_sample_alt_func\nfrom wecall.vcfutils.genotype_call import GenotypeCall\n\n\nclass VCFFieldMetadataTest(unittest.TestCase):\n\n def test_parse_float_field(self):\n field = fieldmetadata.FieldMetadata(\n 'PP', 'A', 'Float', 'Posterior probability')\n 
self.assertEqual(field.name, \"PP\")\n self.assertEqual(field.description, \"Posterior probability\")\n self.assertEqual(field.source, None)\n self.assertEqual(field.version, None)\n self.assertEqual(field.extract_data(\"12.3\", 1, None), [[12.3]])\n self.assertEqual(\n field.extract_data(\n \"12.3,78.9\", 2, None), [\n [12.3], [78.9]])\n\n def test_parse_int_field(self):\n field = fieldmetadata.FieldMetadata(\n 'TCR',\n '1',\n 'Integer',\n 'Total reverse, strand coverage at this locus',\n 'Blah',\n '0.3.0')\n self.assertEqual(field.name, \"TCR\")\n self.assertEqual(\n field.description,\n \"Total reverse, strand coverage at this locus\")\n self.assertEqual(field.source, \"Blah\")\n self.assertEqual(field.version, \"0.3.0\")\n self.assertEqual(field.extract_data(\"12\", 1, None), [[12]])\n self.assertEqual(field.extract_data(\"12\", 2, None), [[12], [12]])\n\n def test_parse_field_of_length_R(self):\n field = fieldmetadata.FieldMetadata(None, 'R', 'Integer', None)\n self.assertEqual(field.extract_data(\"12,13\", 1, None), [[12, 13]])\n self.assertEqual(field.extract_data(\n \"12,13,14\", 2, None), [[12, 13], [12, 14]])\n with self.assertRaises(AssertionError):\n field.extract_data(\"12,78\", 2, None)\n\n def test_parse_field_of_length_G(self):\n field = fieldmetadata.FieldMetadata(None, 'G', 'Integer', None)\n self.assertEqual(field.extract_data(\"12,13\", None, 2), [[12], [13]])\n self.assertEqual(\n field.extract_data(\n \"12,13,14\", None, 3), [\n [12], [13], [14]])\n with self.assertRaises(AssertionError):\n field.extract_data(\"12,78\", None, 1)\n\n def test_parse_field_of_unknown_number(self):\n field = fieldmetadata.FieldMetadata(None, '.', 'Integer', None)\n self.assertEqual(field.extract_data(\"12,13\", 1, None), [[12, 13]])\n\n def test_parse_flag_field(self):\n field = fieldmetadata.FieldMetadata(None, '.', 'Flag', None)\n self.assertEqual(True, field('1'))\n self.assertEqual(True, field('YES'))\n self.assertEqual(True, field('TRUE'))\n self.assertEqual(False, field('FALSE'))\n with self.assertRaises(weCallException):\n field(\"blah\")\n\n\nclass TestMakeSplitSampleAltFunc(unittest.TestCase):\n def test_split_genotype_likelihood_with_correct_number_of_genotypes_haploid(self):\n split_func = make_split_sample_alt_func(\"G\", lambda x: x)\n self.assertEqual([[1.0, 2.0]], split_func(\n [1.0, 2.0], 1, GenotypeCall(\"0\")))\n\n def test_split_genotype_likelihood_with_correct_number_of_genotypes_diploid(self):\n split_func = make_split_sample_alt_func(\"G\", lambda x: x)\n self.assertEqual([[1.0, 2.0, 3.0]], split_func(\n [1.0, 2.0, 3.0], 1, GenotypeCall(\"0/1\")))\n\n @testfixtures.log_capture()\n def test_split_genotype_likelihood_with_missing_genotype_likelihood_haploid(self, log):\n split_func = make_split_sample_alt_func(\"G\", lambda x: x)\n self.assertEqual(\n [[None, None], [None, None]],\n split_func([1.0, 2.0], 2, GenotypeCall(\"0\"))\n )\n log.check(\n ('wecall.vcfutils.fieldmetadata', 'WARNING',\n \"Incorrect number of values 'G' cardinality, expected 3, got 2\"),\n )\n\n @testfixtures.log_capture()\n def test_split_genotype_likelihood_with_missing_genotype_likelihood_diploid(self, log):\n split_func = make_split_sample_alt_func(\"G\", lambda x: x)\n self.assertEqual(\n [[None, None, None], [None, None, None]],\n split_func([1.0, 2.0, 3.0], 2, GenotypeCall(\"0/1\"))\n )\n log.check(\n ('wecall.vcfutils.fieldmetadata', 'WARNING',\n \"Incorrect number of values 'G' cardinality, expected 6, got 3\"),\n )\n\n def 
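For 'R'-cardinality INFO fields the parser pairs the REF value with each ALT value, as exercised by test_parse_field_of_length_R above. Below is a compact restatement of that splitting rule, mirroring parse_cardinality_R; split_R is an illustrative name, not part of the package.

def split_R(values, n_alts):
    # 'R' cardinality: one value for REF plus one per ALT; each ALT keeps the
    # REF value paired with its own.
    assert len(values) == n_alts + 1
    return [[values[0], v] for v in values[1:]]

print(split_R([12, 13, 14], 2))  # [[12, 13], [12, 14]]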
test_split_genotype_likelihood_with_correct_number_of_genotypes_haploid_multi_allelic(self):\n split_func = make_split_sample_alt_func(\"G\", lambda x: x)\n self.assertEqual(\n [[1.0, 2.0], [1.0, 3.0]],\n split_func([1.0, 2.0, 3.0], 2, GenotypeCall(\"0\"))\n )\n\n def test_split_genotype_likelihood_with_correct_number_of_genotypes_diploid_multi_allelic(self):\n split_func = make_split_sample_alt_func(\"G\", lambda x: x)\n self.assertEqual(\n [[1.0, 2.0, 3.0], [1.0, 4.0, 6.0]],\n split_func([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 2, GenotypeCall(\"0/1\"))\n )\n\n @testfixtures.log_capture()\n def test_split_genotype_likelihood_warns_for_no_genotype(self, log):\n split_func = make_split_sample_alt_func(\"G\", lambda x: x)\n self.assertEqual(\n [[1.0, 2.0], [1.0, 2.0]],\n split_func([1.0, 2.0], 2, None)\n )\n log.check(\n ('wecall.vcfutils.fieldmetadata', 'WARNING',\n \"Unknown ploidy when parsing genotype likelihood\"),\n )\n\n @testfixtures.log_capture()\n def test_split_genotype_likelihood_warns_for_non_haploid_diploid(self, log):\n split_func = make_split_sample_alt_func(\"G\", lambda x: x)\n self.assertEqual(\n [[1.0, 2.0], [1.0, 2.0]],\n split_func([1.0, 2.0], 2, GenotypeCall(\"0/1/2\"))\n )\n log.check(\n ('wecall.vcfutils.fieldmetadata', 'WARNING',\n \"Unable to handle ploidy other than haploid or diploid.\"),\n )\n", "id": "10598119", "language": "Python", "matching_score": 2.1388540267944336, "max_stars_count": 8, "path": "test/test_utils/vcfutils/test_fieldmetadata.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport unittest\nfrom wecall.vcfutils.genotype_call import GenotypeCall, merge_genotype_calls\nfrom wecall.vcfutils.sample_data import SampleData\n\n\nclass TestGenotypeCall(unittest.TestCase):\n def test_should_parse_alleles_into_sorted_list(self):\n self.assertEqual([None, 1, 2], GenotypeCall(\"2/./1\").alleles)\n\n def test_should_report_when_genotype_is_unknown(self):\n genotype_call1 = GenotypeCall(\"././.\")\n genotype_call2 = GenotypeCall(\"./0/.\")\n self.assertTrue(genotype_call1.is_unknown())\n self.assertFalse(genotype_call2.is_unknown())\n\n def test_should_compare_equal_if_alleles_match_setwise_if_both_unphased(self):\n genotype_call1 = GenotypeCall(\"0/1\")\n genotype_call2 = GenotypeCall(\"1/0\")\n self.assertEqual(genotype_call1, genotype_call2)\n\n def test_should_compare_not_equal_if_alleles_match_setwise_if_both_phased(self):\n heterozygous_genotype_call1 = GenotypeCall(\"0|1\")\n heterozygous_genotype_call2 = GenotypeCall(\"1|0\")\n self.assertNotEqual(\n heterozygous_genotype_call1,\n heterozygous_genotype_call2)\n\n def test_should_compare_equal_if_alleles_match_setwise_if_one_phased_and_one_unphased(self):\n heterozygous_genotype_call_unphased = GenotypeCall(\"0/1\")\n heterozygous_genotype_call_phased = GenotypeCall(\"1|0\")\n self.assertEqual(\n heterozygous_genotype_call_unphased,\n heterozygous_genotype_call_phased)\n\n def test_should_compare_equal_if_all_alleles_match_regardless_of_phasing(self):\n genotype_call_with_phase = GenotypeCall(\"1|1\")\n genotype_call_without_phase = GenotypeCall(\"1/1\")\n self.assertEqual(genotype_call_with_phase, genotype_call_without_phase)\n\n def test_should_not_compare_equal_diploid_and_haploid_ref_call(self):\n diploid_call = GenotypeCall(\"0/0\")\n haploid_call = GenotypeCall(\"0\")\n self.assertNotEqual(diploid_call, haploid_call)\n\n def test_should_not_compare_equal_diploid_and_haploid_hom_call(self):\n diploid_call = GenotypeCall(\"1/1\")\n haploid_call = GenotypeCall(\"1\")\n 
self.assertNotEqual(diploid_call, haploid_call)\n\n\nclass TestGenotypeCallClassifications(unittest.TestCase):\n def test_should_mark_following_as_heterozygous(self):\n self.assertTrue(GenotypeCall(\"0/1\").is_heterozygous())\n self.assertTrue(GenotypeCall(\"1/0\").is_heterozygous())\n self.assertTrue(GenotypeCall(\"1/.\").is_heterozygous())\n self.assertTrue(GenotypeCall(\"./1\").is_heterozygous())\n self.assertTrue(GenotypeCall(\"0|1\").is_heterozygous())\n self.assertTrue(GenotypeCall(\"1|0\").is_heterozygous())\n self.assertTrue(GenotypeCall(\"1|.\").is_heterozygous())\n self.assertTrue(GenotypeCall(\".|1\").is_heterozygous())\n self.assertTrue(GenotypeCall(\"1|2\").is_heterozygous())\n\n def test_should_mark_following_as_not_heterozygous(self):\n self.assertFalse(GenotypeCall(\"./.\").is_heterozygous())\n self.assertFalse(GenotypeCall(\".|.\").is_heterozygous())\n self.assertFalse(GenotypeCall(\"1/1\").is_heterozygous())\n self.assertFalse(GenotypeCall(\"1|1\").is_heterozygous())\n self.assertFalse(GenotypeCall(\"2/2\").is_heterozygous())\n self.assertFalse(GenotypeCall(\"2|2\").is_heterozygous())\n\n def test_should_mark_as_homozygous_alt(self):\n self.assertTrue(GenotypeCall(\"1/1\").is_homozygous_alt())\n self.assertTrue(GenotypeCall(\"1|1\").is_homozygous_alt())\n self.assertTrue(GenotypeCall(\"2/2\").is_homozygous_alt())\n self.assertTrue(GenotypeCall(\"2|2\").is_homozygous_alt())\n\n def test_should_not_mark_as_homozygous_alt(self):\n self.assertFalse(GenotypeCall(\"./.\").is_homozygous_alt())\n self.assertFalse(GenotypeCall(\".|.\").is_homozygous_alt())\n self.assertFalse(GenotypeCall(\"0/1\").is_homozygous_alt())\n self.assertFalse(GenotypeCall(\"1/0\").is_homozygous_alt())\n self.assertFalse(GenotypeCall(\"1/.\").is_homozygous_alt())\n self.assertFalse(GenotypeCall(\"./1\").is_homozygous_alt())\n self.assertFalse(GenotypeCall(\"0|1\").is_homozygous_alt())\n self.assertFalse(GenotypeCall(\"1|0\").is_homozygous_alt())\n self.assertFalse(GenotypeCall(\"1|.\").is_homozygous_alt())\n self.assertFalse(GenotypeCall(\".|1\").is_homozygous_alt())\n self.assertFalse(GenotypeCall(\"1|2\").is_homozygous_alt())\n\n def test_should_mark_following_as_called(self):\n self.assertTrue(GenotypeCall(\"0/1\").is_called())\n self.assertTrue(GenotypeCall(\"0|1\").is_called())\n self.assertTrue(GenotypeCall(\"./1\").is_called())\n self.assertTrue(GenotypeCall(\".|1\").is_called())\n self.assertTrue(GenotypeCall(\"0/2\").is_called())\n self.assertTrue(GenotypeCall(\"1/2\").is_called())\n self.assertTrue(GenotypeCall(\"././1\").is_called())\n self.assertTrue(GenotypeCall(\"0/0/1\").is_called())\n\n def test_should_not_mark_following_as_called(self):\n self.assertFalse(GenotypeCall(\"./.\").is_called())\n self.assertFalse(GenotypeCall(\"./0\").is_called())\n self.assertFalse(GenotypeCall(\"0/.\").is_called())\n self.assertFalse(GenotypeCall(\"0|.\").is_called())\n self.assertFalse(GenotypeCall(\".|0\").is_called())\n self.assertFalse(GenotypeCall(\".\").is_called())\n self.assertFalse(GenotypeCall(\"0\").is_called())\n self.assertFalse(GenotypeCall(\"././.\").is_called())\n self.assertFalse(GenotypeCall(\".|0|.\").is_called())\n\n\nclass TestMergeGenotypeCall(unittest.TestCase):\n # Arithmetic for GenotypeCall.\n # \"0/0\" + \"0/0\" = \"0/0\"\n # \"0/0\" + other = other\n # \"1/1\" + \"1/1\" = \"1/1\"\n # \"1/1\" + A => error for A in {0/1, 0|1, 1|0}\n # \"0/1\" + \"0/1\" => \"1/1\" (two calls made at different points.)\n # \"0/1\" + \"1|0\" = \"0/1\" + \"0|1\" = \"1/1\" (??)\n # \"0|1\" + \"1|0\" = 
\"1/1\"\n # \"0|1\" + \"0|1\" = \"0|1\"\n # \"1|0\" + \"1|0\" = \"1|0\"\n\n def test_should_get_homozygous_alt_if_combining_two_homozygous_alt_genotypes(self):\n genotype_call_1 = GenotypeCall(\"1/1\")\n genotype_call_2 = GenotypeCall(\"1/1\")\n self.assertEqual(\n merge_genotype_calls(\n genotype_call_1,\n genotype_call_2),\n GenotypeCall(\"1/1\"))\n\n def test_should_get_homozygous_ref_if_combining_two_homozygous_ref_genotypes(self):\n genotype_call_1 = GenotypeCall(\"0/0\")\n genotype_call_2 = GenotypeCall(\"0/0\")\n self.assertEqual(\n merge_genotype_calls(\n genotype_call_1,\n genotype_call_2),\n GenotypeCall(\"0/0\"))\n\n def test_should_raise_exception_one_is_homozygous_alt_and_other_heterozygous_genotypes(self):\n genotype_call_1 = GenotypeCall(\"0/1\")\n genotype_call_2 = GenotypeCall(\"1/1\")\n self.assertRaises(\n Exception,\n merge_genotype_calls,\n genotype_call_1,\n genotype_call_2)\n\n def test_should_get_heterozygous_if_one_is_homozygous_ref_and_other_is_heterozygous(self):\n genotype_call_1 = GenotypeCall(\"0/0\")\n genotype_call_2 = GenotypeCall(\"0/1\")\n self.assertEqual(\n merge_genotype_calls(\n genotype_call_1,\n genotype_call_2),\n genotype_call_2)\n\n def test_should_get_homozygous_alt_if_one_is_homozyzgous_ref_and_other_is_homozygous_alt(self):\n genotype_call_1 = GenotypeCall(\"0/0\")\n genotype_call_2 = GenotypeCall(\"1/1\")\n self.assertEqual(\n merge_genotype_calls(\n genotype_call_1,\n genotype_call_2),\n genotype_call_2)\n\n def test_should_combine_two_unphased_heterozygous_genotypes_to_homozygous_alt(self):\n genotype_call_1 = GenotypeCall(\"0/1\")\n genotype_call_2 = GenotypeCall(\"0/1\")\n self.assertEqual(\n GenotypeCall(\"1/1\"),\n merge_genotype_calls(\n genotype_call_1,\n genotype_call_2))\n\n def test_should_combine_two_heterozygous_genotypes_to_homozygous_alt_if_one_is_not_phased(self):\n genotype_call_1 = GenotypeCall(\"1|0\")\n genotype_call_2 = GenotypeCall(\"0/1\")\n self.assertEqual(\n GenotypeCall(\"1/1\"),\n merge_genotype_calls(\n genotype_call_1,\n genotype_call_2))\n\n genotype_call_1 = GenotypeCall(\"0|1\")\n genotype_call_2 = GenotypeCall(\"0/1\")\n self.assertEqual(\n GenotypeCall(\"1/1\"),\n merge_genotype_calls(\n genotype_call_1,\n genotype_call_2))\n\n def test_should_return_phased_heterozygous_genotype_when_merging_two_phased_identical_heterozygous_genotypes(self):\n genotype_call_1 = GenotypeCall(\"1|0\")\n genotype_call_2 = GenotypeCall(\"1|0\")\n self.assertEqual(\n merge_genotype_calls(\n genotype_call_1,\n genotype_call_2),\n GenotypeCall(\"1|0\"))\n\n genotype_call_1 = GenotypeCall(\"0|1\")\n genotype_call_2 = GenotypeCall(\"0|1\")\n self.assertEqual(\n merge_genotype_calls(\n genotype_call_1,\n genotype_call_2),\n GenotypeCall(\"0|1\"))\n\n def test_should_combine_two_opposite_heterozygous_phased_genotypes(self):\n genotype_call_1 = GenotypeCall(\"1|0\")\n genotype_call_2 = GenotypeCall(\"0|1\")\n self.assertEqual(\n GenotypeCall(\"1|1\"),\n merge_genotype_calls(\n genotype_call_1,\n genotype_call_2))\n\n def test_should_raise_exception_any_genotype_is_not_diploid(self):\n genotype_call_1 = GenotypeCall(\"1\")\n genotype_call_2 = GenotypeCall(\"0\")\n self.assertRaises(\n Exception,\n merge_genotype_calls,\n genotype_call_1,\n genotype_call_2)\n\n\nclass TestMergeSampleDataGenotypesCalls(unittest.TestCase):\n\n def test_should_fail_if_sample_data_objects_have_different_sample(self):\n sample_data1 = SampleData(['GT'], ['sample_name_1'])\n sample_data1.add_sample_data(\n 'sample_name_1', 'GT', GenotypeCall('0/0'))\n 
sample_data2 = SampleData(['GT'], ['sample_name_2'])\n sample_data2.add_sample_data(\n 'sample_name_2', 'GT', GenotypeCall('0/0'))\n\n self.assertRaises(\n Exception,\n sample_data1.merge_genotype_calls,\n sample_data2.genotypes())\n\n def test_should_merge_genotype_call_object_in_sample_data(self):\n sample_data1 = SampleData(['GT'], ['sample_name'])\n sample_data1.add_sample_data('sample_name', 'GT', GenotypeCall('0/1'))\n sample_data2 = SampleData(['GT'], ['sample_name'])\n sample_data2.add_sample_data('sample_name', 'GT', GenotypeCall('0/1'))\n\n sample_data1.merge_genotype_calls(sample_data2.genotypes())\n\n self.assertEqual(\n sample_data1.get_field(\"sample_name\", \"GT\"),\n GenotypeCall(\"1/1\")\n )\n\n\nclass TestGenotypeCallAlleleCounts(unittest.TestCase):\n\n def test_homozygous_unphased_genotypes(self):\n self.assertEqual((1,), GenotypeCall('0').normalized_allele_count)\n self.assertEqual((1,), GenotypeCall('0/0').normalized_allele_count)\n self.assertEqual((1,), GenotypeCall('0/0/0').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('1').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('1/1').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('1/1/1').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('2').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('2/2').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('2/2/2').normalized_allele_count)\n\n def test_homozygous_phased_genotypes(self):\n self.assertEqual((1,), GenotypeCall('0').normalized_allele_count)\n self.assertEqual((1,), GenotypeCall('0|0').normalized_allele_count)\n self.assertEqual((1,), GenotypeCall('0|0|0').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('1').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('1|1').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('1|1|1').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('2').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('2|2').normalized_allele_count)\n self.assertEqual((0, 1), GenotypeCall('2|2|2').normalized_allele_count)\n\n def test_binary_heterozygous_unphased_genotypes(self):\n self.assertEqual((1, 1), GenotypeCall('0/1').normalized_allele_count)\n self.assertEqual((1, 1), GenotypeCall('0/2').normalized_allele_count)\n self.assertEqual((0, 1, 1), GenotypeCall(\n '1/2').normalized_allele_count)\n\n def test_unknown_genotypes_allele_count(self):\n self.assertEqual((1, ), GenotypeCall('.').normalized_allele_count)\n self.assertEqual((1, ), GenotypeCall('./.').normalized_allele_count)\n self.assertEqual((1, 1), GenotypeCall('./1').normalized_allele_count)\n\n def test_binary_heterozygous_phased_genotypes(self):\n self.assertEqual((1, 1), GenotypeCall('0|1').normalized_allele_count)\n self.assertEqual((1, 1), GenotypeCall('1|0').normalized_allele_count)\n self.assertEqual((1, 1), GenotypeCall('0|2').normalized_allele_count)\n self.assertEqual((1, 1), GenotypeCall('2|0').normalized_allele_count)\n self.assertEqual((0, 1, 1), GenotypeCall(\n '1|2').normalized_allele_count)\n self.assertEqual((0, 1, 1), GenotypeCall(\n '2|1').normalized_allele_count)\n", "id": "5112099", "language": "Python", "matching_score": 3.5747103691101074, "max_stars_count": 8, "path": "test/test_utils/vcfutils/test_genotype_call.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport copy\nfrom collections import Counter\nfrom fractions import gcd\n\nfrom wecall.vcfutils.stringutils import 
from_vcf_str, to_vcf_str\n\n\nclass GenotypeCall(object):\n\n def __init__(self, genotype_string, normalize=True):\n self.phased = \"|\" in genotype_string\n self.__alleles = [\n from_vcf_str(\n x, int) for x in genotype_string.split(\n self.deliminator())]\n if normalize is True:\n self.normalise()\n\n @property\n def alleles(self):\n return self.__alleles\n\n @property\n def normalized_allele_count(self):\n counter = Counter()\n counter[0] = 0\n for allele in self.__alleles:\n counter[allele] += 1\n try:\n counter[0] += counter[None]\n del counter[None]\n except KeyError:\n pass\n gcd_value = 0\n for value in list(counter.values()):\n gcd_value = gcd(value, gcd_value)\n if gcd_value > 1:\n for key, value in list(counter.items()):\n counter[key] = value / gcd_value\n return tuple(sorted(counter.values()))\n\n def deliminator(self):\n return \"|\" if self.phased else \"/\"\n\n def __hash__(self):\n return hash(str(self))\n\n def __len__(self):\n return len(self.__alleles)\n\n def __getitem__(self, item):\n return self.__alleles[item]\n\n def __setitem__(self, key, value):\n self.__alleles[key] = value\n self.normalise()\n\n def __eq__(self, other):\n if not self.phased or not other.phased:\n return sorted(\n self.__alleles,\n key=lambda x: x if x is not None else -\n 1) == sorted(\n other.__alleles,\n key=lambda x: x if x is not None else -\n 1)\n else:\n return self.__alleles == other.__alleles\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __str__(self):\n return self.deliminator().join(to_vcf_str(s) for s in self.__alleles)\n\n def __repr__(self):\n return \"GenotypeCall<Phased={}, Alleles={}>\".format(\n self.phased, self.__alleles)\n\n def is_haploid(self):\n return len(self.__alleles) == 1\n\n def is_diploid(self):\n return len(self.__alleles) == 2\n\n def is_unknown(self):\n return all((allele is None for allele in self.__alleles))\n\n def is_called(self):\n return set(self.__alleles) - {0, None} != set()\n\n def is_heterozygous(self):\n return not self.is_unknown() and len(set(self.__alleles)) == 2\n\n def is_homozygous(self):\n return not self.is_unknown() and len(set(self.__alleles)) == 1\n\n def is_homozygous_alt(self):\n return not self.is_unknown() and self.is_homozygous(\n ) and self.__alleles[0] != 0\n\n def is_homozygous_ref(self):\n return not self.is_unknown() and self.is_homozygous() and not self.is_homozygous_alt()\n\n def normalise(self):\n if not self.phased:\n self.__alleles.sort(key=lambda x: x if x is not None else -1)\n\n def get_genotype_call_for_alt(self, alt_number):\n new_genotype = copy.deepcopy(self)\n for index, allele in enumerate(self.__alleles):\n if allele == alt_number:\n new_genotype[index] = 1\n elif allele is not None:\n new_genotype[index] = 0\n\n return new_genotype\n\n\ndef merge_genotype_calls(genotype_call1, genotype_call2):\n if len(genotype_call1) != 2 or len(genotype_call2) != 2:\n raise Exception(\"Cannot merge non-diploid genotype calls.\")\n elif genotype_call1.is_homozygous_ref():\n return copy.deepcopy(genotype_call2)\n elif genotype_call2.is_homozygous_ref():\n return copy.deepcopy(genotype_call1)\n elif genotype_call1.is_homozygous_alt() and genotype_call2.is_homozygous_alt():\n return GenotypeCall(\"1/1\")\n elif genotype_call1.is_homozygous_alt() or genotype_call2.is_homozygous_alt():\n raise Exception(\"Cannot merge any homozygous alt genotype calls.\")\n elif (genotype_call1.phased and genotype_call2.phased and genotype_call1.is_heterozygous() and\n genotype_call1.is_heterozygous() and genotype_call1 == 
genotype_call2):\n return copy.deepcopy(genotype_call1)\n else:\n return GenotypeCall(\"1/1\")\n", "id": "2267334", "language": "Python", "matching_score": 0.5297930836677551, "max_stars_count": 8, "path": "python/wecall/vcfutils/genotype_call.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom os.path import join\nfrom unittest import expectedFailure\n\nfrom wecall.genomics.variant import Variant\nfrom wecall.vcfutils.info_data import InfoData\nfrom wecall.vcfutils.schema import Schema\nfrom wecall.vcfutils.vcf_builder import VCFBuilder\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestInputSpecification(BaseTest):\n\n def test_doesnt_give_a_flying_damn_about_spurious_filter_header(self):\n chrom = \"22\"\n variant = Variant(chrom, 11, \"A\", \"C\")\n\n schema = Schema()\n complex_filter_name = '.+-*\\\\/~@?!%^&><=\\\"\\'(){}[]_|'\n schema.set_filter(complex_filter_name, 'unusual characters')\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"), schema=schema)\n gv_builder.with_record_from_variant(variant, filters={complex_filter_name})\n gv_builder.build().index()\n driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \"...........C.........\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n )\n\n expect = driver.call(expected_success=True)\n expect .with_output_vcf()\\\n .has_record_for_variant(variant)\\\n .with_sample(dodgy_sample)\\\n .has_genotype(\"1/1\")\n\n def test_doesnt_give_a_flying_damn_about_spurious_filters(self):\n chrom = \"22\"\n variant = Variant(chrom, 11, \"A\", \"C\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(\n variant, filters={\"#$.:@$%$%^&**()7!\"})\n gv_builder.build().index()\n driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \"...........C.........\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n )\n\n expect = driver.call(expected_success=True)\n expect.with_output_vcf()\\\n .has_record_for_variant(variant)\\\n .with_sample(dodgy_sample)\\\n .has_genotype(\"1/1\")\n\n def test_should_handle_complex_variant_input(self):\n chrom = \"22\"\n\n variant = Variant(chrom, 10, \"CAA\", \"CA\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(variant)\n gv_builder.build().index()\n\n driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \"...........C.........\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n )\n\n expect = driver.call()\n expect.with_log()\\\n .input_variant_trimmed_warning(variant, Variant(chrom, 11, \"A\", \"\"))\n expect.with_output_vcf()\\\n .record_count(1)\n\n @expectedFailure # \"Unskip test if parameter made public\"\n def test_should_raise_if_output_ref_calls_is_switched_on(self):\n chrom = \"22\"\n variant = Variant(chrom, 10, \"CAA\", \"CA\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(variant)\n gv_builder.build().index()\n\n 
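The tests and implementation above fix the semantics of GenotypeCall equality (unphased calls compare as allele multisets, phased calls keep their order) and of merge_genotype_calls. A short usage sketch, assuming the wecall package from this repository's python/ tree is importable:

from wecall.vcfutils.genotype_call import GenotypeCall, merge_genotype_calls

# Unphased vs phased comparison: 0/1 matches 1|0, but 0|1 is not 1|0.
assert GenotypeCall("0/1") == GenotypeCall("1|0")
assert GenotypeCall("0|1") != GenotypeCall("1|0")

# Two heterozygous calls made at different points merge to hom-alt; merging
# with a hom-ref call just keeps the other call.
assert merge_genotype_calls(GenotypeCall("0/1"), GenotypeCall("0/1")) == GenotypeCall("1/1")
assert merge_genotype_calls(GenotypeCall("0/0"), GenotypeCall("0|1")) == GenotypeCall("0|1")
print("genotype-call semantics as expected")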
driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \"...........C.........\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n ).with_output_ref_calls(True)\n\n driver.call(False).genotyping_is_incompatible_with_outputting_reference_calls_error()\n\n def test_doesnt_give_a_flying_damn_about_spurious_info(self):\n chrom = \"22\"\n variant = Variant(chrom, 11, \"A\", \"C\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(variant,\n info=InfoData(None, {\"#f$@$e%$%^&k**()7!\": [\"#o$@$f%$%f^&**()7!\"]}))\n gv_builder.build().index()\n driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \"...........C.........\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n )\n\n expect = driver.call(expected_success=True)\n expect.with_output_vcf() \\\n .has_record_for_variant(variant)\\\n .with_sample(dodgy_sample)\\\n .has_genotype(\"1/1\")\n", "id": "12673479", "language": "Python", "matching_score": 6.292269706726074, "max_stars_count": 8, "path": "test/wecall_acceptance/genotyping/test_input_file.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import expectedFailure\n\nfrom wecall.genomics.variant import Variant\nfrom wecall.vcfutils.vcf_builder import VCFBuilder\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\nfrom os.path import join\n\n\nclass TestSingleSampleGenotyping(BaseTest):\n def test_genotypes_variant_correctly_with_zero_coverage(self):\n chrom = \"22\"\n variant = Variant(chrom, 11, \"A\", \"C\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(variant)\n gv_builder.build().index()\n\n driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \"...........C.........\", n_fwd=0, n_rev=0, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n )\n\n expect = driver.call()\n expect.with_output_vcf()\\\n .has_record_for_variant(variant)\\\n .with_sample(dodgy_sample)\\\n .has_genotype(\"./.\")\n\n def test_genotypes_variant_correctly_complex_indel_which_is_snp_and_deletion(self):\n chrom = \"22\"\n variant = Variant(chrom, 10, \"CA\", \"T\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(variant)\n gv_builder.build().index()\n\n driver = SVCDriver(self)\n\n sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \"..........T*..........\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n )\n\n expect = driver.call()\n expect.with_output_vcf()\\\n .has_record_for_variant(variant)\\\n .with_sample(sample)\\\n .has_genotype(\"1/1\")\n\n def test_genotypes_variant_correctly_with_supporting_reads(self):\n chrom = \"22\"\n variant = Variant(chrom, 11, \"A\", \"C\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(variant)\n gv_builder.build().index()\n\n 
driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \"...........C.........\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n )\n\n expect = driver.call()\n expect.with_output_vcf()\\\n .has_record_for_variant(variant)\\\n .with_sample(dodgy_sample)\\\n .has_genotype(\"1/1\")\n\n def test_genotypes_mnp_correctly_with_supporting_reads(self):\n chrom = \"22\"\n variant = Variant(chrom, 11, \"AAA\", \"CAC\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(variant)\n gv_builder.build().index()\n\n driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \"...........C.C.......\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample\n ).with_read(\n \"...........C.........\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n )\n\n expect = driver.call()\n expect.with_output_vcf()\\\n .has_record_for_variant(variant)\\\n .with_sample(dodgy_sample)\\\n .has_genotype(\"./1\")\n\n @expectedFailure\n def test_gets_correct_genotype_if_not_fully_left_aligned(self):\n chrom = \"22\"\n\n variant = Variant(chrom, 12, \"AA\", \"A\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(variant)\n gv_builder.build().index()\n\n driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAATACGCCCCCTACGCCCCCT\", chrom=chrom, pos_from=0\n ).with_read(\n \"...................*...................\", n_fwd=10, n_rev=10, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n )\n\n expect = driver.call()\n expect.with_output_vcf()\\\n .record_count(1)\\\n .has_record_for_variant(variant)\\\n .with_sample(dodgy_sample).has_genotype(\"1/1\")\n\n def test_genotypes_variant_correctly_with_unsupportive_reads(self):\n chrom = \"22\"\n variant = Variant(chrom, 11, \"A\", \"C\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(variant)\n gv_builder.build().index()\n\n driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \".....................\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n )\n\n expect = driver.call()\n expect\\\n .with_output_vcf()\\\n .record_count(1) \\\n .has_record_for_variant(variant)\\\n .with_sample(dodgy_sample)\\\n .has_genotype(\"0/0\")\n\n def test_doesnt_call_extra_variants(self):\n chrom = \"22\"\n variant = Variant(\"1\", 11, \"A\", \"C\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(variant)\n gv_builder.build().index()\n\n driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \"....G................\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename)\n\n expect = driver.call()\n expect \\\n .with_output_vcf() \\\n .record_count(0)\n\n def 
test_raises_if_genotyping_file_doesnt_exist(self):\n missing_file = join(\n self.work_dir,\n \"I_DONT_EXIST_NOT_JUST_IN_THE_PHILOSOPHICAL_SENSE.vcf.gz\")\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\",\n ).with_read(\n \"....G................\", n_fwd=5, n_rev=5,\n ).with_genotype_alleles(\n missing_file\n ).with_verbosity(0)\n\n driver.call(expected_success=False)\\\n .missing_genotype_file(missing_file)\n\n def test_raises_if_genotyping_file_is_not_expected_format(self):\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.build() # note: not compressed or indexed\n\n driver = SVCDriver(self)\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\",\n ).with_read(\n \"....G................\", n_fwd=5, n_rev=5,\n ).with_genotype_alleles(\n gv_builder.filename\n ).with_verbosity(0)\n\n driver.call(expected_success=False)\\\n .unexpected_genotype_file_format(gv_builder.filename)\n\n def test_raises_if_genotyping_file_not_indexed(self):\n chrom = \"22\"\n variant = Variant(chrom, 11, \"A\", \"C\")\n\n gv_builder = VCFBuilder(join(self.work_dir, \"genotype.vcf\"))\n gv_builder.with_record_from_variant(variant)\n gv_builder.build().bgzip() # note: no index\n\n driver = SVCDriver(self)\n\n dodgy_sample = \"bobs_your_uncle\"\n driver.with_ref_sequence(\n \"ACGCCCCCTGCAAAAAAAAAA\", chrom=chrom, pos_from=0\n ).with_read(\n \"...........C.........\", n_fwd=5, n_rev=5, chrom=chrom, sample_name=dodgy_sample\n ).with_genotype_alleles(\n gv_builder.compressed_filename\n ).with_verbosity(0)\n\n driver.call(expected_success=False)\\\n .missing_genotype_index_file(gv_builder.compressed_filename_index)\n", "id": "8539958", "language": "Python", "matching_score": 3.203202724456787, "max_stars_count": 8, "path": "test/wecall_acceptance/genotyping/test_single_sample.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestRegionPadding(BaseTest):\n def test_should_call_snp_with_minimal_covering_region_using_default_padding(self):\n svc = SVCDriver(self)\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"......................................................\", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................T........... \", chrom='1', n_fwd=10, n_rev=10)\n svc.with_region_string('1:40-41')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(1).has_record_for_variant(Variant('1', 40, \"C\", \"T\"))\n\n def test_should_call_del_with_minimal_covering_region_using_default_padding(self):\n svc = SVCDriver(self)\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"......................................................\", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................*........... 
\", chrom='1', n_fwd=10, n_rev=10)\n svc.with_region_string('1:40-41')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(1).has_record_for_variant(Variant('1', 39, \"GC\", \"G\"))\n\n def test_should_call_del_with_minimal_covering_region_using_default_padding_with_region_before(self):\n svc = SVCDriver(self)\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"......................................................\", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................*........... \", chrom='1', n_fwd=10, n_rev=10)\n svc.with_region_string('1:39-40')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(1).has_record_for_variant(Variant('1', 39, \"GC\", \"G\"))\n\n def test_should_not_call_del_if_region_doesnt_overlap_deleted_part(self):\n svc = SVCDriver(self)\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"......................................................\", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................*........... \", chrom='1', n_fwd=10, n_rev=10)\n svc.with_region_string('1:38-39,1:41-42')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(0)\n\n def test_should_call_first_snp_if_region_padding_is_zero(self):\n svc = SVCDriver(self)\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"......................................................\", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ...............................G.......... \", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................T........... \", chrom='1', n_fwd=6, n_rev=6)\n svc.with_region_string('1:0-41')\n svc.with_region_padding(0)\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(1).has_record_for_variant(Variant('1', 40, \"C\", \"T\"))\n\n def test_should_not_call_first_snp_if_region_padding_is_one(self):\n svc = SVCDriver(self)\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"......................................................\", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ...............................G.......... \", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................T........... \", chrom='1', n_fwd=6, n_rev=6)\n svc.with_region_string('1:0-41')\n svc.with_region_padding(1)\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(0)\n\n def test_should_cope_with_region_padding_which_pads_to_negative_index_into_reference(self):\n svc = SVCDriver(self)\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"......................................................\", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ...............................G.......... \", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................T........... 
\", chrom='1', n_fwd=6, n_rev=6)\n svc.with_region_string('1:20-41')\n svc.with_region_padding(20)\n\n svc.call(expected_success=True)\n", "id": "1023002", "language": "Python", "matching_score": 4.564641952514648, "max_stars_count": 8, "path": "test/wecall_acceptance/regions_specification/test_region_padding.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestSmallRegions(BaseTest):\n\n def test_should_only_call_overlapping_ref_call(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"......................................................\", chrom='1', n_fwd=10, n_rev=10\n ).with_output_ref_calls(True).with_max_ref_call_size(1)\n\n svc.with_region_string('1:39-40')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(1)\n\n def test_should_ignore_SNP_not_overlapping_region(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"....................................... \", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................T........... \", chrom='1', n_fwd=10, n_rev=10)\n svc.with_region_string('1:0-40')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(0)\n\n def test_should_not_ignore_SNP_not_overlapping_region(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"....................................... \", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................T........... \", chrom='1', n_fwd=10, n_rev=10)\n svc.with_region_string('1:0-41')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(1).has_record_for_variant(Variant('1', 40, \"C\", \"T\"))\n\n def test_should_use_read_evidence_outside_region_to_not_call_snp(self):\n svc = SVCDriver(self)\n # If no region was specified then only the SNP with stronger evidence\n # would be outputted.\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"......................................................\", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ...............................G.......... \", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................T........... \", chrom='1', n_fwd=6, n_rev=6)\n svc.with_region_string('1:0-41')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(0)\n\n def test_should_ignore_deletion_not_overlapping_region(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"....................................... \", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................****........ \", chrom='1', n_fwd=10, n_rev=10)\n svc.with_region_string('1:0-39')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(0)\n\n def test_should_include_deletion_overlapping_region(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"....................................... 
\", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................****........ \", chrom='1', n_fwd=10, n_rev=10)\n svc.with_region_string('1:0-41')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(1).has_record_for_variant(Variant('1', 39, \"GCCCC\", \"G\"))\n\n def test_should_include_deletion_overlapping_two_region(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"....................................... \", chrom='1', n_fwd=10, n_rev=10).with_read(\n \" ..............................****........ \", chrom='1', n_fwd=10, n_rev=10)\n svc.with_region_string('1:0-41,1:43-48')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(1).has_record_for_variant(Variant('1', 39, \"GCCCC\", \"G\"))\n\n def test_should_call_variant_in_complex_region_within_small_calling_region(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTAGTCGGTAGGAATAATG\", chrom='1').with_read(\n \"....................................... \", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................****........ \", chrom='1', n_fwd=10, n_rev=10)\n svc.with_region_string('1:0-44')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(1)\n\n def test_should_not_ignore_variant_overlapping_edge_of_small_region(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCCCCCCCCCCCATG\", chrom='1'\n ).with_read(\n \"....................................... \", chrom='1', n_fwd=10, n_rev=10\n ).with_read(\n \" ..............................****........ \", chrom='1', n_fwd=10, n_rev=10)\n svc.with_region_string('1:0-42')\n\n expect = svc.call()\n\n expect.with_output_vcf().record_count(1)\n", "id": "8849445", "language": "Python", "matching_score": 1.4801466464996338, "max_stars_count": 8, "path": "test/wecall_acceptance/regions_specification/test_small_regions.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestAllelePlusStrandBiasFilteringBehaviour(BaseTest):\n\n def test_should_allow_mildly_strand_biased_calls(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n reads = 10\n strand_bias = 6\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"............................................\",\n n_rev=reads + strand_bias, n_fwd=reads - strand_bias, chrom=chrom\n ).with_read(\n \"................G...........................\",\n n_rev=reads - strand_bias, n_fwd=reads + strand_bias, chrom=chrom\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_no_filters()\n\n def test_should_allow_mildly_allele_biased_calls(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n reads = 10\n allele_bias = 5\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"............................................\",\n n_rev=reads + allele_bias, n_fwd=reads + allele_bias, chrom=chrom\n ).with_read(\n \"................G...........................\",\n n_rev=reads - allele_bias, n_fwd=reads - allele_bias, chrom=chrom\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n 
.has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_no_filters()\n\n def test_should_stop_mildly_allele_and_strand_biased_calls(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n reads = 10\n allele_bias = 5\n strand_bias = 4\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n chrom=chrom).with_read(\n \"............................................\",\n n_rev=reads + allele_bias + strand_bias,\n n_fwd=reads + allele_bias - strand_bias,\n chrom=chrom).with_read(\n \"................G...........................\",\n n_rev=reads - allele_bias - strand_bias,\n n_fwd=reads - allele_bias + strand_bias,\n chrom=chrom)\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_filters({'AB+SB'})\n\n def test_should_allow_mildly_allele_and_strand_biased_calls_with_lower_specified_threshold(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n reads = 10\n allele_bias = 5\n strand_bias = 4\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n chrom=chrom).with_read(\n \"............................................\",\n n_rev=reads + allele_bias + strand_bias,\n n_fwd=reads + allele_bias - strand_bias,\n chrom=chrom).with_read(\n \"................G...........................\",\n n_rev=reads - allele_bias - strand_bias,\n n_fwd=reads - allele_bias + strand_bias,\n chrom=chrom)\n svc.with_allele_plus_strand_bias_p(0.03)\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_no_filters()\n", "id": "1405692", "language": "Python", "matching_score": 4.40097188949585, "max_stars_count": 8, "path": "test/wecall_acceptance/call_filters/test_combined_allele_strand_bias.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestDefaultStrandBiasFilteringBehaviour(BaseTest):\n\n def test_should_allow_unbiased_calls_to_pass_through(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"................G...........................\",\n n_rev=10, n_fwd=10, chrom=chrom\n ).with_read(\n \"............................................\",\n n_rev=10, n_fwd=10, chrom=chrom\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_no_filters()\n\n def test_should_stop_forward_biased_calls_to_pass_through(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n reads = 10\n bias = 7\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"............................................\",\n n_rev=reads + bias, n_fwd=reads - bias, chrom=chrom\n ).with_read(\n \"................G...........................\",\n n_rev=reads - bias, n_fwd=reads + bias, chrom=chrom\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_filters({'SB'})\n\n def test_should_stop_reverse_biased_calls_to_pass_through(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n reads = 10\n bias = -7\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n 
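# bias = -7: these reference reads end up mostly forward-strand (n_rev=3, n_fwd=17)\n            # while the G-supporting reads below are mostly reverse-strand, hence the SB filter.\n            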
\"............................................\",\n n_rev=reads + bias, n_fwd=reads - bias, chrom=chrom\n ).with_read(\n \"................G...........................\",\n n_rev=reads - bias, n_fwd=reads + bias, chrom=chrom\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_filters({'SB'})\n\n\nclass TestCustomStrandBiasThreshold(BaseTest):\n \"\"\"\n Estimating custom probability cut-off point\n > def beta_binomial(k, n, a, b):\n . kCn = scipy.special.comb(n, k)\n . num = scipy.special.beta(k+a, n-k+b)\n . den = scipy.special.beta(a, b)\n . return kCn * num / den\n > sum((beta_binomial(n, 30, 20.0*30.0/30.0, 20.0) for n in range(21, 30)))\n 0.062450575207941214\n > sum((beta_binomial(n, 30, 20.0*30.0/30.0, 20.0) for n in range(22, 30)))\n 0.033780564203978708\n > 0.5 * (0.062450575207941214 + 0.033780564203978708)\n 0.04811556970595996\n \"\"\"\n\n def test_should_allow_forward_biased_calls_just_below_custom_threshold_to_pass_through(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n svc.with_strand_bias_p(0.04811556970595996)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"............................................\", n_rev=21, n_fwd=9, chrom=chrom\n ).with_read(\n \"................G...........................\", n_rev=9, n_fwd=21, chrom=chrom)\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_no_filters()\n\n def test_should_filter_forward_biased_calls_just_above_custom_threshold(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n svc.with_strand_bias_p(0.04811556970595996)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"............................................\", n_rev=22, n_fwd=8, chrom=chrom\n ).with_read(\n \"................G...........................\", n_rev=8, n_fwd=22, chrom=chrom)\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_filters({'SB'})\n\n def test_should_allow_reverse_biased_calls_just_below_custom_threshold_to_pass_through(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n svc.with_strand_bias_p(0.04811556970595996)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"............................................\", n_rev=9, n_fwd=21, chrom=chrom\n ).with_read(\n \"................G...........................\", n_rev=21, n_fwd=9, chrom=chrom)\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_no_filters()\n\n def test_should_filter_reverse_biased_calls_just_above_custom_threshold(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n svc.with_strand_bias_p(0.04811556970595996)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"............................................\", n_rev=22, n_fwd=8, chrom=chrom\n ).with_read(\n \"................G...........................\", n_rev=8, n_fwd=22, chrom=chrom)\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_filters({'SB'})\n", "id": "10665332", "language": "Python", "matching_score": 2.173649549484253, 
"max_stars_count": 8, "path": "test/wecall_acceptance/call_filters/test_strand_bias.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.reference_chromosome import DEFAULT_CHROM\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestBadReadsFilter(BaseTest):\n def test_bad_reads_filter_applied_when_snp_has_low_quality_bases_on_left_of_supporting_reads(self):\n svc = SVCDriver(self) \\\n .with_var_filters(\"BR\") \\\n .with_bad_reads_window_size(7) \\\n .with_min_bad_reads_score(21)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\",\n \" 2 \",\n n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n vcf_expectation \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\")) \\\n .with_filters({\"BR\"})\n\n def test_bad_reads_filter_applied_when_snp_has_low_quality_bases_on_right_of_supporting_reads(self):\n svc = SVCDriver(self) \\\n .with_var_filters(\"BR\") \\\n .with_bad_reads_window_size(7) \\\n .with_min_bad_reads_score(21)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\",\n \" 2 \",\n n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n vcf_expectation \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\")) \\\n .with_filters({\"BR\"})\n\n def test_bad_reads_filter_window_considers_full_alignment_span_of_indel(self):\n svc = SVCDriver(self) \\\n .with_var_filters(\"BR\") \\\n .with_bad_reads_window_size(7) \\\n .with_min_bad_reads_score(21)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................*...........................\",\n \" 2 \",\n n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n vcf_expectation \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 15, \"GT\", \"G\")) \\\n .with_filters({\"BR\"})\n\n def test_bad_reads_filter_window_considers_full_alignment_span_of_indel_in_di_nucleotide_region(self):\n svc = SVCDriver(self) \\\n .with_var_filters(\"BR\") \\\n .with_bad_reads_window_size(7) \\\n .with_min_bad_reads_score(21)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTGTGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................**..........................\",\n \" 2 \",\n n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n vcf_expectation \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 14, \"GGT\", \"G\")) \\\n .with_filters({\"BR\"})\n\n def test_should_not_apply_filter_to_snp_if_all_supporting_reads_are_good(self):\n svc = SVCDriver(self) \\\n .with_var_filters(\"BR\") \\\n .with_bad_reads_window_size(7) \\\n .with_min_bad_reads_score(15)\n\n svc.with_ref_sequence(\n # 1234567 890123456789\n \"AAAGCGTAA**CCGGGTTAGT**CAAACCCGTTACGTATGCATG\"\n ).with_read(\n \".........**.....G....**.....................\", n_rev=10, n_fwd=10\n ).with_read(\n \".........GT..........TA.....................\",\n \" 00 00 \",\n n_rev=11, n_fwd=10\n )\n\n expect = svc.call()\n 
vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(3)\n\n vcf_expectation \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 14, \"T\", \"G\")) \\\n .with_no_filters()\n\n def test_should_not_apply_bad_reads_to_insertion_if_all_supporting_reads_have_high_base_qualities(self):\n svc = SVCDriver(self) \\\n .with_var_filters(\"BR\") \\\n .with_bad_reads_window_size(3) \\\n .with_min_bad_reads_score(15)\n\n svc.with_ref_sequence(\n # 1234567890123 456789\n \"AAAGCGTACAACCG*GGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"..............*..G...........................\",\n \" 1 \",\n n_rev=11, n_fwd=10\n )\n svc.with_read(\n \"..............T..............................\",\n n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n vcf_expectation \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 13, \"G\", \"GT\")) \\\n .with_no_filters()\n\n def test_should_not_apply_filter_with_base_pair_too_far_on_left_of_snp(self):\n svc = SVCDriver(self) \\\n .with_var_filters(\"BR\") \\\n .with_bad_reads_window_size(7) \\\n .with_min_bad_reads_score(15)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\",\n \" 0 \", n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n\n vcf_expectation \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\")) \\\n .with_no_filters()\n\n def test_should_not_apply_filter_with_base_pair_too_far_on_right_of_snp(self):\n svc = SVCDriver(self) \\\n .with_var_filters(\"BR\") \\\n .with_bad_reads_window_size(1) \\\n .with_min_bad_reads_score(15)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\",\n \" 0 \", n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n\n vcf_expectation \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\")) \\\n .with_no_filters()\n\n def test_bad_reads_filter_not_applied_when_median_read_is_good(self):\n svc = SVCDriver(self) \\\n .with_var_filters(\"BR\") \\\n .with_bad_reads_window_size(7) \\\n .with_min_bad_reads_score(20)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\",\n \" 1 1 \", n_rev=10, n_fwd=10\n ).with_read(\n \"................G...........................\",\n \" 4444444 4444444 \", n_rev=11, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n vcf_expectation \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\")) \\\n .with_no_filters()\n\n def test_bad_reads_filter_not_applied_when_snp_has_high_quality_bases_nearby(self):\n svc = SVCDriver(self)\n svc.with_var_filters(\"BR\")\n svc.with_bad_reads_window_size(7)\n svc.with_min_bad_reads_score(15)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\",\n \" 4444444 4444444 \",\n n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n vcf_expectation \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\")) \\\n .with_no_filters()\n\n def 
test_bad_reads_filter_not_applied_if_one_sample_is_not_naughty(self):\n svc = SVCDriver(self)\n svc.with_var_filters(\"BR\")\n svc.with_bad_reads_window_size(7)\n svc.with_min_bad_reads_score(13)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\",\n \" 3333333 3333333 \",\n sample_name=\"GOOD\", n_rev=2, n_fwd=2\n ).with_read(\n \"................G...........................\",\n \" 0000000 0000000 \",\n sample_name=\"BAD\", n_rev=10, n_fwd=10\n ).with_read(\n \"................G...........................\",\n \" 00000 0000 \",\n sample_name=\"UGLY\", n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n vcf_expectation \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\")) \\\n .with_no_filters()\n", "id": "6541070", "language": "Python", "matching_score": 4.943249225616455, "max_stars_count": 8, "path": "test/wecall_acceptance/call_filters/test_bad_reads_filter.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.reference_chromosome import DEFAULT_CHROM\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestMinSquareRootMeanSquareMappingQualityFilter(BaseTest):\n def test_should_filter_variant_when_all_reads_have_quality_below_threshold(self):\n svc = SVCDriver(self)\n threshold = 50\n svc.with_var_filters(\"MQ\")\n svc.with_min_root_mean_square_mapping_q(threshold)\n svc.with_read_mapping_filter_q(0)\n svc\\\n .with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\")\\\n .with_read(\n \"................G...........................\", n_fwd=10, n_rev=10, mapping_quality=threshold - 1)\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n vcf_expectation\\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\\\n .with_filters({\"MQ\"})\n\n def test_should_filter_variant_when_all_supporting_reads_have_low_mapping_quality(self):\n svc = SVCDriver(self)\n threshold = 50\n low_mq = threshold - 1\n high_mq = threshold * 2\n svc.with_var_filters(\"MQ\")\n svc.with_min_root_mean_square_mapping_q(threshold)\n svc.with_read_mapping_filter_q(0)\n svc\\\n .with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\")\\\n .with_read(\n \"................G......................... \", n_fwd=10, n_rev=10, mapping_quality=low_mq)\\\n .with_read(\n \"............................................\", n_fwd=10, n_rev=10, mapping_quality=high_mq)\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n vcf_expectation\\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\\\n .with_filters({\"MQ\"})\n\n def test_should_not_filter_variant_when_all_supporting_reads_have_high_mapping_quality(self):\n svc = SVCDriver(self)\n threshold = 50\n low_mq = threshold - 1\n high_mq = threshold\n svc.with_var_filters(\"MQ\")\n svc.with_min_root_mean_square_mapping_q(threshold)\n svc.with_read_mapping_filter_q(0)\n svc\\\n .with_ref_sequence(\"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\").with_read(\n \"................G......................... 
\", n_fwd=10, n_rev=10, mapping_quality=high_mq)\\\n .with_read(\n \"............................................\", n_fwd=10, n_rev=10, mapping_quality=low_mq)\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n vcf_expectation\\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\\\n .with_no_filters()\n\n def test_should_take_max_mq_over_all_the_samples_which_support_the_variant(self):\n svc = SVCDriver(self)\n threshold = 50\n svc.with_var_filters(\"MQ\")\n svc.with_min_root_mean_square_mapping_q(threshold)\n svc.with_read_mapping_filter_q(0)\n svc\\\n .with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\")\\\n .with_read(\n \"................G......................... \",\n n_fwd=10, n_rev=10, mapping_quality=threshold - 1, sample_name=\"Ugly\")\\\n .with_read(\n \"................G...........................\",\n n_fwd=10, n_rev=10, mapping_quality=threshold, sample_name=\"Good\")\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n vcf_expectation\\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\\\n .with_no_filters()\n\n def test_should_take_max_mq_only_over_the_the_samples_that_support_the_variant(self):\n svc = SVCDriver(self)\n threshold = 50\n svc.with_var_filters(\"MQ\")\n svc.with_min_root_mean_square_mapping_q(threshold)\n svc.with_read_mapping_filter_q(0)\n svc\\\n .with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\")\\\n .with_read(\n \"................G......................... \",\n n_fwd=10, n_rev=10, mapping_quality=threshold - 1, sample_name=\"Ugly\")\\\n .with_read(\n \"............................................\",\n n_fwd=10, n_rev=10, mapping_quality=threshold + 1, sample_name=\"Good\")\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n vcf_expectation\\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\\\n .with_filters({\"MQ\"})\n\n def test_should_filter_variant_when_root_mean_square_of_supporting_reads_is_below_threshold(self):\n svc = SVCDriver(self)\n threshold = 50\n svc.with_var_filters(\"MQ\")\n svc.with_min_root_mean_square_mapping_q(threshold)\n svc.with_read_mapping_filter_q(0)\n svc\\\n .with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\")\\\n .with_read(\n \"................G...........................\", n_fwd=10, n_rev=10, mapping_quality=threshold - 1)\\\n .with_read(\n \"................G...........................\", n_fwd=10, n_rev=10, mapping_quality=threshold)\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n vcf_expectation\\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\\\n .with_filters({\"MQ\"})\n\n def test_should_not_filter_variant_when_root_mean_square_of_supporting_reads_is_above_threshold(self):\n svc = SVCDriver(self)\n threshold = 100\n svc.with_var_filters(\"MQ\")\n svc.with_min_root_mean_square_mapping_q(threshold)\n svc.with_read_mapping_filter_q(0)\n svc\\\n .with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\")\\\n .with_read(\n \"................G...........................\", n_fwd=10, n_rev=10, mapping_quality=threshold - 1)\\\n .with_read(\n \"................G...........................\", n_fwd=1, n_rev=1, mapping_quality=threshold + 30)\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n 
vcf_expectation.record_count(1)\n\n vcf_expectation\\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\\\n .with_no_filters()\n", "id": "7141013", "language": "Python", "matching_score": 4.226224899291992, "max_stars_count": 8, "path": "test/wecall_acceptance/call_filters/test_min_root_mean_square_mapping_quality_filter.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.reference_chromosome import DEFAULT_CHROM\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\nMAX_PHRED = 3000\n\n\nclass TestQualityOverDepthFilter(BaseTest):\n\n def test_should_apply_soft_filter_to_snp_with_low_quality(self):\n svc = SVCDriver(self)\n svc.with_var_filters(\"QD\")\n svc.with_min_snp_q_over_depth(MAX_PHRED)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\", n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n record_expectation = vcf_expectation.has_record_for_variant(\n Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\n record_expectation.with_filters({\"QD\"})\n\n def test_should_not_apply_soft_filter_to_snp_with_high_quality(self):\n svc = SVCDriver(self)\n svc.with_var_filters(\"QD\")\n svc.with_min_snp_q_over_depth(1.0)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\", n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n record_expectation = vcf_expectation.has_record_for_variant(\n Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\n record_expectation.with_filters(set())\n\n def test_should_apply_soft_filter_to_indel_with_low_quality(self):\n svc = SVCDriver(self)\n svc.with_var_filters(\"QD\")\n svc.with_min_indel_q_over_depth(MAX_PHRED)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGG*TAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................C...........................\",\n n_rev=1, n_fwd=1\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n record_expectation = vcf_expectation.has_record_for_variant(\n Variant(DEFAULT_CHROM, 15, \"G\", \"GC\"))\n record_expectation.with_filters({\"QD\"})\n\n def test_should_not_apply_soft_filter_to_indel_with_high_quality(self):\n svc = SVCDriver(self)\n svc.with_var_filters(\"QD\")\n svc.with_min_indel_q_over_depth(1.0)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGG*TAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................C...........................\", n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n record_expectation = vcf_expectation.has_record_for_variant(\n Variant(DEFAULT_CHROM, 15, \"G\", \"GC\"))\n record_expectation.with_filters(set())\n\n def test_should_not_apply_filter_for_quality_below_cap(self):\n record_expectation = QD_impl(self, n_rev=30, n_fwd=30)\n record_expectation.with_filters(set())\n\n def test_should_not_apply_filter_for_quality_beyond_cap(self):\n # Quality is capped so that quality / (number of supporting reads) is\n # low artificially.\n record_expectation = QD_impl(self, n_rev=50, n_fwd=50)\n 
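# With 100 supporting reads the variant quality saturates at MAX_PHRED, so QD is\n        # expected to be missing from the INFO field instead of triggering the QD filter.\n        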
record_expectation \\\n .with_quality(MAX_PHRED) \\\n .with_filters(set())\n record_expectation.with_info() \\\n .with_field('QD', [None])\n\n\ndef QD_impl(test_case, n_fwd, n_rev):\n svc = SVCDriver(test_case)\n svc.with_var_filters(\"QD\")\n svc.with_min_snp_q_over_depth(35)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\"\n ).with_read(\n \"................G...........................\", n_rev=n_rev, n_fwd=n_fwd\n )\n\n expect = svc.call()\n vcf_expectation = expect.with_output_vcf()\n vcf_expectation.record_count(1)\n\n return vcf_expectation.has_record_for_variant(\n Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\n", "id": "7626329", "language": "Python", "matching_score": 3.6972460746765137, "max_stars_count": 8, "path": "test/wecall_acceptance/call_filters/test_quality_over_depth_filter.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.reference_chromosome import DEFAULT_CHROM\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestNoSimilarReadsFilterFilter(BaseTest):\n\n def test_should_not_be_on_by_default_in_tests(self):\n svc = SVCDriver(self)\n svc.with_min_reads_per_var(20)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n ).with_read(\n \"................G...........................\", n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\n\n def test_should_filter_similar_reads(self):\n svc = SVCDriver(self)\n svc.with_no_similar_reads_filter(True)\n svc.with_min_reads_per_var(2)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n ).with_read(\n \"................G...........................\", n_rev=10, n_fwd=0\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(0)\n\n def test_should_not_filter_similar_reads_with_different_start_positions(self):\n svc = SVCDriver(self)\n svc.with_no_similar_reads_filter(True)\n svc.with_min_reads_per_var(8)\n svc.with_allow_MNP_calls(False)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"TTAATGCATGCATGCATGCATGCATGCATGCATGCCCCG\",\n ).with_read(\n \" G...G...G...G...G...G... \", n_fwd=1, n_rev=1\n ).with_read(\n \" G...G...G...G...G...G... \", n_fwd=1, n_rev=1\n ).with_read(\n \" G...G...G...G...G...G... \", n_fwd=1, n_rev=1\n ).with_read(\n \".......G...G...G...G...G...G...........\", n_fwd=1, n_rev=1\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(4)\n\n def test_should_not_filter_similar_reads_with_different_sequences(self):\n svc = SVCDriver(self)\n svc.with_min_reads_per_var(6)\n svc.with_no_similar_reads_filter(True)\n svc.with_allow_MNP_calls(False)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"TTAATGCATGCATGCATGCATGCATGCATGCATGCCCCG\",\n ).with_read(\n \" ....G...G...G........... \", n_fwd=1, n_rev=1\n ).with_read(\n \" G.......G...G........... \", n_fwd=1, n_rev=1\n ).with_read(\n \" G...G.......G........... 
\", n_fwd=1, n_rev=1\n ).with_read(\n \".......G...G...G...G...................\", n_fwd=1, n_rev=1\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(4)\n", "id": "5461747", "language": "Python", "matching_score": 4.373359680175781, "max_stars_count": 8, "path": "test/wecall_acceptance/read_filters/no_similar_reads.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.bamutils.read_sequence import FORWARD_GOOD_READ\nfrom wecall.genomics.reference_chromosome import DEFAULT_CHROM\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestPairedReadFilters(BaseTest):\n\n def test_should_call_variant_with_proper_paired_reads_and_allow_improper_reads_flag_set(self):\n svc = SVCDriver(self)\n svc.with_allow_improper_pairs()\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n ).with_read(\n \"................G...........................\", n_rev=10, n_fwd=10\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\n\n def test_should_call_variant_with_improper_paired_reads_and_allow_improper_reads_flag_set(self):\n svc = SVCDriver(self)\n svc.with_allow_improper_pairs()\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n ).with_read(\n \"................G...........................\", n_rev=10, n_fwd=10, read_flags=FORWARD_GOOD_READ & ~2\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(DEFAULT_CHROM, 16, \"T\", \"G\"))\n\n def test_should_call_variant_with_improper_paired_reads_when_allow_improper_reads_flag_not_set(self):\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n # 1234567890123456789\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\",\n ).with_read(\n \"................G...........................\", n_rev=10, n_fwd=10, read_flags=FORWARD_GOOD_READ & ~2\n )\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1)\n", "id": "336401", "language": "Python", "matching_score": 4.6429338455200195, "max_stars_count": 8, "path": "test/wecall_acceptance/read_filters/test_non_proper_pair.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestLowQualityFilter(BaseTest):\n def test_should_filter_low_quality_call(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"................G...........................\",\n n_rev=1, n_fwd=1, chrom=chrom\n ).with_read(\n \"............................................\",\n n_rev=1, n_fwd=1, chrom=chrom\n ).with_min_call_qual(40)\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_filters({'LQ'})\n\n def test_should_not_filter_high_quality_call(self):\n chrom = 'chr1'\n svc = SVCDriver(self)\n\n svc.with_ref_sequence(\n \"AAAGCGTACAACCGGGTTAGTCACAAACCCGTTACGTATGCATG\", chrom=chrom\n ).with_read(\n \"................G...........................\",\n n_rev=10, n_fwd=10, chrom=chrom\n ).with_read(\n \"............................................\",\n 
n_rev=10, n_fwd=10, chrom=chrom\n ).with_min_call_qual(40)\n\n expect = svc.call()\n\n expect.with_output_vcf() \\\n .record_count(1) \\\n .has_record_for_variant(Variant(chrom, 16, 'T', 'G')) \\\n .with_no_filters()\n", "id": "2524290", "language": "Python", "matching_score": 4.0561347007751465, "max_stars_count": 8, "path": "test/wecall_acceptance/call_filters/test_quality_filter.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics.variant import Variant\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.svc_driver import SVCDriver\n\n\nclass TestOutputAllVariants(BaseTest):\n\n def test_should_output_isolated_variants_in_identical_way(self):\n ref_sequence = 'GTGG**AGACCTGAGCGAACAAGAGCGCAC'\n var_sequence = ' ..GA.........T.......**... '\n\n normal = SVCDriver(self)\n normal \\\n .with_ref_sequence(ref_sequence) \\\n .with_read(var_sequence, n_fwd=10, n_rev=10)\n\n normal_vcf = normal \\\n .call() \\\n .with_output_vcf() \\\n .record_count(3)\n\n all_variants = SVCDriver(self) \\\n .with_all_variants(True)\n all_variants \\\n .with_ref_sequence(ref_sequence) \\\n .with_read(var_sequence, n_fwd=10, n_rev=10)\n\n all_variants_vcf = all_variants \\\n .call() \\\n .with_output_vcf() \\\n .record_count(3)\n\n self.assertEqual(normal_vcf, all_variants_vcf)\n\n def test_should_output_each_individual_variant_from_MNP(self):\n chrom = 'chr12'\n driver = SVCDriver(self) \\\n .with_all_variants(True) \\\n .with_allow_MNP_calls(True)\n\n driver .with_ref_sequence(\n 'GTGGGAAGACCTGAGCGAACAAGAGCGCAC', chrom=chrom\n ).with_read(\n ' .....T.....T.....T........ ', n_fwd=10, n_rev=10, chrom=chrom)\n\n vcf = driver.call().with_output_vcf()\n\n vcf.has_record_for_variant(Variant(chrom, 7, 'G', 'T')).with_filters({'LQ', 'NC'})\n vcf.has_record_for_variant(Variant(chrom, 13, 'A', 'T')).with_filters({'LQ', 'NC'})\n vcf.has_record_for_variant(Variant(chrom, 19, 'C', 'T')).with_filters({'LQ', 'NC'})\n vcf.has_record_for_variant(Variant(chrom, 7, 'GACCTGAGCGAAC', 'TACCTGTGCGAAT')).with_no_filters()\n", "id": "10217396", "language": "Python", "matching_score": 0.3261001408100128, "max_stars_count": 8, "path": "test/wecall_acceptance/single_sample_diploid/test_output_all_variants.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.wecall_utils.log_utils import log_timing_parser\nimport json\n\nimport os\nfrom wecall_test_drivers.timed_command import TimedCommand\nfrom wecall_test_drivers.tool_runner import log_file\nfrom wecall_test_drivers.variant_callset import VariantCallSet\n\nCANDIDATE_VARIANTS_FILE_KEY = \"candidateVariantsFile\"\n\n\nclass VariantCallerWrapper(object):\n\n def __init__(\n self,\n output_vcf_path_stem,\n wecall_config\n ):\n self.output_vcf = output_vcf_path_stem + \".vcf\"\n self.log_filename = output_vcf_path_stem + \".log\"\n self.wecall_config = wecall_config\n self.__additional_commands = {}\n self.__timmed_command = None\n\n @property\n def stderr(self):\n return self.__timmed_command.stderr\n\n @property\n def stdout(self):\n return self.__timmed_command.stdout\n\n @property\n def return_code(self):\n return self.__timmed_command.return_code\n\n @property\n def config_filename(self):\n return self.wecall_config.filename\n\n def get_variant_callset(self, test_case):\n variant_callset = VariantCallSet(test_case)\n variant_callset.add_vcf_variants(self.output_vcf)\n return variant_callset\n\n def add_additional_command(self, key, value):\n self.__additional_commands[key] = value\n\n def 
dump_timing_json(self, filename):\n times = self.__timmed_command.times\n with open(self.log_filename) as log_file:\n timing_data = log_timing_parser(log_file)\n per_file_timings = {}\n for timing_data_item in timing_data:\n assert(timing_data_item.timing_type == \"IO\")\n timed_file = timing_data_item.metadata[\"file\"]\n if timed_file not in per_file_timings:\n per_file_timings[timed_file] = 0\n assert(timing_data_item.length_units == \"us\")\n per_file_timings[timed_file] += timing_data_item.length\n\n per_file_timings_in_seconds = {\n key: value / 10.0 ** 6 for key,\n value in per_file_timings.items()}\n times[\"IO\"] = per_file_timings_in_seconds\n with open(filename, \"w\") as json_fp:\n json.dump(times, json_fp, indent=4, sort_keys=True)\n json_fp.write(\"\\n\")\n\n @property\n def system_time(self):\n return self.__timmed_command.system_time\n\n @property\n def user_time(self):\n return self.__timmed_command.user_time\n\n def run(self):\n cmd = [\n os.path.join(os.environ[\"WECALL_BIN\"], \"weCall\"),\n \"--output={}\".format(self.output_vcf),\n \"--logFilename={}\".format(self.log_filename),\n \"--config={}\".format(self.wecall_config.filename)\n ]\n for key, value in self.__additional_commands.items():\n cmd.append(\"--{}={}\".format(key, value))\n\n log_file(self.wecall_config.filename)\n\n # try:\n # # remove file before trying to run tool\n # os.unlink(self.output_vcf)\n # except OSError:\n # pass\n\n self.__timmed_command = TimedCommand().start(cmd)\n\n log_file(self.output_vcf)\n\n return self\n", "id": "2158149", "language": "Python", "matching_score": 3.3587844371795654, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/variant_caller_wrapper.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.tool_runner import ToolRunner\nimport json\nimport logging\nimport tempfile\nimport psutil\nimport time\n\n\nclass TimedCommand(ToolRunner):\n\n def __init__(self):\n ToolRunner.__init__(self)\n self.user_time = None\n self.system_time = None\n\n @property\n def times(self):\n return {\"user_time\": self.user_time, \"system_time\": self.system_time}\n\n def dump_timing_json(self, filename):\n with open(filename, \"w\") as json_fp:\n json.dump(self.times, json_fp, indent=4, sort_keys=True)\n json_fp.write(\"\\n\")\n\n def log_output(self):\n ToolRunner.log_output(self)\n logging.info(\"user_time: {}\".format(self.user_time))\n logging.info(\"system_time: {}\".format(self.system_time))\n\n def run(self, command, cwd=None):\n with tempfile.TemporaryFile() as stdout, tempfile.TemporaryFile() as stderr:\n process = psutil.Popen(\n command, stdout=stdout, stderr=stderr, cwd=cwd)\n while process.status() != psutil.STATUS_ZOMBIE:\n time.sleep(0)\n stdout.seek(0)\n self.stdout = stdout.read()\n stderr.seek(0)\n self.stderr = stderr.read()\n\n times = process.cpu_times()\n self.user_time, self.system_time = times.user, times.system\n process.wait()\n self.return_code = process.returncode\n\n return self\n", "id": "3415110", "language": "Python", "matching_score": 0.16622985899448395, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/timed_command.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\nimport subprocess\n\nWECALL_BIN = os.environ[\"WECALL_BIN\"]\n\n\nclass TabixIndexer(object):\n\n def __init__(self, filename, file_type=None):\n self.filename = filename\n self.file_type = file_type\n\n @property\n def compressed_filename(self):\n return self.filename + \".gz\"\n\n @property\n def 
compressed_filename_index(self):\n return self.compressed_filename + \".tbi\"\n\n def bgzip(self):\n subprocess.call(\n [os.path.join(WECALL_BIN, \"bgzip\"), \"-f\", self.filename])\n return self\n\n def index(self):\n self.bgzip()\n tabix_args = [os.path.join(WECALL_BIN, \"tabix\"), \"-f\", ]\n if self.file_type == \"VARINFO\":\n tabix_args += ['-s', '1', '-b', '2', '-e', '3']\n elif self.file_type is not None:\n tabix_args += [\"-p\", self.file_type]\n tabix_args.append(self.compressed_filename)\n subprocess.check_call(tabix_args)\n return self\n", "id": "9177593", "language": "Python", "matching_score": 1.3887081146240234, "max_stars_count": 8, "path": "python/wecall/utils/tabix_indexer.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.genomics import variant\nfrom wecall.vcfutils.record import Record\nfrom wecall.vcfutils.info_data import InfoData\nfrom wecall.vcfutils.sample_data import SampleData\nfrom wecall.vcfutils.schema import Schema\nfrom wecall.vcfutils.writer import VCFWriterContextManager\nfrom wecall_test_drivers.tool_runner import log_file\nfrom wecall.utils.tabix_indexer import TabixIndexer\n\n\nclass VCFBuilder(object):\n def __init__(self, filename, schema=None):\n self.__filename = filename\n self.__indexer = TabixIndexer(self.__filename, \"vcf\")\n if schema is None:\n self.schema = Schema()\n else:\n self.schema = schema\n self.__records = []\n\n @property\n def filename(self):\n return self.__filename\n\n @property\n def compressed_filename(self):\n return self.__indexer.compressed_filename\n\n @property\n def compressed_filename_index(self):\n return self.__indexer.compressed_filename_index\n\n def with_variant(self, chrom, pos_from, ref, alt):\n return self.with_record(\n self.generate_record_from_variant(\n variant.Variant(\n chrom, pos_from, ref, alt)))\n\n def with_record(self, record):\n self.__records.append(record)\n return self\n\n def with_record_from_variant(self, variant, **kwargs):\n return self.with_record(\n self.generate_record_from_variant(\n variant, **kwargs))\n\n def build(self):\n # use default schema\n with VCFWriterContextManager(self.filename, self.schema) as vcf_writer:\n vcf_writer.write_records(self.__records)\n\n log_file(self.filename)\n return self\n\n def bgzip(self):\n self.__indexer.bgzip()\n\n def index(self):\n self.__indexer.index()\n return self\n\n def generate_record_from_variant(self, variant, **kwargs):\n annotations = {'variant_id': set(),\n 'quality': None,\n 'filters': set(),\n 'info': InfoData(self.schema,\n {}),\n 'sample_info': SampleData([key for key,\n _ in self.schema.iter_sample_data()],\n self.schema.samples),\n 'from_multi_alt': False,\n }\n for key, value in kwargs.items():\n annotations[key] = value\n\n return Record(schema=self.schema, variant=variant, **annotations)\n", "id": "12356996", "language": "Python", "matching_score": 2.158191204071045, "max_stars_count": 8, "path": "python/wecall/vcfutils/vcf_builder.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\nimport unittest\n\nimport wecall.common.exceptions\nfrom wecall.genomics import variant\nfrom wecall.genomics.variant import Variant\nfrom wecall.vcfutils import record\nfrom wecall.vcfutils.genotype_call import GenotypeCall\nfrom wecall.vcfutils.info_data import InfoData\nfrom wecall.vcfutils.parser import VCFReaderContextManager\nfrom wecall.vcfutils.record import common_prefix_length, trimmed_vcf_ref_alt, vcf_row_from_record, \\\n variant_from_vcf, variant_quality_from_vcf, 
variant_ids_from_vcf, \\\n vcf_id_entry_from_variant_ids, filters_from_vcf, split_MNP_variant, Record\nfrom wecall.vcfutils.sample_data import SampleData\nfrom wecall.vcfutils.schema import Schema\n\n\nclass TestUtilityFunctionsInRecordModule(unittest.TestCase):\n def test_quality_from_vcf(self):\n quality = \"50\"\n self.assertEqual(variant_quality_from_vcf(quality), 50.0)\n self.assertEqual(variant_quality_from_vcf(\".\"), None)\n self.assertRaises(wecall.common.exceptions.weCallException, variant_quality_from_vcf, \"String\")\n\n def test_variant_ids_from_vcf(self):\n self.assertEqual(variant_ids_from_vcf(\"A,B\"), {\"A\", \"B\"})\n self.assertEqual(variant_ids_from_vcf(\".\"), set())\n\n def test_vcf_id_entry_from_variants_ids(self):\n self.assertEqual(vcf_id_entry_from_variant_ids({\"A\", \"B\"}), \"A,B\")\n self.assertEqual(vcf_id_entry_from_variant_ids(set()), \".\")\n\n def test_filters_from_vcf(self):\n self.assertEqual(filters_from_vcf(\"PASS\"), set())\n self.assertEqual(filters_from_vcf(\"PISS\"), {\"PISS\"})\n self.assertEqual(filters_from_vcf(\"PASS;PISS\"), {\"PASS\", \"PISS\"})\n\n def test_variant_from_vcf(self):\n chrom = \"blah\"\n pos = 21\n ref = \"hell\"\n alt = \"heaven\"\n self.assertEqual(variant_from_vcf(chrom, pos, ref, alt), variant.Variant(chrom, 20, ref, alt))\n\n def test_split_MNP_variant(self):\n chrom = \"blah\"\n pos = 21\n ref = \"hell\"\n alt = \"beli\"\n var = variant.Variant(chrom, pos, ref, alt)\n self.assertEqual(\n list(split_MNP_variant(var)),\n [\n variant.Variant(chrom, pos, ref[0], alt[0]),\n variant.Variant(chrom, pos + 3, ref[3], alt[3]),\n ]\n )\n self.assertEqual(\n list(split_MNP_variant(var, include_ref_calls=True)),\n [\n variant.Variant(chrom, pos, ref[0], alt[0]),\n variant.Variant(chrom, pos + 1, ref[1], alt[1]),\n variant.Variant(chrom, pos + 2, ref[2], alt[2]),\n variant.Variant(chrom, pos + 3, ref[3], alt[3]),\n ]\n )\n\n\nclass RecordTest(unittest.TestCase):\n def setUp(self):\n self.data_dir = os.path.join(os.path.dirname(__file__), \"example_data\")\n self.work_dir = os.path.join(os.environ[\"WECALL_TEST_RESULTS\"], *self.id().split(\".\"))\n try:\n os.makedirs(self.work_dir)\n except OSError:\n pass\n\n def __get_example_schema(self, filename):\n with VCFReaderContextManager(os.path.join(self.data_dir, filename)) as vcf_handler:\n vcf_handler.read_header()\n return vcf_handler.header\n\n def test_eq(self):\n reference = Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, set(),\n InfoData(None, {}), SampleData([], []), False)\n\n self.assertTrue(reference == Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, set(),\n InfoData(None, {}), SampleData([], []), False))\n\n self.assertFalse(reference == Record(None, Variant(\"2\", 20, \"A\", \"G\"), set(), 0.0, set(),\n InfoData(None, {}), SampleData([], []), False))\n\n self.assertFalse(reference == Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(\"rs0\"), 0.0, set(),\n InfoData(None, {}), SampleData([], []), False))\n\n self.assertFalse(reference == Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 5.0, set(),\n InfoData(None, {}), SampleData([], []), False))\n\n self.assertFalse(reference == Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, set(\"CV\"),\n InfoData(None, {}), SampleData([], []), False))\n\n self.assertFalse(reference == Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, set(),\n InfoData(None, {'AF': []}), SampleData([], []), False))\n\n self.assertFalse(reference == Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, 
set(),\n InfoData(None, {}), SampleData([], ['NA12787']), False))\n\n self.assertFalse(reference == Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, set(),\n InfoData(None, {}), SampleData([], []), True))\n\n def test_ne(self):\n reference = Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, set(),\n InfoData(None, {}), SampleData([], []), False)\n\n self.assertFalse(reference != Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, set(),\n InfoData(None, {}), SampleData([], []), False))\n\n self.assertTrue(reference != Record(None, Variant(\"2\", 20, \"A\", \"G\"), set(), 0.0, set(),\n InfoData(None, {}), SampleData([], []), False))\n\n self.assertTrue(reference != Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(\"rs0\"), 0.0, set(),\n InfoData(None, {}), SampleData([], []), False))\n\n self.assertTrue(reference != Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 5.0, set(),\n InfoData(None, {}), SampleData([], []), False))\n\n self.assertTrue(reference != Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, set(\"CV\"),\n InfoData(None, {}), SampleData([], []), False))\n\n self.assertTrue(reference != Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, set(),\n InfoData(None, {'AF': []}), SampleData([], []), False))\n\n self.assertTrue(reference != Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, set(),\n InfoData(None, {}), SampleData([], ['NA12787']), False))\n\n self.assertTrue(reference != Record(None, Variant(\"1\", 20, \"A\", \"G\"), set(), 0.0, set(),\n InfoData(None, {}), SampleData([], []), True))\n\n def test_read_variant_from_vcf(self):\n with VCFReaderContextManager(os.path.join(self.data_dir, \"vcf_example.vcf\")) as vcf_handler:\n variant_gen = (record.variant for record in vcf_handler.read_records())\n next_variant = next(variant_gen)\n self.assertEqual(next_variant.chrom, \"20\")\n self.assertEqual(next_variant.pos_from, 9)\n self.assertEqual(next_variant.ref, \"CT\")\n self.assertEqual(next_variant.alt, \"C\")\n\n def test_read_record_line(self):\n with VCFReaderContextManager(os.path.join(self.data_dir, \"vcf_example.vcf\")) as vcf_handler:\n\n record_gen = vcf_handler.read_records()\n next_record = next(record_gen)\n\n self.assertEqual(next_record.chrom, \"20\")\n self.assertEqual(next_record.pos_from, 9)\n self.assertEqual(next_record.ids, set())\n self.assertEqual(next_record.ref, \"CT\")\n self.assertEqual(next_record.alt, \"C\")\n self.assertEqual(next_record.quality, 3000)\n self.assertEqual(next_record.filters, set())\n self.assertEqual(next_record.passes_filter, True)\n self.assertEqual(next_record.from_multi_alt, False)\n self.assertEqual(next_record.type, variant.TYPE_DEL)\n\n self.assertEqual(next_record.info['PP'], [3000])\n self.assertEqual(next_record.info['DP'], [250])\n self.assertEqual(next_record.info['VC'], [100])\n self.assertEqual(next_record.info['ABPV'], [0.2])\n self.assertEqual(next_record.info['SBPV'], [0.3])\n self.assertEqual(next_record.info['MQ'], [70])\n self.assertEqual(next_record.info['QD'], [None])\n\n self.assertTrue(next_record.sample_info.has_sample(\"sample1\"))\n self.assertEqual(next_record.genotypes, {\"sample1\": GenotypeCall(\"1|0\"), \"sample2\": GenotypeCall(\"1|1\")})\n self.assertEqual(next_record.sample_info.get_field(\"sample1\", 'GT'), GenotypeCall(\"1|0\"))\n self.assertEqual(next_record.sample_info.get_field(\"sample1\", 'PL'), [3000, 0, 3000])\n self.assertEqual(next_record.sample_info.get_field(\"sample1\", \"GQ\"), [1000])\n\n def test_read_sample_data(self):\n 
schema = self.__get_example_schema(\"vcf_example.vcf\")\n sample_schema = [key for key, _ in schema.iter_sample_data()]\n\n sample_data = SampleData(sample_schema, ['sample1'])\n\n sample_data.add_sample_data(\"sample1\", \"GT\", GenotypeCall(\"1|0\"))\n sample_data.add_sample_data(\"sample1\", \"PL\", [3000, 0, 3000])\n sample_data.add_sample_data(\"sample1\", \"GQ\", [1000])\n sample_data.add_sample_data(\"sample1\", \"PQ\", [2000])\n sample_data.add_sample_data(\"sample1\", \"PS\", [60000])\n sample_data.add_sample_data(\"sample1\", \"AD\", [140, 110])\n sample_data.add_sample_data(\"sample1\", \"DP\", [250])\n sample_data.add_sample_data(\"sample1\", \"VAF\", [0.4])\n\n self.assertTrue(sample_data.has_sample(\"sample1\"))\n self.assertEqual(sample_data.genotypes(), {\"sample1\": GenotypeCall(\"1|0\")})\n self.assertEqual(sample_data.get_field(\"sample1\", 'GT'), GenotypeCall(\"1|0\"))\n self.assertEqual(sample_data.get_field(\"sample1\", 'PL'), [3000, 0, 3000])\n\n genotype_data = sample_data.get_genotype_data(\"sample1\")\n self.assertEqual(genotype_data.genotype(), GenotypeCall(\"1|0\"))\n self.assertEqual(genotype_data['GT'], GenotypeCall(\"1|0\"))\n self.assertEqual(genotype_data['PL'], [3000, 0, 3000])\n\n def test_should_write_missing_values_in_sample_data(self):\n with VCFReaderContextManager(os.path.join(self.data_dir, \"vcf_example.vcf\")) as vcf_handler:\n first_record = next(vcf_handler.read_records())\n\n sample_data = SampleData(['GT', 'PL', 'GQ'], ['sample1', 'sample2', 'sample3'])\n\n sample_data.add_sample_data(\"sample1\", \"GT\", GenotypeCall(\"1|0\"))\n sample_data.add_sample_data(\"sample1\", \"PL\", [3000, 0, 3000])\n sample_data.add_sample_data(\"sample1\", \"GQ\", [1000])\n\n sample_data.add_sample_data(\"sample2\", \"GT\", GenotypeCall(\"1|1\"))\n sample_data.add_sample_data(\"sample2\", \"PL\", [2000, 0, 1000])\n sample_data.add_sample_data(\"sample2\", \"GQ\", [3])\n\n first_record.sample_info = sample_data\n\n print((sample_data.to_vcf_columns()))\n vcf_string = vcf_row_from_record(first_record)\n expected_vcf_string = \"20\t10\t.\tCT\tC\t3000\tPASS\tPP=3000;DP=250;DPR=140;DPF=110;VC=100;VCR=49;VCF=51;ABPV=0.2;SBPV=0.3;MQ=70.0;BR=31.0;QD=None\tGT:PL:GQ\t1|0:3000,0,3000:1000\t1|1:2000,0,1000:3\t./.:.:.\" # noqa\n self.assertEqual(expected_vcf_string, vcf_string)\n\n def test_should_return_default_diploid_genotype(self):\n sample_data = SampleData(['GT', 'GL'], [\"NA12878\"])\n\n self.assertEqual(GenotypeCall(\"./.\"), GenotypeCall(\"./.\"))\n\n self.assertTrue(sample_data.has_sample(\"NA12878\"))\n self.assertEqual(sample_data.genotypes(), {\"NA12878\": GenotypeCall(\"./.\")})\n self.assertEqual(sample_data.get_field(\"NA12878\", 'GT'), GenotypeCall(\"./.\"))\n self.assertEqual(sample_data.get_field(\"NA12878\", 'GL'), [])\n\n genotype_data = sample_data.get_genotype_data(\"NA12878\")\n self.assertEqual(genotype_data.genotype(), GenotypeCall(\"./.\"))\n self.assertEqual(genotype_data['GT'], GenotypeCall(\"./.\"))\n self.assertEqual(genotype_data['GL'], [])\n\n def test_split_empty_sample_data_string(self):\n schema = self.__get_example_schema(\"vcf_example.vcf\")\n cols = \"\"\"1\\t11082325\\tRS1\\tG\\tC,A\\t.\\t.\\tPP=.;DP=.;DPR=.;DPF=.;VC=.;VCR=.;VCF=.;ABPV=.;SBPV=.;MQ=.;BR=.;QD=.\\tGT:PL:GQ\\t1|0:3000,0,3000:1000\\t1|1:2000,0,1000:3\"\"\".split() # noqa\n first_record = next(record.generate_records(schema, cols))\n self.assertEqual(first_record.alt, 'C')\n self.assertTrue(first_record.from_multi_alt)\n\n second_record = next(record.generate_records(schema, 
cols))\n self.assertEqual(first_record.info, second_record.info)\n\n def test_split_genotype_calls(self):\n self.assertEqual(record.split_GT(\"0/0\", 2), [\"0/0\", \"0/0\"])\n self.assertEqual(record.split_GT(\"0/1\", 2), [\"0/1\", \"0/0\"])\n self.assertEqual(record.split_GT(\"0/2\", 2), [\"0/0\", \"0/1\"])\n self.assertEqual(record.split_GT(\"1/1\", 2), [\"1/1\", \"0/0\"])\n self.assertEqual(record.split_GT(\"1/2\", 2), [\"0/1\", \"0/1\"])\n self.assertEqual(record.split_GT(\"2/2\", 2), [\"0/0\", \"1/1\"])\n\n self.assertEqual(record.split_GT(\"0/0\", 3), [\"0/0\", \"0/0\", \"0/0\"])\n self.assertEqual(record.split_GT(\"0/1\", 3), [\"0/1\", \"0/0\", \"0/0\"])\n self.assertEqual(record.split_GT(\"0/2\", 3), [\"0/0\", \"0/1\", \"0/0\"])\n self.assertEqual(record.split_GT(\"0/3\", 3), [\"0/0\", \"0/0\", \"0/1\"])\n self.assertEqual(record.split_GT(\"1/1\", 3), [\"1/1\", \"0/0\", \"0/0\"])\n self.assertEqual(record.split_GT(\"1/2\", 3), [\"0/1\", \"0/1\", \"0/0\"])\n self.assertEqual(record.split_GT(\"1/3\", 3), [\"0/1\", \"0/0\", \"0/1\"])\n self.assertEqual(record.split_GT(\"2/2\", 3), [\"0/0\", \"1/1\", \"0/0\"])\n self.assertEqual(record.split_GT(\"2/3\", 3), [\"0/0\", \"0/1\", \"0/1\"])\n self.assertEqual(record.split_GT(\"3/3\", 3), [\"0/0\", \"0/0\", \"1/1\"])\n\n def test_split_unknown_genotype_calls(self):\n self.assertEqual(record.split_GT(\"./.\", 2), [\"./.\", \"./.\"])\n self.assertEqual(record.split_GT(\"./1\", 2), [\"./1\", \"./0\"])\n self.assertEqual(record.split_GT(\"./2\", 2), [\"./0\", \"./1\"])\n\n def test_split_phased_genotype_calls(self):\n self.assertEqual(record.split_GT(\"0|0\", 2), [\"0|0\", \"0|0\"])\n self.assertEqual(record.split_GT(\"0|1\", 2), [\"0|1\", \"0|0\"])\n self.assertEqual(record.split_GT(\"0|2\", 2), [\"0|0\", \"0|1\"])\n self.assertEqual(record.split_GT(\"1|0\", 2), [\"1|0\", \"0|0\"])\n self.assertEqual(record.split_GT(\"1|1\", 2), [\"1|1\", \"0|0\"])\n self.assertEqual(record.split_GT(\"1|2\", 2), [\"1|0\", \"0|1\"])\n self.assertEqual(record.split_GT(\"2|0\", 2), [\"0|0\", \"1|0\"])\n self.assertEqual(record.split_GT(\"2|1\", 2), [\"0|1\", \"1|0\"])\n self.assertEqual(record.split_GT(\"2|2\", 2), [\"0|0\", \"1|1\"])\n\n self.assertEqual(record.split_GT(\"0|0\", 3), [\"0|0\", \"0|0\", \"0|0\"])\n self.assertEqual(record.split_GT(\"0|1\", 3), [\"0|1\", \"0|0\", \"0|0\"])\n self.assertEqual(record.split_GT(\"0|2\", 3), [\"0|0\", \"0|1\", \"0|0\"])\n self.assertEqual(record.split_GT(\"0|3\", 3), [\"0|0\", \"0|0\", \"0|1\"])\n self.assertEqual(record.split_GT(\"1|0\", 3), [\"1|0\", \"0|0\", \"0|0\"])\n self.assertEqual(record.split_GT(\"1|1\", 3), [\"1|1\", \"0|0\", \"0|0\"])\n self.assertEqual(record.split_GT(\"1|2\", 3), [\"1|0\", \"0|1\", \"0|0\"])\n self.assertEqual(record.split_GT(\"1|3\", 3), [\"1|0\", \"0|0\", \"0|1\"])\n self.assertEqual(record.split_GT(\"2|0\", 3), [\"0|0\", \"1|0\", \"0|0\"])\n self.assertEqual(record.split_GT(\"2|1\", 3), [\"0|1\", \"1|0\", \"0|0\"])\n self.assertEqual(record.split_GT(\"2|2\", 3), [\"0|0\", \"1|1\", \"0|0\"])\n self.assertEqual(record.split_GT(\"2|3\", 3), [\"0|0\", \"1|0\", \"0|1\"])\n self.assertEqual(record.split_GT(\"3|0\", 3), [\"0|0\", \"0|0\", \"1|0\"])\n self.assertEqual(record.split_GT(\"3|1\", 3), [\"0|1\", \"0|0\", \"1|0\"])\n self.assertEqual(record.split_GT(\"3|2\", 3), [\"0|0\", \"0|1\", \"1|0\"])\n self.assertEqual(record.split_GT(\"3|3\", 3), [\"0|0\", \"0|0\", \"1|1\"])\n\n def __cleanup_tmp_files(self, file_name):\n try:\n os.remove(file_name)\n except OSError as ex:\n 
self.assertTrue(False, str(ex))\n else:\n pass\n\n\nclass TestCommonPrefixLength(unittest.TestCase):\n\n def test_no_common_prefix(self):\n self.assertEqual(0, common_prefix_length(\"ACDE\", \"BCDE\"))\n\n def test_common_prefix_matching_strings(self):\n self.assertEqual(4, common_prefix_length(\"ABCA\", \"ABCA\"))\n\n def test_common_prefix_then_end_of_one_string(self):\n self.assertEqual(2, common_prefix_length(\"AAA\", \"AA\"))\n self.assertEqual(2, common_prefix_length(\"AA\", \"AAA\"))\n\n def test_common_prefix_different_suffix(self):\n self.assertEqual(3, common_prefix_length(\"AAAA\", \"AAAB\"))\n self.assertEqual(3, common_prefix_length(\"AAAB\", \"AAAA\"))\n\n\nclass TestTrimmedVCFRefAlt(unittest.TestCase):\n\n def test_refcall(self):\n self.assertEqual(trimmed_vcf_ref_alt(\"A\", \"A\"), (0, \"A\", \"A\"))\n\n def test_fails_with_long_refcall(self):\n self.assertRaises(wecall.common.exceptions.weCallException, trimmed_vcf_ref_alt, \"ACDE\", \"ACDE\")\n\n def test_with_SNP(self):\n self.assertEqual(trimmed_vcf_ref_alt(\"ABCDE\", \"ABQDE\"), (2, \"C\", \"Q\"))\n\n def test_with_trimmed_SNP(self):\n self.assertEqual(trimmed_vcf_ref_alt(\"C\", \"Q\"), (0, \"C\", \"Q\"))\n\n def test_with_MNP(self):\n self.assertEqual(trimmed_vcf_ref_alt(\"ABCDEFG\", \"ABQDPFG\"), (2, \"CDE\", \"QDP\"))\n\n def test_with_trimmed_MNP(self):\n self.assertEqual(trimmed_vcf_ref_alt(\"ABCDEFG\", \"XBCDEFY\"), (0, \"ABCDEFG\", \"XBCDEFY\"))\n\n def test_with_INS(self):\n self.assertEqual(trimmed_vcf_ref_alt(\"ABCDEF\", \"ABCPQDEF\"), (2, \"C\", \"CPQ\"))\n\n def test_with_INS_at_start(self):\n self.assertEqual(trimmed_vcf_ref_alt(\"DEF\", \"PQDEF\"), (0, \"D\", \"PQD\"))\n\n def test_with_DEL(self):\n self.assertEqual(trimmed_vcf_ref_alt(\"ABCPQDEF\", \"ABCDEF\"), (2, \"CPQ\", \"C\"))\n\n def test_with_DEL_at_start(self):\n self.assertEqual(trimmed_vcf_ref_alt(\"PQDEF\", \"DEF\"), (0, \"PQD\", \"D\"))\n\n def test_with_common_prefix_and_suffix(self):\n self.assertEqual(trimmed_vcf_ref_alt(\"AT\", \"ATAT\"), (0, \"A\", \"ATA\"))\n\n def test_fails_with_monomorphic_variant(self):\n self.assertRaises(wecall.common.exceptions.weCallException, trimmed_vcf_ref_alt, \"AT\", \".\")\n self.assertRaises(wecall.common.exceptions.weCallException, trimmed_vcf_ref_alt, \".\", \"AT\")\n\n def test_fails_with_empty_ref(self):\n self.assertRaises(wecall.common.exceptions.weCallException, trimmed_vcf_ref_alt, \"\", \"A\")\n\n def test_fails_with_empty_alt(self):\n self.assertRaises(wecall.common.exceptions.weCallException, trimmed_vcf_ref_alt, \"A\", \"\")\n\n def test_fails_with_empty_ref_and_alt(self):\n self.assertRaises(wecall.common.exceptions.weCallException, trimmed_vcf_ref_alt, \"\", \"\")\n\n\nclass TestInfoFieldFormatting(unittest.TestCase):\n def test_should_format_a_present_flag(self):\n schema = Schema()\n schema.set_info_data('F', '0', 'Flag', 'Flag')\n info_data = InfoData(schema, {\"F\": None})\n self.assertEqual('F', info_data.to_vcf())\n\n def test_should_format_no_data(self):\n info_data = InfoData(None, {})\n self.assertEqual('.', info_data.to_vcf())\n\n def test_should_format_a_string(self):\n info_data = InfoData(None, {'K': 'V'})\n self.assertEqual('K=V', info_data.to_vcf())\n\n def test_should_format_a_string_list(self):\n schema = Schema()\n schema.set_info_data('K', 'A', 'String', 'K')\n info_data = InfoData(schema, {'K': ['V1', 'V2']})\n self.assertEqual('K=V1,V2', info_data.to_vcf())\n\n def test_should_format_an_int_list(self):\n schema = Schema()\n schema.set_info_data('K', 'A', 'Integer', 
'K')\n info_data = InfoData(schema, {'K': [1, 2, 3]})\n self.assertEqual('K=1,2,3', info_data.to_vcf())\n\n def test_should_format_a_float_list(self):\n schema = Schema()\n schema.set_info_data('K', 'A', 'Integer', 'K')\n info_data = InfoData(schema, {'K': [1.0, 2.66, 3.0]})\n self.assertEqual('K=1.0,2.66,3.0', info_data.to_vcf())\n\n def test_should_format_multiple_values(self):\n schema = Schema()\n schema.set_info_data('K', 'A', 'Float', 'K')\n schema.set_info_data('K2', 'A', 'String', 'K')\n schema.set_info_data('K3', '0', 'Flag', 'K')\n schema.set_info_data('K4', 'A', 'String', 'K')\n info_data = InfoData(schema, {'K3': None, 'K2': ['S2'], 'K': [1.0, 2.66, 3.0], 'K4': ['S4']})\n self.assertEqual('K=1.0,2.66,3.0;K2=S2;K3;K4=S4', info_data.to_vcf())\n", "id": "3082153", "language": "Python", "matching_score": 4.8762993812561035, "max_stars_count": 8, "path": "test/test_utils/vcfutils/test_record.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\n# Represents a single VCF record with only a single ALT.\n\nimport copy\nimport logging\nimport re\nimport sys\nfrom collections import OrderedDict\nfrom itertools import repeat\n\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics import variant\nfrom wecall.genomics.variant import Variant\nfrom wecall.vcfutils.fieldmetadata import UNKNOWN\nfrom wecall.vcfutils.genotype_call import GenotypeCall\nfrom wecall.vcfutils.info_data import DeferredInfoData, InfoData, DeferredInfoValue\nfrom wecall.vcfutils.sample_data import SampleData, GENOTYPE_KEY, GENOTYPE_PHRED_LIKELIHOODS_KEY, \\\n GENOTYPE_LIKELIHOODS_KEY\nfrom wecall.vcfutils.stringutils import to_vcf_str, from_vcf_str\n\n\nlogger = logging.getLogger(__name__)\n\nCHROM_COL = 0\nPOS_COL = 1\nID_COL = 2\nREF_COL = 3\nALT_COL = 4\nQUALITY_COL = 5\nFILTER_COL = 6\nINFO_COL = 7\nFORMAT_COL = 8\nSAMPLE_COL = 9\n\n\ndef read_records(schema, line):\n \"\"\"\n Extracts a sequence of `Record` objects from a single line in a VCF file.\n \"\"\"\n try:\n cols = [l for l in line.strip().split(\"\\t\")]\n for item in generate_records(schema, cols):\n yield item\n except weCallException:\n raise\n except Exception:\n _, exc, tb = sys.exc_info()\n new_exc = weCallException(\n \"while reading record from line {!r}: {!s}\".format(\n line, exc.message))\n raise new_exc.__class__(new_exc).with_traceback(tb)\n\n\ndef generate_records(schema, cols):\n alts = cols[ALT_COL].split(',')\n vars = [Variant(cols[CHROM_COL], int(cols[POS_COL]) -\n 1, cols[REF_COL], alt) for alt in alts]\n\n info_data_list = []\n if len(alts) == 1:\n # deferred parsing is simple with a single alt\n info_data_list.append(\n DeferredInfoData(\n schema,\n lambda: defer_parse_info_field(\n schema,\n cols[INFO_COL])))\n\n else:\n # extract and split info data into lists of length n_alts\n split_info_data = OrderedDict()\n for key, value in parse_info_field(cols[INFO_COL]):\n try:\n info_metadata = schema.get_info_data(key)\n except KeyError:\n split_info_data[key] = [\n DeferredInfoValue(\n schema, key, value) for index in range(\n len(alts))]\n else:\n split_info_data[key] = info_metadata.split_alts(\n value if isinstance(value, list) else value.split(','), n_alts=len(alts)\n )\n\n # construct InfoData objects from prepared info data\n for index in range(len(alts)):\n info_dict = OrderedDict([\n (key, values[index]) for key, values in list(split_info_data.items())\n ])\n info_data_list.append(InfoData(schema, info_dict))\n\n try:\n sample_format = cols[FORMAT_COL].split(':')\n except IndexError:\n 
sample_data_list = repeat(None)\n else:\n # extract sample format\n split_sample_data = {sample_name: sample_field.split(\n ':') for sample_name, sample_field in zip(schema.samples, cols[SAMPLE_COL:])}\n\n sample_data_list = [\n SampleData(\n cols[FORMAT_COL].split(':'),\n schema.samples) for _ in alts]\n for sample_name, sample_items in list(split_sample_data.items()):\n split_sample_items = {}\n\n # extract data from sample fields\n gt = None\n for key, item in zip(sample_format, sample_items):\n try:\n if key == GENOTYPE_KEY:\n gt = GenotypeCall(item)\n values = [\n GenotypeCall(gt.deliminator().join(\n # Note: default value should be '.', but\n # downstream tools aren't good enough to use it\n {None: '.', 0: '0', 1 + index: '1'}.get(gt_index, '0') for gt_index in gt\n ))\n for index in range(len(alts))\n ]\n elif key == GENOTYPE_LIKELIHOODS_KEY or key == GENOTYPE_PHRED_LIKELIHOODS_KEY:\n values = schema.get_sample_data(key).split_alts(\n item.split(','), len(alts), gt)\n else:\n values = schema.get_sample_data(key).split_alts(\n item.split(','), len(alts), None)\n split_sample_items[key] = values\n except Exception as e:\n raise type(e)(\n \"Error parsing field {} for sample {}: {}\".format(\n key, sample_name, e))\n\n # distribute data to each split sample meta-data container\n for index in range(len(alts)):\n sample_data = sample_data_list[index]\n for key, value in list(split_sample_items.items()):\n sample_data.add_sample_data(sample_name, key, value[index])\n\n # generate & return record objects\n for var, info_data, sample_data in zip(\n vars, info_data_list, sample_data_list):\n qual = variant_quality_from_vcf(cols[QUALITY_COL])\n ids = variant_ids_from_vcf(cols[ID_COL])\n filts = filters_from_vcf(cols[FILTER_COL])\n yield Record(schema, var, ids, qual, filts, info_data, sample_data, len(alts) > 1)\n\n\ninfo_item_regex = re.compile(r'^(?P<key>[a-zA-Z0-9,_+-]+)(?:=(?P<value>.*))?$')\n\n\ndef parse_info_field(field):\n for item in field.split(';'):\n match = info_item_regex.match(item)\n if match:\n key, value = match.group('key'), match.group('value')\n if value is None:\n yield key, [True]\n else:\n yield key, value\n\n\ndef defer_parse_info_field(schema, field):\n for item in field.split(';'):\n match = info_item_regex.match(item)\n if match:\n key, value = match.group('key'), match.group('value')\n if value is None:\n yield key, [True]\n else:\n yield key, DeferredInfoValue(schema, key, value)\n\n\ndef variant_from_vcf(chrom_column, pos_column, ref_column, alt_column):\n return variant.Variant(\n chrom_column,\n from_vcf_str(\n pos_column,\n int) - 1,\n ref_column,\n alt_column)\n\n\ndef variant_quality_from_vcf(quality_string):\n return from_vcf_str(quality_string, float)\n\n\ndef variant_ids_from_vcf(id_column):\n return set() if id_column == UNKNOWN else set(id_column.split(\",\"))\n\n\ndef vcf_id_entry_from_variant_ids(variant_ids):\n return \",\".join(sorted(variant_ids)) if variant_ids else UNKNOWN\n\n\ndef filters_from_vcf(filter_column):\n return set() if filter_column == \"PASS\" else set(filter_column.split(\";\"))\n\n\ndef vcf_row_from_variant(\n variant,\n variant_ids=set(),\n quality=None,\n filters=set(),\n info_data=None,\n sample_data=None,\n):\n columns = [\n variant.chrom,\n to_vcf_str(variant.one_indexed_pos_from),\n vcf_id_entry_from_variant_ids(variant_ids),\n variant.ref,\n variant.alt,\n to_vcf_str(quality),\n \";\".join(filters) if filters else \"PASS\",\n info_data.to_vcf() if info_data else UNKNOWN,\n ]\n if sample_data is not None:\n columns = 
columns + sample_data.to_vcf_columns()\n return \"\\t\".join(columns)\n\n\ndef vcf_row_from_record(\n record,\n variant_ids=set(),\n quality=None,\n filters=None\n):\n actual_filters = filters if filters else record.filters\n filter_field = \";\".join(actual_filters) if actual_filters else \"PASS\"\n fields = list([\n record.variant.chrom,\n to_vcf_str(record.one_indexed_pos_from),\n vcf_id_entry_from_variant_ids(variant_ids if variant_ids else record.ids),\n record.variant.ref,\n record.variant.alt,\n to_vcf_str(quality if quality else record.quality),\n filter_field,\n record.info.to_vcf() if record.info else UNKNOWN,\n ])\n if record.sample_info is not None:\n fields.extend(record.sample_info.to_vcf_columns())\n return \"\\t\".join(\n fields\n )\n\n\ndef split_GT(composite_GT, number_alts):\n composite_call = GenotypeCall(composite_GT)\n individual_GTs = [str(composite_call.get_genotype_call_for_alt(i + 1))\n for i in range(0, number_alts)]\n return individual_GTs\n\n\ndef split_MNP_variant(var, include_ref_calls=False):\n if var.type != variant.TYPE_MNP:\n yield var\n else:\n for index, (ref, alt) in enumerate(zip(var.ref, var.alt)):\n if ref != alt or include_ref_calls:\n yield variant.Variant(var.chrom, index + var.pos_from, ref, alt)\n\n\ndef split_MNP_record(record):\n if record.type != variant.TYPE_MNP:\n yield record\n else:\n for var in split_MNP_variant(record.variant):\n new_record = copy.deepcopy(record)\n new_record.variant = var\n yield new_record\n\n\ndef common_prefix_length(lhs, rhs):\n offset = 0\n for lhs, rhs in zip(lhs, rhs):\n if lhs == rhs:\n offset += 1\n else:\n break\n return offset\n\n\ndef trimmed_vcf_ref_alt(ref, alt):\n if len(ref) == 0 or len(alt) == 0:\n raise weCallException(\"VCF format requires non-empty ref and alt\")\n if ref == alt and len(ref) > 1:\n raise weCallException(\"VCF requires refcalls of length 1\")\n if alt == UNKNOWN or ref == UNKNOWN:\n # VCF allows this to indicate unknown data.\n raise weCallException(\"not dealing with monomorphic variants\")\n offset, new_ref, new_alt = trimmed_ref_alt(ref, alt)\n start_context, end_context = 0, 0\n if len(ref) != len(alt) or (not new_ref and not new_alt):\n if offset == 0:\n end_context = 1\n else:\n start_context = 1\n result_ref =\\\n ref[offset - start_context:offset] +\\\n new_ref +\\\n ref[offset + len(new_ref):offset + len(new_ref) + end_context]\n result_alt =\\\n alt[offset - start_context:offset] +\\\n new_alt +\\\n alt[offset + len(new_alt):offset + len(new_alt) + end_context]\n return offset - start_context, result_ref, result_alt\n\n\ndef trimmed_ref_alt(ref, alt):\n end_offset = common_prefix_length(reversed(ref), reversed(alt))\n start_offset = common_prefix_length(\n ref[:len(ref) - end_offset], alt[:len(alt) - end_offset])\n new_ref_len = len(ref) - start_offset - end_offset\n new_alt_len = len(alt) - start_offset - end_offset\n assert(new_ref_len >= 0)\n assert(new_alt_len >= 0)\n return start_offset, ref[start_offset:len(\n ref) - end_offset], alt[start_offset:len(alt) - end_offset]\n\n\ndef trim_variant(var):\n start_offset, ref, alt = trimmed_vcf_ref_alt(var.ref, var.alt)\n return variant.Variant(var.chrom, start_offset + var.pos_from, ref, alt)\n\n\ndef trim_record(record):\n record.variant = trim_variant(record.variant)\n return record\n\n\nclass Record(object):\n \"\"\"\n Class representing a single variant with all its attributes\n \"\"\"\n\n __slots__ = (\n 'schema',\n 'variant',\n 'ids',\n 'quality',\n 'filters',\n 'info',\n 'sample_info',\n 'from_multi_alt')\n\n def 
__init__(\n self,\n schema,\n variant,\n variant_id,\n quality,\n filters,\n info,\n sample_info,\n from_multi_alt\n ):\n self.schema = schema\n self.variant = variant\n self.ids = variant_id\n self.quality = quality\n self.filters = filters\n self.info = info\n self.sample_info = sample_info\n self.from_multi_alt = from_multi_alt\n\n def __hash__(self):\n return hash((self.variant.__repr__(),))\n\n def __eq__(self, other):\n return (\n self.variant == other.variant and\n self.ids == other.ids and\n self.quality == other.quality and\n self.filters == other.filters and\n self.info == other.info and\n self.sample_info == other.sample_info and\n self.from_multi_alt == other.from_multi_alt\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n data_items = (\n \"variant={!r}\".format(self.variant),\n \"id={!r}\".format(self.ids),\n \"quality={!r}\".format(self.quality),\n \"filters={!r}\".format(self.filters),\n \"info={!r}\".format(self.info),\n \"sample_info={!r}\".format(self.sample_info),\n \"from_multi_alt={!r}\".format(self.from_multi_alt),\n )\n return \"<Record: {!s}>\".format(\n \", \".join(data_items)\n )\n\n @property\n def insert_size(self):\n return self.variant.insert_size\n\n @property\n def passes_filter(self):\n return len(self.filters) == 0\n\n @property\n def chrom(self):\n return self.variant.chrom\n\n @property\n def length(self):\n return self.variant.length\n\n @property\n def pos_from(self):\n return self.variant.pos_from\n\n @property\n def pos_to(self):\n return self.variant.pos_to\n\n @property\n def one_indexed_pos_from(self):\n return self.variant.one_indexed_pos_from\n\n @property\n def one_indexed_pos_to(self):\n return self.variant.one_indexed_pos_to\n\n @property\n def ref(self):\n return self.variant.ref\n\n @property\n def alt(self):\n return self.variant.alt\n\n @property\n def type(self):\n return self.variant.type\n\n @property\n def samples(self):\n \"\"\"\n :return: List with sample names\n \"\"\"\n return self.schema.samples[:]\n\n @property\n def genotypes(self):\n \"\"\"\n :return Dictionary from sample id to a genotype\n \"\"\"\n return self.sample_info.genotypes()\n\n def get_one_based_key(self):\n return self.chrom, self.one_indexed_pos_from, self.ref, self.alt\n", "id": "1870621", "language": "Python", "matching_score": 3.449892520904541, "max_stars_count": 8, "path": "python/wecall/vcfutils/record.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport logging\nfrom collections import OrderedDict\n\nfrom wecall.vcfutils.fieldmetadata import UNKNOWN\n\nlogger = logging.getLogger()\n\n\nclass DeferredInfoData(object):\n\n __slots__ = ('__schema', '__data_generator', '__data')\n\n def __init__(self, schema, data_generator):\n self.__schema = schema\n self.__data_generator = data_generator\n self.__data = None\n\n def __repr__(self):\n serialised_data = '(unparsed)' if self.__data is None else ', '.join(\n '{}: {}'.format(key, value) for key, value in list(self.__data.items())\n )\n return '<{}: {}>'.format(type(self).__name__, serialised_data)\n\n def __require_data_mapping(self):\n if self.__data is None:\n self.__data = OrderedDict(self.__data_generator())\n\n def __require_all_values(self):\n self.__require_data_mapping()\n for key, value in list(self.__data.items()):\n if isinstance(value, DeferredInfoValue):\n self.__data[key] = value()\n\n def __len__(self):\n self.__require_data_mapping()\n return len(self.__data)\n\n def __contains__(self, key):\n self.__require_data_mapping()\n return 
key in self.__data\n\n def keys(self):\n self.__require_data_mapping()\n return list(self.__data.keys())\n\n def values(self):\n self.__require_all_values()\n return list(self.__data.values())\n\n def items(self):\n self.__require_all_values()\n return list(self.__data.items())\n\n def __getitem__(self, key):\n self.__require_data_mapping()\n value = self.__data[key]\n if isinstance(value, DeferredInfoValue):\n value = self.__data[key] = value()\n return value\n\n def __setitem__(self, key, value):\n self.__require_data_mapping()\n self.__data[key] = value\n\n def __eq__(self, other):\n return dict(list(self.items())) == dict(list(other.items()))\n\n def to_vcf(self):\n self.__require_all_values()\n if len(self.__data) == 0:\n return UNKNOWN\n else:\n info_strings = []\n for key, value in list(self.__data.items()):\n if value is None:\n info_strings.append(key)\n else:\n info_strings.append(\"{!s}={!s}\".format(\n key, ','.join(map(str, value))))\n return \";\".join(info_strings)\n\n\nclass DeferredInfoValue(object):\n\n __slots__ = ('__schema', '__key', '__value', '__parser')\n\n def __init__(self, schema, key, value):\n self.__schema = schema\n self.__key = key\n self.__value = value\n try:\n info_data = self.__schema.get_info_data(self.__key)\n except KeyError:\n self.__parser = None\n else:\n self.__parser = info_data.parser\n\n def __call__(self):\n if self.__parser is None:\n try:\n info_data = self.__schema.get_info_data(self.__key)\n except KeyError:\n logger.warn(\n 'info field {!r} not defined in schema'.format(\n self.__key))\n self.__schema.set_info_data(\n self.__key,\n '.',\n 'String',\n 'Inferred from file content during parsing',\n 'vcfutils',\n 'undefined')\n info_data = self.__schema.get_info_data(self.__key)\n self.__parser = info_data.parser\n try:\n return [self.__parser(item) for item in self.__value.split(',')]\n except Exception as e:\n raise type(e)(\n \"Error parsing field {}: {}\".format(\n self.__key, e.message))\n\n\nclass InfoData(object):\n \"\"\"\n Class that represents all the info fields. 
Acts as a dictionary INFO_KEY: INFO_VALUE\n \"\"\"\n\n __slots__ = ('__schema', '__dict')\n\n def __init__(self, schema, dict):\n self.__schema = schema\n self.__dict = dict\n\n def to_vcf(self):\n if len(self.__dict) == 0:\n return UNKNOWN\n else:\n info_strings = []\n for key, value in sorted(self.__dict.items()):\n if value is None:\n info_strings.append(key)\n else:\n info_strings.append(\"{!s}={!s}\".format(\n key, ','.join(map(str, value))))\n return \";\".join(info_strings)\n\n def __setitem__(self, key, value):\n try:\n info_data = self.__schema.get_info_data(key)\n except KeyError:\n raise KeyError(\n \"Attempt to read INFO field {!r} which is not defined in the VCF header\".format(key))\n self.__dict[key] = value\n\n def __getitem__(self, key):\n value = self.__dict[key]\n if isinstance(value, DeferredInfoValue):\n value = self.__dict[key] = value()\n return value\n\n def __contains__(self, key):\n return key in self.__dict\n\n def __len__(self):\n return len(self.__dict)\n\n def __eq__(self, other):\n return dict(list(self.items())) == dict(list(other.items()))\n\n def keys(self):\n return list(self.__dict.keys())\n\n def values(self):\n return list(self.__dict.values())\n\n def items(self):\n return list(self.__dict.items())\n\n def __repr__(self):\n serialised_data = ', '.join(\n (\"{!r}: {!r}\".format(\n key, value) for key, value in list(\n self.__dict.items())))\n return \"<InfoData: {\" + serialised_data + \"}>\"\n", "id": "9665035", "language": "Python", "matching_score": 1.7952134609222412, "max_stars_count": 8, "path": "python/wecall/vcfutils/info_data.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom unittest import TestCase\n\nfrom wecall.vcfutils.record import generate_records\nfrom wecall.vcfutils.schema import Schema\n\nimport testfixtures\n\n\nclass TestMalformedDeferredInfoDataParsing(TestCase):\n\n @testfixtures.log_capture()\n def test_should_warn_about_unrecognised_key_in_monoallelic_line(self, log):\n records = list(generate_records(Schema(), [\n 'chrZ', '200', '.', 'C', 'T', '.', 'PASS', 'NEW_KEY=value'\n ]))\n for index, record in enumerate(records):\n self.assertEqual(\n (index, ['value']), (index, record.info['NEW_KEY']))\n log.check(\n ('root',\n 'WARNING',\n 'info field {!r} not defined in schema'.format('NEW_KEY')),\n )\n\n def test_should_add_default_parsing_rule_for_unknown_key_in_monoallelic_line(self):\n schema = Schema()\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'T', '.', 'PASS', 'NEW_KEY=value'\n ]))\n\n self.assertEqual(0, len(list(schema.iter_info_data())))\n for index, record in enumerate(records):\n self.assertEqual(\n (index, ['value']), (index, record.info['NEW_KEY']))\n self.assertEqual(1, len(list(schema.iter_info_data())))\n\n info_metadata = schema.get_info_data('NEW_KEY')\n self.assertEqual('.', info_metadata.number)\n self.assertEqual('String', info_metadata.data_type)\n self.assertEqual(\n 'Inferred from file content during parsing',\n info_metadata.description)\n self.assertEqual('vcfutils', info_metadata.source)\n self.assertEqual('undefined', info_metadata.version)\n\n @testfixtures.log_capture()\n def test_should_warn_about_unrecognised_key_in_multiallelic_line(\n self,\n log):\n records = list(generate_records(Schema(), [\n 'chrZ', '200', '.', 'C', 'A,T', '.', 'PASS', 'NEW_KEY=value'\n ]))\n for index, record in enumerate(records):\n self.assertEqual(\n (index, ['value']), (index, record.info['NEW_KEY']))\n log.check(\n ('root',\n 'WARNING',\n 'info field {!r} not defined in 
schema'.format('NEW_KEY')),\n )\n\n def test_should_add_default_parsing_rule_for_unknown_key_in_multiallelic_line(self):\n schema = Schema()\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A,T', '.', 'PASS', 'NEW_KEY=value'\n ]))\n\n self.assertEqual(0, len(list(schema.iter_info_data())))\n for index, record in enumerate(records):\n self.assertEqual(\n (index, ['value']), (index, record.info['NEW_KEY']))\n self.assertEqual(1, len(list(schema.iter_info_data())))\n\n info_metadata = schema.get_info_data('NEW_KEY')\n self.assertEqual('.', info_metadata.number)\n self.assertEqual('String', info_metadata.data_type)\n self.assertEqual(\n 'Inferred from file content during parsing',\n info_metadata.description)\n self.assertEqual('vcfutils', info_metadata.source)\n self.assertEqual('undefined', info_metadata.version)\n\n @testfixtures.log_capture()\n def test_should_warn_about_too_few_alts_in_field_of_allelic_cardinality(\n self,\n log):\n schema = Schema()\n schema.set_info_data('key', 'A', 'String', '')\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A,T', '.', 'PASS', 'key=a'\n ]))\n expected = [['a'], [None]]\n for index, record in enumerate(records):\n self.assertEqual(expected[index], record.info['key'])\n log.check(('wecall.vcfutils.fieldmetadata', 'WARNING',\n 'expected 2 items in {!r}'.format([['a']])), )\n\n @testfixtures.log_capture()\n def test_should_warn_about_too_many_alts_in_field_of_allelic_cardinality(\n self,\n log):\n schema = Schema()\n schema.set_info_data('key', 'A', 'String', '')\n records = list(generate_records(schema, [\n 'chrZ', '200', '.', 'C', 'A,T', '.', 'PASS', 'key=a,b,c'\n ]))\n expected = [['a'], ['b']]\n for index, record in enumerate(records):\n self.assertEqual(expected[index], record.info['key'])\n log.check(('wecall.vcfutils.fieldmetadata', 'WARNING',\n 'expected 2 items in {!r}'.format([['a'], ['b'], ['c']])), )\n", "id": "3489871", "language": "Python", "matching_score": 2.2459497451782227, "max_stars_count": 8, "path": "test/test_utils/vcfutils/test_info_data.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.vcfutils import schema, fieldmetadata\nimport unittest\n\n\n# Note: using iterator-based access to decouple from object type\nclass TestSchemaDataModel(unittest.TestCase):\n\n # simple data:\n\n def test_should_contain_mutable_file_metadata(self):\n schema_data = schema.Schema()\n self.assertEqual([], list(schema_data.file_metadata.items()))\n\n schema_data.file_metadata['key'] = 'value'\n self.assertEqual([('key', 'value')], list(\n schema_data.file_metadata.items()))\n self.assertEqual('value', schema_data.file_metadata['key'])\n\n del schema_data.file_metadata['key']\n self.assertEqual([], list(schema_data.file_metadata.items()))\n\n def test_should_contain_mutable_samples_sequence(self):\n schema_data = schema.Schema()\n self.assertEqual([], list(iter(schema_data.samples)))\n\n schema_data.samples.append('sample_name')\n self.assertEqual(['sample_name'], list(iter(schema_data.samples)))\n\n schema_data.samples.remove('sample_name')\n self.assertEqual([], list(iter(schema_data.samples)))\n\n # info data\n\n def test_should_contain_mutable_info_data_with_required_fields(self):\n schema_data = schema.Schema()\n self.assertEqual([], list(schema_data.iter_info_data()))\n\n schema_data.set_info_data(\n 'key',\n number=1,\n data_type='String',\n description='description'\n )\n\n expected_data = fieldmetadata.InfoMetadata(\n number=1,\n data_type='String',\n 
description='description'\n )\n self.assertEqual([('key', expected_data)],\n list(schema_data.iter_info_data()))\n self.assertEqual(expected_data, schema_data.get_info_data('key'))\n\n schema_data.del_info_data('key')\n self.assertEqual([], list(schema_data.iter_info_data()))\n\n def test_should_contain_mutable_info_data_with_all_fields(self):\n schema_data = schema.Schema()\n self.assertEqual([], list(schema_data.iter_info_data()))\n\n schema_data.set_info_data(\n 'key',\n number=1,\n data_type='String',\n description='description',\n source='source',\n version='version'\n )\n\n expected_data = fieldmetadata.InfoMetadata(\n number=1,\n data_type='String',\n description='description',\n source='source',\n version='version'\n )\n self.assertEqual([('key', expected_data)],\n list(schema_data.iter_info_data()))\n self.assertEqual(expected_data, schema_data.get_info_data('key'))\n\n schema_data.del_info_data('key')\n self.assertEqual([], list(schema_data.iter_info_data()))\n\n # sample data\n\n def test_should_contain_mutable_sample_data(self):\n schema_data = schema.Schema()\n self.assertEqual([], list(schema_data.iter_sample_data()))\n\n schema_data.set_sample_data(\n 'key',\n number=1,\n data_type='String',\n description='description'\n )\n\n expected_data = fieldmetadata.SampleMetadata(\n number=1,\n data_type='String',\n description='description'\n )\n self.assertEqual([('key', expected_data)],\n list(schema_data.iter_sample_data()))\n self.assertEqual(expected_data, schema_data.get_sample_data('key'))\n\n schema_data.del_sample_data('key')\n self.assertEqual([], list(schema_data.iter_sample_data()))\n\n # filters\n\n def test_should_contain_mutable_filter_data(self):\n schema_data = schema.Schema()\n self.assertEqual([], list(schema_data.iter_filters()))\n\n schema_data.set_filter('key', description='description')\n\n expected_data = fieldmetadata.FilterMetadata(description='description')\n self.assertEqual([('key', expected_data)],\n list(schema_data.iter_filters()))\n self.assertEqual(expected_data, schema_data.get_filter('key'))\n\n schema_data.del_filter('key')\n self.assertEqual([], list(schema_data.iter_filters()))\n\n # contigs\n\n def test_should_contain_mutable_contig_data_with_required_fields(self):\n schema_data = schema.Schema()\n self.assertEqual([], list(schema_data.iter_contigs()))\n\n schema_data.set_contig('key')\n\n expected_data = fieldmetadata.ContigMetadata()\n self.assertEqual([('key', expected_data)],\n list(schema_data.iter_contigs()))\n self.assertEqual(expected_data, schema_data.get_contig('key'))\n\n schema_data.del_contig('key')\n self.assertEqual([], list(schema_data.iter_contigs()))\n\n def test_should_contain_mutable_contig_data_with_all_fields(self):\n schema_data = schema.Schema()\n self.assertEqual([], list(schema_data.iter_contigs()))\n\n schema_data.set_contig('key', length=100000)\n\n expected_data = fieldmetadata.ContigMetadata(length=100000)\n self.assertEqual([('key', expected_data)],\n list(schema_data.iter_contigs()))\n self.assertEqual(expected_data, schema_data.get_contig('key'))\n\n schema_data.del_contig('key')\n self.assertEqual([], list(schema_data.iter_contigs()))\n", "id": "6217852", "language": "Python", "matching_score": 2.8202810287475586, "max_stars_count": 8, "path": "test/test_utils/vcfutils/test_schema.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom collections import OrderedDict\nfrom wecall.vcfutils import fieldmetadata\n\n\nclass Schema(object):\n\n def __init__(self):\n # TODO: file format is a property 
of the serialisation, not the data\n self.vcf_format = None\n self.file_metadata = OrderedDict()\n self.samples = []\n self.__info_data = OrderedDict()\n self.__sample_data = OrderedDict()\n self.__filters = OrderedDict()\n self.__contigs = OrderedDict()\n self.__adapters = []\n\n def __eq__(self, other):\n return all((\n self.file_metadata == other.file_metadata,\n self.samples == other.samples,\n self.__info_data == other.__info_data,\n self.__sample_data == other.__sample_data,\n self.__filters == other.__filters,\n self.__contigs == other.__contigs,\n ))\n\n def __repr__(self):\n data_items = (\n \"file_metadata={!r}\".format(self.file_metadata),\n \"samples={!r}\".format(self.samples),\n \"info_data={!r}\".format(self.__info_data),\n \"sample_data={!r}\".format(self.__sample_data),\n \"filters={!r}\".format(self.__filters),\n \"contigs={!r}\".format(self.__contigs)\n )\n return \"<{}: {!s}>\".format(type(self).__name__, \", \".join(data_items))\n\n # TODO: remove this function\n def set_vcf_format(self, vcf_format):\n self.vcf_format = vcf_format\n\n # info data:\n\n def set_info_data(\n self,\n key,\n number,\n data_type,\n description,\n source=None,\n version=None):\n self.__info_data[key] = fieldmetadata.InfoMetadata(\n number, data_type, description, source, version)\n\n def get_info_data(self, key):\n return self.__info_data[key]\n\n def iter_info_data(self):\n for key, value in list(self.__info_data.items()):\n yield key, value\n\n def del_info_data(self, key):\n del self.__info_data[key]\n\n # sample data:\n\n def set_sample_data(self, key, number, data_type, description):\n self.__sample_data[key] = fieldmetadata.SampleMetadata(\n number, data_type, description)\n\n def get_sample_data(self, key):\n return self.__sample_data[key]\n\n def iter_sample_data(self):\n for key, value in list(self.__sample_data.items()):\n yield key, value\n\n def del_sample_data(self, key):\n del self.__sample_data[key]\n\n # filter data:\n\n def set_filter(self, key, description):\n self.__filters[key] = fieldmetadata.FilterMetadata(description)\n\n def get_filter(self, key):\n return self.__filters[key]\n\n def iter_filters(self):\n for key, value in list(self.__filters.items()):\n yield key, value\n\n def del_filter(self, key):\n del self.__filters[key]\n\n # contig data:\n\n def set_contig(self, key, length=None):\n self.__contigs[key] = fieldmetadata.ContigMetadata(length)\n\n def get_contig(self, key):\n return self.__contigs[key]\n\n def iter_contigs(self):\n for key, value in list(self.__contigs.items()):\n yield key, value\n\n def del_contig(self, key):\n del self.__contigs[key]\n\n # adapters:\n\n @property\n def from_adapted_vcf(self):\n return len(self.__adapters) > 0\n\n def set_adapter(self, adapter, hash, date):\n self.__adapters.append(\n fieldmetadata.AdapterMetadata(\n adapter, hash, date))\n\n def iter_adapters(self):\n for adapter in self.__adapters:\n yield adapter\n", "id": "1378339", "language": "Python", "matching_score": 2.341496229171753, "max_stars_count": 8, "path": "python/wecall/vcfutils/schema.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.vcfutils.schema import Schema\n\n\ndef wecall_schema(\n file_date=None,\n reference=None,\n contigs=None,\n add_ref_calls=True,\n format='4.2'):\n schema = Schema()\n if file_date is not None:\n schema.file_metadata['fileDate'] = file_date\n if reference is not None:\n schema.file_metadata['reference'] = reference\n\n app_name = 'weCall'\n version_number = '2.0.1'\n app = {'4.1': None, '4.2': 
app_name}[format]\n version = {'4.1': None, '4.2': version_number}[format]\n\n schema.file_metadata[\n 'disclaimer'] = 'This software is in beta-testing. Results generated using the software are confidential and should only be used for research purposes in accordance with the legal agreement with Genomics plc.' # noqa\n schema.file_metadata['source'] = '{application!s} v{version!s}'.format(\n application=app_name, version=version_number) # noqa\n\n schema.set_info_data(\n 'ABPV',\n 'A',\n 'Float',\n 'Allele bias P-value; probability that fraction of reads supporting alt allele (VC) amongst read depth (DP) is '\n 'more extreme than expected assuming a beta-binomial distribution.',\n app,\n version) # noqa\n schema.set_info_data(\n 'MQ',\n 'A',\n 'Float',\n 'Root mean square of mapping quality of reads supporting each alternative allele.',\n app,\n version) # noqa\n schema.set_info_data(\n 'PP',\n 'A',\n 'Integer',\n 'Posterior probability (phred scaled) that this variant does not segregate.',\n app,\n version) # noqa\n schema.set_info_data(\n 'SBPV',\n 'A',\n 'Float',\n 'Strand bias P-value; probability that the fraction of forward reads (VCF) amongst reads supporting alt allele '\n '(VC) is more extreme than expected assuming a beta-binomial distribution.',\n app,\n version) # noqa\n schema.set_info_data(\n 'DP',\n '1',\n 'Integer',\n 'Total depth of read coverage at this locus.',\n app,\n version)\n schema.set_info_data(\n 'DPF',\n '1',\n 'Integer',\n 'Total probabilistic depth of forward read coverage at this locus (sum of probabilities of each read supporting '\n 'the variant).',\n app,\n version) # noqa\n schema.set_info_data(\n 'DPR',\n '1',\n 'Integer',\n 'Total probabilistic depth of reverse read coverage at this locus (sum of probabilities of each read supporting '\n 'the variant).',\n app,\n version) # noqa\n schema.set_info_data(\n 'VC',\n 'A',\n 'Integer',\n 'Total probabilistic number of reads supporting each alternative allele (sum of probabilities of each read '\n 'supporting the allele).',\n app,\n version) # noqa\n schema.set_info_data(\n 'VCF',\n 'A',\n 'Integer',\n 'Total probabilistic number of forward reads supporting each alternative allele (sum of probabilities of '\n 'each read supporting the allele).',\n app,\n version) # noqa\n schema.set_info_data(\n 'VCR',\n 'A',\n 'Integer',\n 'Total probabilistic number of reverse reads supporting each alternative allele (sum of probabilities of each '\n 'read supporting the allele).',\n app,\n version) # noqa\n schema.set_info_data(\n 'QD',\n 'A',\n 'Float',\n 'Ratio of phred-scaled posterior probability (PP) to number of supporting reads for each allele (VC).',\n app,\n version) # noqa\n schema.set_info_data(\n 'BR',\n 'A',\n 'Float',\n 'The median of the per-read min base quality (within a interval of the locus) taken over reads supporting '\n 'each allele.',\n app,\n version) # noqa\n\n schema.set_sample_data(\n 'GT',\n '1',\n 'String',\n 'Genotypes of reference and alternative alleles in order listed.')\n\n if add_ref_calls:\n schema.set_info_data(\n 'BEG',\n '1',\n 'Integer',\n 'Start position of reference call block.',\n app,\n version)\n schema.set_info_data(\n 'END',\n '1',\n 'Integer',\n 'End position of reference call block (inclusive).',\n app,\n version)\n schema.set_info_data(\n 'LEN',\n '1',\n 'Integer',\n 'Length of reference call block.',\n app,\n version)\n schema.set_sample_data(\n 'MIN_DP',\n '1',\n 'Integer',\n 'Minimum read coverage observed within the reference block.')\n\n schema.set_sample_data('GQ', 
'1', 'Integer',\n 'Phred-scaled genotype quality (i.e. posterior probability that the genotype call is incorrect).') # noqa\n schema.set_sample_data('PQ', '1', 'Integer',\n 'Phred-scaled phase quality (i.e. posterior probability that the phasing is incorrect).') # noqa\n schema.set_sample_data('PS', '1', 'String', 'Phase set id.') # noqa\n schema.set_sample_data('PL', 'G', 'Integer',\n \"Normalized, Phred-scaled likelihoods for genotypes as defined in the VCF specification.\") # noqa\n schema.set_sample_data('DP', '1', 'Integer',\n 'Number of reads overlapping the variant site (i.e. INFO::DP split out by sample). For reference calls the average depth (rounded to the nearest integer) over the region is reported.') # noqa\n schema.set_sample_data('AD', '.', 'Integer',\n 'Probabilistic allelic depths for the ref and alt alleles in the order listed (i.e. INFO::VC split out by sample).') # noqa\n schema.set_sample_data('VAF', 'A', 'Float',\n 'Probabilistic variant allelic frequencies for each alt allele (FORMAT::AD / FORMAT::DP).') # noqa\n\n schema.set_filter('AB',\n 'Allele Bias: Indicates lower number of reads supporting variant than expected (any of INFO::ABPV < 0.009).') # noqa\n schema.set_filter('SB',\n 'Strand Bias: Indicates imbalance between number of forward and reverse reads supporting variant (any of INFO::SBPV < 0.01).') # noqa\n schema.set_filter('AB+SB',\n 'Allele + Strand Bias: Indicates that both the AB and SB filters are close to being triggered (any of INFO::ABPV + INFO::SBPV < 0.07).') # noqa\n schema.set_filter('MQ',\n 'low Mapping Quality: Indicates presence of low mapping quality (any of INFO::MQ < 25).') # noqa\n schema.set_filter('QD',\n 'Quality over Depth: Indicates low quality relative to number of supporting reads (any of INFO::QD < 3.5 for Indels or INFO::QD < 8 otherwise).') # noqa\n schema.set_filter('BR',\n 'Bad Reads: Indicates low quality base pairs on reads in the vicinity of variant locus (any of INFO::BR < 0).') # noqa\n schema.set_filter('NC', 'Not called: Indicates a variant that was not positively genotyped in any sample.') # noqa\n schema.set_filter('LQ', 'Low Quality: Indicates a low variant quality (any of INFO::PP < 10).') # noqa\n\n if contigs is not None:\n for contig_name, contig_data in contigs.items():\n schema.set_contig(contig_name, **contig_data)\n return schema\n", "id": "5332982", "language": "Python", "matching_score": 2.2052085399627686, "max_stars_count": 8, "path": "test-drivers/wecall_test_drivers/wecall_schema.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\n# -*- coding:utf8 -*-\nfrom wecall_test_drivers.variant_caller_builder import VariantCallerBuilderFromSampleBank\n\nfrom wecall.bamutils.sample_bank import SampleBank\nfrom wecall.vcfutils.parser import VCFReaderContextManager\nfrom wecall_test_drivers.base_test import BaseTest\nimport datetime\nfrom wecall_test_drivers.wecall_schema import wecall_schema\nimport os\n\n\nclass TestVCFSchema(BaseTest):\n def __run_small_variant_caller(self, refcalls, format):\n sample_bank = SampleBank(\"T\")\n sample_bank.add_sample_name(\"TEST\").add_sequence(\".\")\n\n variant_caller_builder = VariantCallerBuilderFromSampleBank(sample_bank, self.work_dir)\n variant_caller_builder.configuration = {} # clear config.\n variant_caller = variant_caller_builder.build()\n variant_caller.add_additional_command('outputRefCalls', refcalls)\n variant_caller.add_additional_command('outputFormat', \"VCF{}\".format(format))\n variant_caller.run()\n\n with 
VCFReaderContextManager(variant_caller.output_vcf) as vcf_file:\n actual_schema = vcf_file.read_header()\n\n reference = os.path.splitext(os.path.basename(\n variant_caller_builder.wecall_input_data.reference_filename))[0]\n expected_schema = wecall_schema(\n file_date=datetime.datetime.today().strftime('%F'),\n reference=reference,\n contigs={sample_bank.reference.chrom: {\"length\": sample_bank.reference.length_minus_deletions()}},\n add_ref_calls=refcalls,\n format=format)\n\n return expected_schema, actual_schema\n\n def test_correct_disclaimer(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.2\")\n self.assertEqual(expected_schema.file_metadata['disclaimer'], actual_schema.file_metadata['disclaimer'])\n\n def test_correct_filedate_with_refcalls(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.2\")\n self.assertEqual(expected_schema.file_metadata['fileDate'], actual_schema.file_metadata['fileDate'])\n\n def test_correct_reference_with_refcalls(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.2\")\n self.assertEqual(expected_schema.file_metadata['reference'], actual_schema.file_metadata['reference'])\n\n def test_info_data_with_refcalls(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.2\")\n self.assertEqual(sorted(expected_schema.iter_info_data()), sorted(actual_schema.iter_info_data()))\n\n def test_info_data_without_refcalls(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(False, \"4.2\")\n self.assertEqual(sorted(expected_schema.iter_info_data()), sorted(actual_schema.iter_info_data()))\n\n def test_sample_data_with_refcalls(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.2\")\n self.assertEqual(sorted(expected_schema.iter_sample_data()), sorted(actual_schema.iter_sample_data()))\n\n def test_correct_source_with_refcalls(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.2\")\n self.assertEqual(expected_schema.file_metadata['source'], actual_schema.file_metadata['source'])\n\n def test_we_call_outputs_contigs_with_refcalls(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.2\")\n self.assertEqual(sorted(expected_schema.iter_contigs()), sorted(actual_schema.iter_contigs()))\n\n def test_filters_schema_with_refcalls(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.2\")\n self.assertCountEqual(sorted(expected_schema.iter_filters()), sorted(actual_schema.iter_filters()))\n\n def test_correct_filedate_with_refcalls_format_4_1(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.1\")\n self.assertEqual(expected_schema.file_metadata['fileDate'], actual_schema.file_metadata['fileDate'])\n\n def test_correct_reference_with_refcalls_format_4_1(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.1\")\n self.assertEqual(expected_schema.file_metadata['reference'], actual_schema.file_metadata['reference'])\n\n def test_info_data_with_refcalls_format_4_1(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.1\")\n self.assertEqual(sorted(expected_schema.iter_info_data()), sorted(actual_schema.iter_info_data()))\n\n def test_info_data_without_refcalls_format_4_1(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(False, \"4.1\")\n self.assertEqual(sorted(expected_schema.iter_info_data()), 
sorted(actual_schema.iter_info_data()))\n\n def test_sample_data_with_refcalls_format_4_1(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.1\")\n self.assertEqual(sorted(expected_schema.iter_sample_data()), sorted(actual_schema.iter_sample_data()))\n\n def test_correct_source_with_refcalls_format_4_1(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.1\")\n self.assertEqual(expected_schema.file_metadata['source'], actual_schema.file_metadata['source'])\n\n def test_we_call_outputs_contigs_with_refcalls_format_4_1(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.1\")\n self.assertEqual(sorted(expected_schema.iter_contigs()), sorted(actual_schema.iter_contigs()))\n\n def test_filters_schema_with_refcalls_format_4_1(self):\n expected_schema, actual_schema = self.__run_small_variant_caller(True, \"4.1\")\n self.assertEqual(sorted(expected_schema.iter_filters()), sorted(actual_schema.iter_filters()))\n", "id": "9487010", "language": "Python", "matching_score": 3.3931055068969727, "max_stars_count": 8, "path": "test/wecall_acceptance/wecall_runner/test_vcf_schema.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\nimport itertools\nfrom wecall.bamutils.bam_builder import BAMBuilder\nfrom wecall.bamutils.sequence_bank import SequenceBank\nfrom wecall.fastautils.fasta_file_builder import FastaFileBuilder\nfrom wecall.genomics.variant import Variant\nfrom wecall.vcfutils.genotype_call import GenotypeCall\nfrom wecall.wecall_utils.wecall_config_builder import WecallConfigBuilder\nfrom wecall.wecall_utils.wecall_input_data import WecallInputData\nfrom wecall_test_drivers.ascii_wecall_runner import DEFAULT_SAMPLE_NAME\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall_test_drivers.variant_caller_wrapper import VariantCallerWrapper\n\n\nclass TestWeCallParallelisation(BaseTest):\n\n def setUp(self):\n BaseTest.setUp(self)\n self.chrom1 = \"1\"\n self.chrom2 = \"2\"\n # 0 1 2 3 4\n # 012345678901 23456789012345678901234567890123456789\n self.ref_string1 = \"AACCTTGGACGT***TATTCTGTCAATGCATCCCATTGCCGCCGCTAATCGCT\"\n self.seq_string1 = \" ...**......CTG.......*.....T...........A..T.........\"\n\n # 0 1 2 3\n # 0123456789012345678901234567890123456789\n self.ref_string2 = \"GGGAATCATACATACTGGATTACCATTGGACCAGATTAGT\"\n self.seq_string2 = \"........*.............T................ \"\n self.seq_string3 = \"........*........ 
....T.................\"\n self.block_size = 60\n\n self.sample_name1 = DEFAULT_SAMPLE_NAME\n self.sample_name2 = DEFAULT_SAMPLE_NAME\n\n self.vc_work_dir = os.path.join(self.work_dir, \"vc_work_dir\")\n os.makedirs(self.vc_work_dir)\n\n def setParallelAndSerialVariantCallers(self, copies1, copies2):\n '''Prepare the variant caller data for the test to run'''\n filestem = \"vc_input\"\n\n ref_file_builder = FastaFileBuilder(os.path.join(self.work_dir, filestem + \".fa\"))\n ref1 = ref_file_builder.with_chrom(self.chrom1, self.ref_string1 * copies1)\n ref2 = ref_file_builder.with_chrom(self.chrom2, self.ref_string2 * copies2)\n\n self.repeat_length1 = ref1.length_minus_deletions() / copies1\n self.repeat_length2 = ref2.length_minus_deletions() / copies2\n\n ref_file_builder.build()\n ref_file_builder.index()\n\n seq_bank1 = SequenceBank(ref1)\n seq_bank1.add_sequence(self.seq_string1 * copies1, n_fwd=10, n_rev=10)\n\n seq_bank2 = SequenceBank(ref2)\n seq_bank2.add_sequence(self.seq_string2 * copies2, n_fwd=10, n_rev=10)\n seq_bank2.add_sequence(self.seq_string3 * copies2, n_fwd=10, n_rev=10)\n\n bam_builder = BAMBuilder(os.path.join(self.work_dir, filestem + \".bam\"))\n bam_builder.with_bam_contig_data(ref1.chrom, ref1.length_minus_deletions(), self.sample_name1, seq_bank1)\n bam_builder.with_bam_contig_data(ref2.chrom, ref2.length_minus_deletions(), self.sample_name2, seq_bank2)\n bam_builder.build()\n\n wecall_input_data = WecallInputData([bam_builder.filename], ref_file_builder.filename)\n wecall_config_builder = WecallConfigBuilder(wecall_input_data, os.path.join(self.work_dir, filestem))\n wecall_config_builder.with_configuration(\"maxBlockSize\", self.block_size)\n wecall_config_builder.with_configuration(\"noSimilarReadsFilter\", False)\n wecall_config_builder.with_configuration(\"maxClusterDist\", 20)\n wecall_config = wecall_config_builder.build()\n\n parallel_output_file_stem = os.path.join(self.work_dir, filestem + \"_parallel\")\n serial_output_file_stem = os.path.join(self.work_dir, filestem + \"_serial\")\n\n self.vc_wrapper_parallel = VariantCallerWrapper(parallel_output_file_stem, wecall_config)\n\n self.vc_wrapper_serial = VariantCallerWrapper(serial_output_file_stem, wecall_config)\n\n def test_should_give_same_results_in_parallel_as_in_series(self):\n self.setParallelAndSerialVariantCallers(1, 5)\n self.vc_wrapper_parallel.add_additional_command(\"numberOfJobs\", \"2\")\n self.vc_wrapper_parallel.add_additional_command(\"workDir\", self.vc_work_dir)\n self.vc_wrapper_parallel.run()\n self.vc_wrapper_serial.run()\n\n with open(self.vc_wrapper_parallel.output_vcf, \"r\") as parallel_vcf:\n with open(self.vc_wrapper_serial.output_vcf, \"r\") as serial_vcf:\n zipped = itertools.zip_longest(parallel_vcf, serial_vcf, fillvalue=\"MISSING_LINE\")\n for parallel_vcf_line, serial_vcf_line in zipped:\n if not parallel_vcf_line.startswith(\"##options\"):\n self.assertEqual(parallel_vcf_line, serial_vcf_line)\n\n def test_should_find_correct_variants(self):\n n_copies1 = 1\n n_copies2 = 5\n self.setParallelAndSerialVariantCallers(n_copies1, n_copies2)\n self.vc_wrapper_parallel.add_additional_command(\"numberOfJobs\", \"2\")\n self.vc_wrapper_parallel.add_additional_command(\"workDir\", self.vc_work_dir)\n self.vc_wrapper_parallel.add_additional_command(\"allowMNPCalls\", False)\n self.vc_wrapper_parallel.run()\n\n expected_vars = set()\n for i in range(0, n_copies1):\n expected_vars.update({\n Variant(self.chrom1, 3 + i * self.repeat_length1, \"CTT\", \"C\"),\n Variant(self.chrom1, 
11 + i * self.repeat_length1, \"T\", \"TCTG\"),\n Variant(self.chrom1, 18 + i * self.repeat_length1, \"GT\", \"G\"),\n Variant(self.chrom1, 25 + i * self.repeat_length1, \"C\", \"T\"),\n Variant(self.chrom1, 37 + i * self.repeat_length1, \"G\", \"A\"),\n Variant(self.chrom1, 40 + i * self.repeat_length1, \"G\", \"T\"),\n })\n\n for i in range(0, n_copies2):\n expected_vars.update({\n Variant(self.chrom2, 7 + i * self.repeat_length2, \"AT\", \"A\"),\n Variant(self.chrom2, 22 + i * self.repeat_length2, \"C\", \"T\"),\n })\n\n actual_parallel_variants = self.vc_wrapper_parallel.get_variant_callset(self).get_variants()\n self.assertEqual(expected_vars, actual_parallel_variants)\n\n def test_should_give_correct_output_for_different_sample_names(self):\n self.sample_name1 = \"SAMPLE_A\"\n self.sample_name2 = \"SAMPLE_B\"\n\n n_copies1 = 1\n n_copies2 = 5\n self.setParallelAndSerialVariantCallers(n_copies1, n_copies2)\n self.vc_wrapper_parallel.add_additional_command(\"numberOfJobs\", \"2\")\n self.vc_wrapper_parallel.add_additional_command(\"workDir\", self.vc_work_dir)\n self.vc_wrapper_parallel.run()\n\n expected_var_A_1 = Variant(self.chrom1, 3, \"CTT\", \"C\")\n expected_var_B_1 = Variant(self.chrom2, 7, \"AT\", \"A\")\n\n parallel_variants_with_genotypes = self.vc_wrapper_parallel \\\n .get_variant_callset(self) \\\n .get_variants_with_genotypes()\n\n self.assertTrue(expected_var_A_1 in list(parallel_variants_with_genotypes.keys()))\n self.assertTrue(expected_var_B_1 in list(parallel_variants_with_genotypes.keys()))\n\n self.assertEqual(GenotypeCall(\"1/1\"), parallel_variants_with_genotypes[expected_var_A_1][self.sample_name1])\n self.assertEqual(GenotypeCall(\"./.\"), parallel_variants_with_genotypes[expected_var_A_1][self.sample_name2])\n self.assertEqual(GenotypeCall(\"./.\"), parallel_variants_with_genotypes[expected_var_B_1][self.sample_name1])\n self.assertEqual(GenotypeCall(\"1/1\"), parallel_variants_with_genotypes[expected_var_B_1][self.sample_name2])\n", "id": "10918324", "language": "Python", "matching_score": 5.0519537925720215, "max_stars_count": 8, "path": "test/wecall_acceptance/wecall_runner/test_weCall_parallelisation.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\n\nimport pysam\nfrom wecall.bamutils.bam_builder import BAMBuilder, RG_ID\nfrom wecall.bamutils.sequence_bank import SequenceBank\nfrom wecall.genomics.reference_chromosome import ReferenceChromosome\nfrom wecall_test_drivers.base_test import BaseTest\n\n\nclass TestBamBuilder(BaseTest):\n def setUp(self):\n BaseTest.setUp(self)\n self.chrom = \"2\"\n self.chrom_length = 200\n self.sample_name = \"TEST_SAMPLE\"\n self.filestub = \"_\"\n\n def test_header_for_multisample_multicontig(self):\n ref = ReferenceChromosome(\"\")\n sequence_bank = SequenceBank(ref)\n builder = BAMBuilder(\n os.path.join(\n self.work_dir,\n self.filestub +\n \".bam\"))\n builder.with_bam_contig_data(\"1\", 10, \"SAMPLE_ONE\", sequence_bank)\n builder.with_bam_contig_data(\"2\", 20, \"SAMPLE_TWO\", sequence_bank)\n\n expected_header = {'HD': {'VN': '1.0'}, 'SQ': [{'LN': 10, 'SN': \"1\"}, {'LN': 20, 'SN': \"2\"}], 'RG': [\n {\"ID\": RG_ID + \"_SAMPLE_ONE\", \"SM\": \"SAMPLE_ONE\"}, {\"ID\": RG_ID + \"_SAMPLE_TWO\", \"SM\": \"SAMPLE_TWO\"}]}\n\n self.assertDictEqual(expected_header, builder.header)\n\n def test_can_build_with_one_seq(self):\n ref = ReferenceChromosome(\"TCATAAAAAAAT\")\n sequence_bank = SequenceBank(ref)\n sequence_bank.add_sequence(\n \".*G.........\",\n \" \",\n n_fwd=2, n_rev=1\n )\n\n 
builder = BAMBuilder(\n os.path.join(\n self.work_dir,\n self.filestub +\n \".bam\")) .with_bam_contig_data(\n self.chrom,\n self.chrom_length,\n self.sample_name,\n sequence_bank)\n builder.build()\n\n bam_file = pysam.Samfile(builder.filename, \"rb\")\n reads = list(bam_file.fetch())\n self.assertEqual(len(reads), 3)\n\n for read in reads:\n self.assertEqual(read.pos, 0)\n self.assertEqual(read.seq, \"TGTAAAAAAAT\")\n self.assertEqual(read.cigarstring, \"1M1D10M\")\n\n self.assertTrue(os.path.isfile(bam_file.filename))\n self.assertTrue(os.path.isfile(bam_file.filename.decode() + \".bai\"))\n\n def test_can_build_with_defined_quality(self):\n ref = ReferenceChromosome(\"TCATAAAT\")\n sequence_bank = SequenceBank(ref)\n sequence_bank.add_sequence(\n \".*G.....\",\n \"9 87 00\",\n n_fwd=1, n_rev=0\n )\n\n builder = BAMBuilder(\n os.path.join(\n self.work_dir,\n self.filestub +\n \".bam\")) .with_bam_contig_data(\n self.chrom,\n self.chrom_length,\n self.sample_name,\n sequence_bank)\n builder.build()\n\n bam_file = pysam.Samfile(builder.filename, \"rb\")\n reads = list(bam_file.fetch())\n self.assertEqual(len(reads), 1)\n self.assertEqual(reads[0].seq, \"TGTAAAT\")\n\n # ascii: \"0\": \"!\", \"1\": \"+\", \"2\": \"5\", \"3\": \"?\", \"4\": \"H\", \"5\": \"S\",\n # \"6\": \"]\", \"7\": \"g\", \"8\": \"q\", \"9\": \"{\"\n expected_qual = \"{qgHH!!\"\n self.assertEqual(reads[0].qual, expected_qual)\n\n def test_can_build_two_chroms(self):\n ref1 = ReferenceChromosome(\"TCATAAAAAAAT\")\n sequence_bank1 = SequenceBank(ref1)\n sequence_bank1.add_sequence(\".*G.........\")\n\n ref2 = ReferenceChromosome(\"GGGG\")\n sequence_bank2 = SequenceBank(ref2)\n sequence_bank2.add_sequence(\"..*.\")\n\n builder = BAMBuilder(\n os.path.join(\n self.work_dir,\n self.filestub +\n \".bam\")) .with_bam_contig_data(\n \"1\",\n 100,\n \"SAMPLE\",\n sequence_bank1) .with_bam_contig_data(\n \"X\",\n 50,\n \"SAMPLE\",\n sequence_bank2)\n builder.build()\n\n bam_file = pysam.Samfile(builder.filename, \"rb\")\n reads_chrom1 = list(bam_file.fetch(region=\"1:1-20\"))\n self.assertEqual(len(reads_chrom1), 1)\n self.assertEqual(reads_chrom1[0].seq, \"TGTAAAAAAAT\")\n\n bam_file = pysam.Samfile(builder.filename, \"rb\")\n reads_chrom2 = list(bam_file.fetch(region=\"X:1-5\"))\n self.assertEqual(len(reads_chrom2), 1)\n self.assertEqual(reads_chrom2[0].seq, \"GGG\")\n\n reads = list(bam_file.fetch())\n self.assertEqual(len(reads), 2)\n self.assertEqual(reads[0].seq, \"TGTAAAAAAAT\")\n self.assertEqual(reads[1].seq, \"GGG\")\n\n self.assertRaises(ValueError, bam_file.fetch, region=\"2:1-20\")\n\n self.assertTrue(os.path.isfile(bam_file.filename))\n self.assertTrue(os.path.isfile(bam_file.filename.decode() + \".bai\"))\n", "id": "3876269", "language": "Python", "matching_score": 3.640765428543091, "max_stars_count": 8, "path": "test/test_utils/bamutils/test_bam_builder.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall_test_drivers.base_test import BaseTest\nfrom wecall.bamutils.sample_bank import SampleBank\nfrom wecall.wecall_utils.wecall_input_data_builder import WecallInputDataBuilder\nimport pysam\n\n\nclass TestWecallInputDataBuilder(BaseTest):\n def test_can_build_correct_ref_and_bam_file(self):\n bank = SampleBank(\"ATCCT*ATAATAAATAAATAAT\")\n sample_name = \"TEST_SAMPLE\"\n bank.add_sample_name(sample_name)\n bank[sample_name].add_sequence(\"....CT.........T......\")\n\n builder = WecallInputDataBuilder(self.work_dir).with_sample_bank(bank)\n\n input_files = builder.build()\n\n bam_file = 
pysam.Samfile(input_files.bam_filenames[0], \"rb\")\n for read in bam_file.fetch():\n self.assertEqual(read.pos, 0)\n self.assertEqual(read.seq, \"ATCCCTATAATAAATTAATAAT\")\n self.assertEqual(read.cigarstring, \"5M1I16M\")\n\n fasta_file = pysam.Fastafile(input_files.reference_filename)\n self.assertEqual(fasta_file.get_reference_length(bank.reference.chrom), 21)\n self.assertEqual(fasta_file.fetch(bank.reference.chrom, 0, 21), \"ATCCTATAATAAATAAATAAT\")\n\n def test_can_build_multiple_bam_files(self):\n bank = SampleBank(\"ATCCT*ATAATAAATAAATAAT\")\n sample_name1 = \"TEST_SAMPLE1\"\n bank.add_sample_name(sample_name1)\n bank[sample_name1].add_sequence(\"....CT.........T......\")\n\n sample_name2 = \"TEST_SAMPLE2\"\n bank.add_sample_name(sample_name2)\n bank[sample_name2].add_sequence(\".....*.G..........*...\")\n\n builder = WecallInputDataBuilder(self.work_dir).with_sample_bank(bank)\n input_bams = builder.build().bam_filenames\n\n bam_file1 = pysam.Samfile(input_bams[0], \"rb\")\n for read in bam_file1.fetch():\n self.assertEqual(read.pos, 0)\n self.assertEqual(read.seq, \"ATCCCTATAATAAATTAATAAT\")\n self.assertEqual(read.cigarstring, \"5M1I16M\")\n\n bam_file2 = pysam.Samfile(input_bams[1], \"rb\")\n for read in bam_file2.fetch():\n self.assertEqual(read.pos, 0)\n self.assertEqual(read.seq, \"ATCCTAGAATAAATAAAAAT\")\n self.assertEqual(read.cigarstring, \"17M1D3M\")\n\n def test_builds_correct_ref_and_bam_file_at_custom_position(self):\n pos_from = 100\n bank = SampleBank(\"ATCCT*ATAATAAATAAATAAT\", pos_from)\n sample_name = \"TEST_SAMPLE\"\n bank.add_sample_name(sample_name)\n bank[sample_name].add_sequence(\"....CT.........T......\")\n\n builder = WecallInputDataBuilder(self.work_dir).with_sample_bank(bank)\n\n input_files = builder.build()\n\n bam_file = pysam.Samfile(input_files.bam_filenames[0], \"rb\")\n for read in bam_file.fetch():\n self.assertEqual(read.pos, pos_from)\n self.assertEqual(read.seq, \"ATCCCTATAATAAATTAATAAT\")\n self.assertEqual(read.cigarstring, \"5M1I16M\")\n\n fasta_file = pysam.FastaFile(input_files.reference_filename)\n self.assertEqual(\n fasta_file.get_reference_length(bank.reference.chrom), 121)\n self.assertEqual(fasta_file.fetch(bank.reference.chrom, pos_from, pos_from + 21), \"ATCCTATAATAAATAAATAAT\")\n\n def test_should_be_able_to_build_bam_and_ref_data_with_multiple_chromosomes(self):\n bank_1 = SampleBank(\"A\" * 10, 0, chrom='10')\n bank_1.add_sample_name(\"sample\").add_sequence(\".\" * 10)\n\n bank_2 = SampleBank(\"T\" * 9, 0, chrom='20')\n bank_2.add_sample_name(\"sample\").add_sequence(\".\" * 9)\n\n builder = WecallInputDataBuilder(\n self.work_dir).with_sample_bank(bank_1).with_sample_bank(bank_2)\n\n input_files = builder.build()\n bam_file = pysam.Samfile(input_files.bam_filenames[0], \"rb\")\n\n for read in bam_file.fetch(reference='20'):\n self.assertEqual(read.pos, 0)\n self.assertEqual(read.seq, \"T\" * 9)\n self.assertEqual(read.cigarstring, \"9M\")\n\n for read in bam_file.fetch(reference='10'):\n self.assertEqual(read.pos, 0)\n self.assertEqual(read.seq, \"A\" * 10)\n self.assertEqual(read.cigarstring, \"10M\")\n print((dir(read)))\n", "id": "689322", "language": "Python", "matching_score": 3.4607200622558594, "max_stars_count": 8, "path": "test/test_utils/wecall_utils/test_wecall_input_data_builder.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\nfrom unittest import expectedFailure\n\nfrom wecall.fastautils.fasta_file_builder import FastaFileBuilder\nfrom wecall_test_drivers.base_test import 
BaseTest\nfrom pysam import Fastafile\n\n\nclass TestFastaFileBuilder(BaseTest):\n\n def __build_fasta_file(self, data):\n fasta_file = FastaFileBuilder(\n os.path.join(\n self.work_dir,\n \"baa.fa\"),\n line_length=5)\n for chrom, seq in list(data.items()):\n fasta_file.with_chrom(chrom, seq)\n return fasta_file.build().index()\n\n def test_should_be_able_to_fetch_section_of_genome(self):\n fasta_file = self.__build_fasta_file({'chr20': \"TAGCATTATTATTATTATTATTATTA\", })\n\n fasta_file = Fastafile(fasta_file.filename)\n self.assertEqual(fasta_file.fetch('chr20', 10, 20).upper(), \"ATTATTATTA\")\n\n def test_should_be_able_to_list_all_chromosomes(self):\n fasta_file = self.__build_fasta_file({'chr5': \"T\", 'chrX': \"T\", 'chr20': \"T\", })\n\n fasta_file = Fastafile(fasta_file.filename)\n self.assertEqual(sorted(fasta_file.references), sorted(['chr5', 'chr20', 'chrX']))\n\n def test_should_get_correct_chrom_length(self):\n chrom = 'chr20'\n seq = \"TAGCATTATTATTATTATTATTATTA\"\n fasta_file = self.__build_fasta_file({chrom: seq, })\n\n fasta_file = Fastafile(fasta_file.filename)\n self.assertEqual(fasta_file.get_reference_length(chrom), len(seq))\n\n @expectedFailure\n # \"Cannot create reference files with dodgy chromosome content\"\n def test_should_return_capitalised_sequence_from_ref_file(self):\n fasta_file = self.__build_fasta_file({'chr20': \"tagcattattattattattattatta\", })\n\n fasta_file = Fastafile(fasta_file.filename)\n self.assertEqual(fasta_file.fetch('chr20', 10, 20).upper(), \"ATTATTATTA\")\n", "id": "6216511", "language": "Python", "matching_score": 2.0614781379699707, "max_stars_count": 8, "path": "test/test_utils/fastautils/test_fasta_file_builder.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nimport os\nimport subprocess\nfrom wecall.common.exceptions import weCallException\nfrom wecall.genomics.reference_genome import InMemoryReferenceGenome\nfrom wecall_test_drivers.tool_runner import ToolRunner\n\n\nclass FastaFileBuilder(object):\n\n def __init__(self, filename, line_length=80):\n assert(line_length > 0)\n self.filename = filename\n self.line_length = line_length\n self.__reference_genome = InMemoryReferenceGenome()\n\n def reference_genome(self):\n return self.__reference_genome\n\n def with_chrom(self, chrom, sequence, pos_from=0):\n return self.__reference_genome.with_chrom(chrom, sequence, pos_from)\n\n def build(self):\n with open(self.filename, \"w\") as fasta_file:\n for chrom_name in self.__reference_genome.chromosomes():\n sequence = self.__reference_genome.fetch(chrom_name)\n fasta_file.write(\">{}\\n\".format(chrom_name))\n for offset in range(0, len(sequence), self.line_length):\n line = sequence[offset:offset + self.line_length] + '\\n'\n fasta_file.write(line)\n\n return self\n\n def index(self):\n tool_runner = ToolRunner()\n tool_runner.start(\n [os.path.join(os.environ['WECALL_BIN'], \"samtools\"), \"faidx\", self.filename])\n\n if tool_runner.return_code != 0:\n raise weCallException(\"\")\n else:\n return self\n", "id": "5542939", "language": "Python", "matching_score": 1.8808320760726929, "max_stars_count": 8, "path": "python/wecall/fastautils/fasta_file_builder.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\n\"\"\"\nBasic exception classes to be used throughout the weCall code.\n\"\"\"\n\n\nclass weCallException(Exception):\n \"\"\"\n Base class for all exceptions. 
Everything we throw\n should derive from this.\n \"\"\"\n\n def __init__(self, value):\n self.value = value\n self.message = value\n\n def __str__(self):\n return repr(self.value)\n\n\nclass weCallRuntimeException(weCallException):\n\n def __init__(self, return_code, result):\n self.return_code = return_code\n self.result = result\n self.value = \"weCall exited with non-zero exit code {} for `{}`.\".format(\n self.return_code, self.result)\n", "id": "6849600", "language": "Python", "matching_score": 0.8168335556983948, "max_stars_count": 8, "path": "python/wecall/common/exceptions.py" }, { "content": "# All content Copyright (C) 2018 Genomics plc\nfrom wecall.common.exceptions import weCallException\n\n\ndef to_vcf_str(primitive_type):\n if primitive_type is None:\n return \".\"\n elif isinstance(primitive_type, list) or isinstance(primitive_type, tuple):\n return ','.join(map(to_vcf_str, primitive_type))\n elif isinstance(primitive_type, float):\n return \"{:g}\".format(primitive_type)\n else:\n return str(primitive_type)\n\n\ndef from_vcf_str(vcf_str, desired_type):\n try:\n return desired_type(vcf_str) if vcf_str != \".\" else None\n except ValueError:\n raise weCallException(\n \"Cannot cast {} to {!r}\".format(\n vcf_str, desired_type))\n", "id": "886310", "language": "Python", "matching_score": 0.4812721014022827, "max_stars_count": 8, "path": "python/wecall/vcfutils/stringutils.py" } ]
2.863711
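The record above ends with the wecall vcfutils.stringutils helpers, which define the VCF text convention used throughout these tests: None is written as ".", lists and tuples are comma-joined, and floats go through "{:g}". A minimal usage sketch of those two functions, assuming the wecall package stored in this record is installed on the Python path; the literal values below are illustrative only and are not taken from the dataset:

from wecall.vcfutils.stringutils import to_vcf_str, from_vcf_str

# Missing values serialise to the VCF placeholder "." and parse back to None.
assert to_vcf_str(None) == "."
assert from_vcf_str(".", int) is None

# Sequences are comma-joined element by element; floats use the "{:g}" format.
assert to_vcf_str([1, 2, 3]) == "1,2,3"
assert to_vcf_str(0.25) == "0.25"

# Parsing applies the requested type; an impossible cast raises weCallException.
assert from_vcf_str("7", int) == 7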
NGillet
[ { "content": "import numpy as np\nfrom .helper_functions import print_msg\nfrom . import const\nfrom . import conv\n\n#A simple struct to hold info about single halo\nclass Halo:\n\t'''\n\tA simple struct to hold info about a single halo\n\t'''\n\tdef __init__(self):\n\t\tself.pos = (0.0, 0.0, 0.0) #Position in grid points\n\t\tself.pos_cm = (0.0, 0.0, 0.0) #Center of mass position in grid points\n\t\tself.vel = (0.0, 0.0, 0.0) #Velocity in simulation units\n\t\tself.l = (0.0, 0.0, 0.0) #Angular momentum in simulation units\n\t\tself.vel_disp = 0.0 #Velocity dispersion in simulation units\n\t\tself.r = 0.0 #Virial radius in grid units\n\t\tself.m = 0.0 #Grid mass\n\t\tself.mp = 0 #Number of particles\n\t\tself.solar_masses = 0.0 #Mass in solar masses\n\n\nclass HaloList:\n\t'''\n\tA class that holds information about a large number of halos, as read from a \n\thalo list file.\n\tContains methods to select halos based on different criteria. This file is very slow\n\tif you need to read a large number of halos.\n\t\n\tTODO: write a better implementation of this class.\n\t'''\n\tdef __init__(self, filename=None, min_select_mass = 0.0, max_select_mass = None, \n\t\t\tmax_select_number=-1, startline = 0):\n\t\t'''\n\t\tInitialize the object. If filename is given, read the file. Otherwise,\n\t\tdo nothing.\n\t\t\n\t\tParameters:\n\t\t\t* filename = None (string): The file to read from\n\t\t\t* min_select_mass = 0.0 (float): The lower threshold mass in solar masses.\n\t\t\t\tOnly halos above this mass will be read.\n\t\t\t* max_select_mass = None (float): The upper threshold mass in solar masses.\n\t\t\t\tOnly halos below this mass will be read. If None, there is no limit.\n\t\t\t* max_select_number = -1 (int): The max number of halos to read. If -1, there\n\t\t\t\tis no limit.\n\t\t\t* startline = 0 (int): The line in the file where reading will start.\n\t\tReturns:\n\t\t\tNothing\n\t\t'''\n\t\tself.halos = []\n\n\t\tif filename:\n\t\t\tself.read_from_file(filename, min_select_mass, max_select_mass, max_select_number, \n\t\t\t\t\tstartline)\n\n\tdef read_from_file(self,filename, min_select_mass = 0.0, max_select_mass = None, max_select_number=-1, \n\t\t\tstartline=0):\n\t\t'''\n\t\tRead a halo list.\n\t\t\n\t\tParameters:\n\t\t\t* filename (string): The file to read from\n\t\t\t* min_select_mass = 0.0 (float): The lower threshold mass in solar masses.\n\t\t\t\tOnly halos above this mass will be read.\n\t\t\t* max_select_mass = None (float): The upper threshold mass in solar masses.\n\t\t\t\tOnly halos below this mass will be read. If None, there is no limit.\n\t\t\t* max_select_number = -1 (int): The max number of halos to read. If -1, there\n\t\t\t\tis no limit.\n\t\t\t* startline = 0 (int): The line in the file where reading will start.\n\t\tReturns:\n\t\t\tTrue if all the halos were read. False otherwise.\n\t\t'''\n\n\t\tself.halos = []\n\n\t\tprint_msg('Reading halo file %s...' 
% filename)\n\t\tself.filename = filename\n\t\timport fileinput\n\n\t\t#Store the redshift from the filename\n\t\timport os.path\n\t\tname = os.path.split(filename)[1]\n\t\tself.z = float(name.split('halo')[0])\n\n\t\t#Read the file line by line, since it's large\n\t\tlinenumber = 1\n\t\tmin_select_grid_mass = min_select_mass/(conv.M_grid*const.solar_masses_per_gram)\n\t\tif max_select_mass:\n\t\t\tprint_msg('Max_select_mass: %g' % max_select_mass)\n\t\t\tmax_select_grid_mass = max_select_mass/(conv.M_grid*const.solar_masses_per_gram)\n\n\t\tfor line in fileinput.input(filename):\n\t\t\tif linenumber < startline: #If you want to read from a particular line\n\t\t\t\tlinenumber += 1\n\t\t\t\tcontinue\n\t\t\tif max_select_number >= 0 and len(self.halos) >= max_select_number:\n\t\t\t\tfileinput.close()\n\t\t\t\treturn False\n\t\t\tif linenumber % 100000 == 0:\n\t\t\t\tprint_msg('Read %d lines' % linenumber)\n\t\t\tlinenumber += 1\n\n\t\t\tvals = line.split()\n\t\t\tgrid_mass = float(vals[-3])\n\n\t\t\t#Create a halo and add it to the list\n\t\t\tif grid_mass > min_select_grid_mass and (max_select_mass == None or grid_mass < max_select_grid_mass):\n\t\t\t\thalo = Halo()\n # The following lines used the map() function to convert\n # parts of the vals list into floats before putting them\n # into an array. In Python 3 map() returns an iterable,\n # not a list, so changed this to a list operation.\n # GM/200601\n\t\t\t\thalo.pos = np.array([float(i) for i in vals[:3]])\n\t\t\t\thalo.pos_cm = np.array([float(i) for i in vals[3:6]])\n\t\t\t\thalo.vel = np.array([float(i) for i in vals[6:9]])\n\t\t\t\thalo.l = np.array([float(i) for i in vals[9:12]])\n\t\t\t\thalo.vel_disp = float(vals[12])\n\t\t\t\thalo.r = float(vals[13])\n\t\t\t\thalo.m = float(vals[14])\n\t\t\t\thalo.mp = float(vals[15])\n\t\t\t\thalo.solar_masses = grid_mass*conv.M_grid*const.solar_masses_per_gram\n\t\t\t\tself.halos.append(halo)\n\n\t\tfileinput.close()\n\n\t\treturn True\n\n\t\t\t\n\n", "id": "5509827", "language": "Python", "matching_score": 1.3398940563201904, "max_stars_count": 0, "path": "t2c/halo_list.py" }, { "content": "'''\nMethods to identify regions of interest in images.\n'''\n\nfrom skimage.filters import threshold_otsu\nfrom scipy.signal import argrelextrema\nimport matplotlib.pyplot as plt\nfrom . import superpixels\nfrom sklearn.cluster import KMeans\nimport numpy as np\n\ndef bubbles_from_slic(data, n_segments=5000, bins='knuth'):\n\t\"\"\"\n\t@ Giri at al. (2018b)\n\tIt is a method to identify regions of interest in noisy images.\n\tThe method is an implementation of the superpixel based approach, called SLIC (Achanta et al. 2010),\n\tused in the field of computer vision.\n\n\tParameters\n\t----------\n\tdata : ndarray\n\t\tThe brightness temperature cube.\n\tn_segments: int\n\t\tNumber of superpixels (Default: 2000).\n\tbins : int or str\n\t\tNumber of bins for the PDF used for stitching.\n\t\t'blocks', 'knuth', 'scotts', 'freedman' rules to determine bins automatically \n\t\tcan also be choosen. (Default: 'knuth').\n\n\tReturns\n\t-------\n\tBinary cube where pixels identified as region of interest are the True.\n\t\"\"\"\n\tlabels = superpixels.slic_cube(data, n_segments=n_segments)\n\tbin_sl = superpixels.stitch_superpixels(data, labels, bins=bins, binary=True)\n\treturn bin_sl\n\ndef bubbles_from_kmeans(data, upper_lim=True, n_jobs=1, n_clusters=3):\n\t\"\"\"\n\t@ Giri at al. 
(2018a)\n\n\tIt is a method to identify regions of interest in noisy images.\n\tThe method finds the optimal threshold using the 1D PDF of the image. It gives similar results compared\n\tto the Otsu's method.\n\n\tParameters\n\t----------\n\tdata : ndarray\n\t\tThe brightness temperature/xfrac cube.\n\tupper_lim : bool\n\t\tThis decides which mode in the PDF is to be identified.\n\t\t'True' identifies ionized regions in brightness temperature, while\n\t\t'False' identifies in the xfrac data (Default: True).\n\tn_jobs : int\n\t\tNumber of cores to use (Default: 1).\n\tn_cluster : int\n\t\tNumber of clusters found in the PDF (Default: 3).\n\n\tReturns\n\t-------\n\tBinary cube where pixels identified as region of interest are the True.\n\t\"\"\"\n\tif np.unique(data).size<2:\n\t\tprint('The data array is single valued and thus the entire array is one region.')\n\t\treturn np.ones_like(data)\n\tif np.unique(data).size==2: n_clusters=2\n\tif n_clusters==2: array, t_th = threshold_kmeans(data, upper_lim=upper_lim, n_jobs=n_jobs)\n\telse: array = threshold_kmeans_3cluster(data, upper_lim=upper_lim, n_jobs=n_jobs)\n\treturn array\n\ndef bubbles_from_fixed_threshold(data, threshold=0, upper_lim=True):\n\t\"\"\"\n\t@ Giri at al. (2018a)\n\n\tIt is a method to identify regions of interest in noisy images.\n\tThe method uses a fixed threshold.\n\n\tParameters\n\t----------\n\tdata : ndarray\n\t\tThe brightness temperature or ionization fraction cube.\n\tthreshold : float\n\t\tThe fixed threshold value (Default: 0). \n\tupper_lim : bool\n\t\tThis decides which mode in the PDF is to be identified.\n\t\t'True' identifies ionized regions in brightness temperature, while\n\t\t'False' identifies in the xfrac data (Default: True).\n\n\tReturns\n\t-------\n\tBinary cube where pixels identified as region of interest are the True.\n\t\"\"\"\n\tif upper_lim: return (data<=threshold)\n\telse: return (data>=threshold)\n\ndef threshold_kmeans(cube, upper_lim=False, mean_remove=True, n_jobs=1):\n\t#The input is the brightness temperature cube.\n\t\n\tarray = np.zeros(cube.shape)\n\t#km = KMeans(n_clusters=2)\n\t# if mean_remove:\n\t# \tif upper_lim: X = cube[cube<=cube.mean()].reshape(-1,1)\n\t# \telse: X = cube[cube>=cube.mean()].reshape(-1,1)\n\t# else:\n\t# \tX = cube.reshape(-1,1)\n\tX = cube.reshape(-1,1)\n\ty = KMeans(n_clusters=2, n_jobs=n_jobs).fit_predict(X)\n\tt_th = X[y==0].max()/2.+X[y==1].max()/2.\n\tif upper_lim: array[cube<=t_th] = 1\n\telse: array[cube>t_th] = 1\n\tprint(\"The output contains a tuple with binary-cube and determined-threshold.\")\n\treturn array, t_th\n\t\ndef threshold_kmeans_3cluster(cube, upper_lim=False, n_jobs=1):\n\t#The input is the brightness temperature cube.\n\t\n\tkm = KMeans(n_clusters=3, n_jobs=n_jobs)\n\tX = cube.reshape(-1,1)\n\tarray = np.zeros(X.shape)\n\tkm.fit(X)\n\ty = km.labels_\n\tcenters = km.cluster_centers_\n\tif upper_lim: true_label = centers.argmin()\n\telse: true_label = centers.argmax()\n\tarray[y==true_label] = 1\n\tarray = array.reshape(cube.shape)\n\treturn array\n", "id": "9521895", "language": "Python", "matching_score": 1.1114521026611328, "max_stars_count": 1, "path": "t2c/identify_regions.py" }, { "content": "'''\nMethods to smooth or reduce resolution of the data to reduce noise.\n'''\n\nimport numpy as np\nfrom . import const, conv\nfrom . 
import cosmology as cm\nimport scipy.ndimage as ndimage\nimport scipy.interpolate\nfrom scipy import signal\nfrom scipy.fftpack import fft, ifft, fftn, ifftn\nfrom numpy.fft import rfftn, irfftn\nfrom math import ceil, floor\nfrom numpy import array, asarray, roll\nfrom .helper_functions import fftconvolve, find_idx\n\ndef gauss_kernel(size, sigma=1.0, fwhm=None):\n\t''' \n\tGenerate a normalized gaussian kernel, defined as\n\texp(-(x^2 + y^2)/(2sigma^2)).\n\t\n\t\n\tParameters:\n\t\tsize (int): Width of output array in pixels.\n\t\tsigma = 1.0 (float): The sigma parameter for the Gaussian.\n\t\tfwhm = None (float or None): The full width at half maximum.\n\t\t\t\tIf this parameter is given, it overrides sigma.\n\t\t\n\tReturns:\n\t\tnumpy array with the Gaussian. The dimensions will be\n\t\tsize x size or size x sizey depending on whether\n\t\tsizey is set. The Gaussian is normalized so that its\n\t\tintegral is 1.\t\n\t'''\n\t\n\tif fwhm != None:\n\t\tsigma = fwhm/(2.*np.sqrt(2.*np.log(2)))\n\n\tif size % 2 == 0:\n\t\tsize = int(size/2)\n\t\tx,y = np.mgrid[-size:size, -size:size]\n\telse:\n\t\tsize = int(size/2)\n\t\tx,y = np.mgrid[-size:size+1, -size:size+1]\n\t\n\tg = np.exp(-(x**2 + y**2)/(2.*sigma**2))\n\n\treturn g/g.sum()\n\n\ndef tophat_kernel(size, tophat_width):\n\t'''\n\tGenerate a square tophat kernel\n\t\n\tParameters:\n\t\tsize (int): the size of the array\n\t\ttophat_width (int): the size of the tophat kernel\n\t\t\n\tReturns:\n\t\tThe kernel as a (size,size) array\n\t'''\n\tkernel = np.zeros((size,size))\n\tcenter = kernel.shape[0]/2\n\tidx_low = int(center-np.floor(tophat_width/2.))\n\tidx_high = int(center+np.ceil(tophat_width/2.))\n\tkernel[idx_low:idx_high, idx_low:idx_high] = 1.\n\tkernel /= np.sum(kernel)\n\treturn kernel\n\n\ndef tophat_kernel_3d(size):\n\t'''\n\tGenerate a 3-dimensional tophat kernel with\n\tthe specified size\n\t\n\tParameters:\n\t\tsize (integer or list-like): the size of\n\t\t\tthe tophat kernel along each dimension. If\n\t\t\tsize is an integer, the kernel will be cubic.\n\tReturns:\n\t\tThe normalized kernel\n\t'''\n\tif hasattr(size, '__iter__'):\n\t\tkernel = np.ones(size)\n\telse: #Integer\n\t\tkernel = np.ones((size, size, size))\n\tkernel /= np.sum(kernel)\n\treturn kernel\n\n\ndef lanczos_kernel(size, kernel_width):\n\t'''\n\tGenerate a 2D Lanczos kernel.\n\t\n\tParameters:\n\t\tsize (int): the size of the array\n\t\tkernel_width (int): the width of the kernel\n\t\t\n\tReturns:\n\t\tThe kernel as a (size,size) array\n\n\t'''\n\t#x,y = np.mgrid[-size*0.5:size*0.5, -size*0.5:size*0.5]\n\txi = np.linspace(-size*0.5, size*0.5, size)\n\tyi = np.linspace(-size*0.5, size*0.5, size)\n\tx, y = np.meshgrid(xi, yi)\n\ta = kernel_width\n\tkernel = np.sinc(x)*np.sinc(x/a)*np.sinc(y)*np.sinc(y/a)\n\tkernel[np.abs(x) > a] = 0.\n\tkernel[np.abs(y) > a] = 0.\n\tkernel /= kernel.sum()\n\t\n\treturn kernel\n\n\ndef smooth_gauss(input_array, sigma=1.0, fwhm=None):\n\t''' \n\tSmooth the input array with a Gaussian kernel specified either by\n sigma (standard deviation of the Gaussian function) or FWHM (Full \n Width Half Maximum). The latter is more appropriate when considering\n the resolution of a telescope.\n\t\n\tParameters:\n\t\tinput_array (numpy array): the array to smooth\n\t\tsigma=1.0 (float): the width of the kernel (variance)\n\t\tfwhm = None (float or None): The full width at half maximum.\n\t\t\t\tIf this parameter is given, it overrides sigma.\n\n\tReturns:\n\t\tThe smoothed array. 
A numpy array with the same\n\t\tdimensions as the input.\n\t'''\n\tkernel = gauss_kernel(input_array.shape[0], sigma=sigma, fwhm=fwhm)\n\treturn smooth_with_kernel(input_array, kernel)\n\n\ndef smooth_tophat(input_array, tophat_width):\n\t''' \n\tSmooth the input array with a square tophat kernel.\n\t\n\tParameters:\n\t\tinput_array (numpy array): the array to smooth\n\t\ttophat_width (int): the width of the kernel in cells\n\n\tReturns:\n\t\tThe smoothed array. A numpy array with the same\n\t\tdimensions as the input.\n\t'''\n\t#For some reason fftconvolve works produces edge effects with\n\t#an even number of cells, so we pad the array with an extra pixel\n\t#if this is the case\n\tif input_array.shape[0] % 2 == 0:\n\t\tfrom .angular_coordinates import _get_padded_slice\n\t\tpadded = _get_padded_slice(input_array, input_array.shape[0]+1)\n\t\tout = smooth_tophat(padded, tophat_width)\n\t\treturn out[:-1,:-1]\n\t\n\tkernel = tophat_kernel(input_array.shape[0], tophat_width)\n\treturn smooth_with_kernel(input_array, kernel)\n\n\ndef smooth_lanczos(input_array, kernel_width):\n\t''' \n\tSmooth the input array with a Lanczos kernel.\n\t\n\tParameters:\n\t\tinput_array (numpy array): the array to smooth\n\t\tkernel_width (int): the width of the kernel in cells\n\n\tReturns:\n\t\tThe smoothed array. A numpy array with the same\n\t\tdimensions as the input.\n\t'''\n\n\tkernel = lanczos_kernel(input_array.shape[0], kernel_width)\n\treturn smooth_with_kernel(input_array, kernel)\n\n\ndef smooth_with_kernel(input_array, kernel):\n\t''' \n\tSmooth the input array with an arbitrary kernel.\n\t\n\tParameters:\n\t\tinput_array (numpy array): the array to smooth\n\t\tkernel (numpy array): the smoothing kernel. Must\n\t\t\tbe the same size as the input array\n\n\tReturns:\n\t\tThe smoothed array. A numpy array with the same\n\t\tdimensions as the input.\n\t'''\n\tassert len(input_array.shape) == len(kernel.shape)\n\t\n\tout = fftconvolve(input_array, kernel)\n\t\n\treturn out\n\n\ndef get_beam_w(baseline, z):\n\t'''\n\tCalculate the width of the beam for an\n\tinterferometer with a given maximum baseline.\n\tIt is assumed that observations are done at\n\tlambda = 21*(1+z) cm\n\t\n\tParameters:\n\t\tbaseline (float): the maximum baseline in meters\n\t\tz (float): the redshift\n\t\t\n\tReturns:\n\t\tThe beam width in arcminutes\n\t'''\n\t\n\tfr = const.nu0 / (1.0+z) #21 cm frequency at z\n\tlw = const.c/fr/1.e6*1.e3 # wavelength in m\n\tbeam_w = lw/baseline/np.pi*180.*60.\n\treturn beam_w\n\n\ndef interpolate3d(input_array, x, y, z, order=0):\n\t'''\n\tThis function is a recreation of IDL's interpolate\n\troutine. It takes an input array, and interpolates it\n\tto a new size, which can be irregularly spaced.\n\t\n\tParameters:\n\t\tinput_array (numpy array): the array to interpolate\n\t\tx (numpy array): the output coordinates along the x axis\n\t\t\texpressed as (fractional) indices \n\t\ty (numpy array): the output coordinates along the y axis\n\t\t\texpressed as (fractional) indices \n\t\tz (numpy array): the output coordinates along the z axis\n\t\t\texpressed as (fractional) indices\n\t\torder (int): the order of the spline interpolation. Default\n\t\t\tis 0 (linear interpolation). 
Setting order=1 gives the same\n\t\t\tbehaviour as IDL's interpolate function with default parameters.\n\n\tReturns:\n\t\tInterpolated array with shape (nx, ny, nz), where nx, ny and nz\n\t\tare the lengths of the arrays x, y and z respectively.\n\t'''\n\t\n\t\n\tinds = np.zeros((3, len(x), len(y), len(z)))\n\tinds[0,:,:] = x[:,np.newaxis,np.newaxis]\n\tinds[1,:,:] = y[np.newaxis,:,np.newaxis]\n\tinds[2,:,:] = z[np.newaxis,np.newaxis,:]\n\tnew_array = ndimage.map_coordinates(input_array, inds, mode='wrap', \\\n\t\t\t\t\t\t\t\t\torder=order)\n\t\n\treturn new_array\n\n\ndef interpolate2d(input_array, x, y, order=0):\n\t'''\n\tSame as interpolate2d but for 2D data\n\t\n\tParameters:\n\t\tinput_array (numpy array): the array to interpolate\n\t\tx (numpy array): the output coordinates along the x axis\n\t\t\texpressed as (fractional) indices \n\t\ty (numpy array): the output coordinates along the y axis\n\t\t\texpressed as (fractional) indices \n\t\torder (int): the order of the spline interpolation. Default\n\t\t\tis 0 (linear interpolation). Setting order=1 gives the same\n\t\t\tresults as IDL's interpolate function\n\n\tReturns:\n\t\tInterpolated array with shape (nx, ny), where nx and ny\n\t\tare the lengths of the arrays x and y respectively.\n\t'''\n\n\tinds = np.zeros((2, len(x), len(y)))\n\tinds[0,:] = x[:,np.newaxis]\n\tinds[1,:] = y[np.newaxis,:]\n\tnew_array = ndimage.map_coordinates(input_array, inds, mode='wrap', \\\n\t\t\t\t\t\t\t\t\torder=order, prefilter=True)\n\t\n\treturn new_array\n\ndef smooth_lightcone(lightcone, z_array, box_size_mpc=False, max_baseline=2., ratio=1.):\n\t\"\"\"\n\tThis smooths in both angular and frequency direction assuming both to be smoothed by same scale.\n\n\tParameters:\n\t\tlightcone (numpy array): The lightcone that is to be smoothed.\n\t\tz_array (float) : The lowest value of the redshift in the lightcone or the whole redshift array.\n\t\tbox_size_mpc (float) : The box size in Mpc. Default value is determined from \n\t\t\t\t\t the box size set for the simulation (set_sim_constants)\n\t\tmax_baseline (float) : The maximun baseline of the telescope in km. 
Default value \n\t\t\t\t\t is set as 2 km (SKA core).\n\t\tratio (int) : It is the ratio of smoothing scale in frequency direction and \n the angular direction (Default value: 1).\n\n\tReturns:\n\t\t(Smoothed_lightcone, redshifts) \n\t\"\"\"\n\tif (not box_size_mpc): box_size_mpc=conv.LB\n\tif(z_array.shape[0] == lightcone.shape[2]):\n\t\tinput_redshifts = z_array.copy()\n\telse:\n\t\tz_low = z_array\n\t\tcell_size = 1.0*box_size_mpc/lightcone.shape[0]\n\t\tdistances = cm.z_to_cdist(z_low) + np.arange(lightcone.shape[2])*cell_size\n\t\tinput_redshifts = cm.cdist_to_z(distances)\n\n\toutput_dtheta = (1+input_redshifts)*21e-5/max_baseline\n\toutput_ang_res = output_dtheta*cm.z_to_cdist(input_redshifts)\n\toutput_dz = ratio*output_ang_res/const.c\n\tfor i in range(len(output_dz)):\n\t\toutput_dz[i] = output_dz[i] * hubble_parameter(input_redshifts[i])\n\toutput_lightcone = smooth_lightcone_tophat(lightcone, input_redshifts, output_dz)\n\toutput_lightcone = smooth_lightcone_gauss(output_lightcone, output_ang_res*lightcone.shape[0]/box_size_mpc)\n\treturn output_lightcone, input_redshifts\n\ndef smooth_coeval(cube, z, box_size_mpc=False, max_baseline=2., ratio=1., nu_axis=2):\n\t\"\"\"\n\tThis smooths the coeval cube by Gaussian in angular direction and by tophat along the third axis.\n\n\tParameters:\n\t\tcoeval_cube (numpy array): The data cube that is to be smoothed.\n\t\tz (float) : The redshift of the coeval cube.\n\t\tbox_size_mpc (float) : The box size in Mpc. Default value is determined from \n\t\t\t\t\t the box size set for the simulation (set_sim_constants)\n\t\tmax_baseline (float) : The maximun baseline of the telescope in km. Default value \n\t\t\t\t\t is set as 2 km (SKA core).\n\t\tratio (int) : It is the ratio of smoothing scale in frequency direction and \n the angular direction (Default value: 1).\n\t\tnu_axis (int) : Frequency axis\n\n\tReturns:\n\t\tSmoothed_coeval_cube\n\t\"\"\"\n\tif (not box_size_mpc): box_size_mpc=conv.LB\t\n\toutput_dtheta = (1+z)*21e-5/max_baseline\n\toutput_ang_res = output_dtheta*cm.z_to_cdist(z) * cube.shape[0]/box_size_mpc\n\toutput_cube = smooth_coeval_tophat(cube, output_ang_res*ratio, nu_axis=nu_axis)\n\toutput_cube = smooth_coeval_gauss(output_cube, output_ang_res, nu_axis=nu_axis)\n\treturn output_cube\n\ndef smooth_coeval_tophat(cube, width, nu_axis):\n\t\"\"\"\n\tThis smooths the slices perpendicular to the given axis of the cube by tophat filter.\n\n\tParameters:\n\t\tcube (numpy array) : The data cube that is to be smoothed.\n\t\twidth (float) : The width of the tophat filter.\n\t\tnu_axis (int) : Frequency axis\n\n\tReturns:\n\t\tSmoothed_cube\n\t\"\"\"\n\tkernel = tophat_kernel(cube.shape[nu_axis], width)\n\toutput_cube = np.zeros(cube.shape)\n\tif nu_axis==0:\n\t\tfor i in range(cube.shape[1]):\n\t\t\toutput_cube[:,i,:] = smooth_with_kernel(cube[:,i,:], kernel)\n\telse:\n\t\tfor i in range(cube.shape[0]):\n\t\t\toutput_cube[i,:,:] = smooth_with_kernel(cube[i,:,:], kernel)\n\treturn output_cube\n\ndef smooth_coeval_gauss(cube, fwhm, nu_axis):\n\t\"\"\"\n\tThis smooths the slices parallel to the given axis of the cube by Gaussian filter.\n\n\tParameters:\n\t\tcube (numpy array) : The data cube that is to be smoothed.\n\t\tfwhm (float) : The fwhm of the Gaussian filter.\n\t\tnu_axis (int) : Frequency axis\n\n\tReturns:\n\t\tSmoothed_cube\n\t\"\"\"\n\tone = np.ones(cube.shape[nu_axis])\n\toutput_cube = smooth_lightcone_gauss(cube, fwhm*one, nu_axis=nu_axis)\n\treturn output_cube\n\ndef smooth_lightcone_tophat(lightcone, redshifts, 
dz):\n\t\"\"\"\n\tThis smooths the slices perpendicular to the third axis of the lightcone by tophat filter.\n\n\tParameters:\n\t\tlightcone (numpy array) : The lightcone that is to be smoothed.\n\t\tredshifts (numpy array) : The redshift of each slice along the third axis.\n\t\tdz (float) : redshift width \n\n\tReturns:\n\t\tSmoothed_lightcone\n\t\"\"\"\n\toutput_lightcone = np.zeros(lightcone.shape)\n\tfor i in range(output_lightcone.shape[2]):\n\t\tz_out_low = redshifts[i]-dz[i]/2\n\t\tz_out_high = redshifts[i]+dz[i]/2\n\t\tidx_low = int(np.ceil(find_idx(redshifts, z_out_low)))\n\t\tidx_high = int(np.ceil(find_idx(redshifts, z_out_high)))\n\t\toutput_lightcone[:,:,i] = np.mean(lightcone[:,:,idx_low:idx_high+1], axis=2)\n\treturn output_lightcone\n\ndef smooth_lightcone_gauss(lightcone,fwhm,nu_axis=2):\n\t\"\"\"\n\tThis smooths the slices perpendicular to the third axis of the lightcone by tophat filter.\n\n\tParameters:\n\t\tlightcone (numpy array) : The lightcone that is to be smoothed.\n\t\tfwhm (numpy array) : The fwhm values of the Gaussian filter at each slice along frequency axis.\n\t\tnu_axis (int) : frequency axis \n\n\tReturns:\n\t\tSmoothed_lightcone\n\t\"\"\"\n\tassert lightcone.shape[nu_axis] == len(fwhm)\n\toutput_lightcone = np.zeros(lightcone.shape)\n\tfor i in range(output_lightcone.shape[nu_axis]):\n\t\tif nu_axis==0: output_lightcone[i,:,:] = smooth_gauss(lightcone[i,:,:], fwhm=fwhm[i])\n\t\telif nu_axis==1: output_lightcone[:,i,:] = smooth_gauss(lightcone[:,i,:], fwhm=fwhm[i])\n\t\telse: output_lightcone[:,:,i] = smooth_gauss(lightcone[:,:,i], fwhm=fwhm[i])\n\treturn output_lightcone\n\ndef hubble_parameter(z):\n\t\"\"\"\n\tIt calculates the Hubble parameter at any redshift.\n\t\"\"\"\n\tpart = np.sqrt(const.Omega0*(1.+z)**3+const.lam)\n\treturn const.H0 * part\n\n\n\n", "id": "4589299", "language": "Python", "matching_score": 2.2377469539642334, "max_stars_count": 1, "path": "t2c/smoothing.py" }, { "content": "import numpy as np\nfrom . import usefuls\nfrom scipy.signal import fftconvolve\nfrom skimage import morphology\n\ndef spa_np(data, xth=0.95, nscales=30, binning='log'):\n\t\"\"\"\n\t@Zahn et al. 
(2007)\n\t\"\"\"\n\tRmx = data.shape[0]\n\tif binning=='linear': Rs_ = np.linspace(1,Rmx/2.,nscales)\n\telse: Rs_ = np.exp(np.linspace(np.log(2.),np.log(Rmx/2.),nscales))\n\tins = np.zeros(nscales)\n\t#nns = np.zeros(nscales)\n\trad = np.zeros(data.shape)\n\tfor i in range(nscales):\n\t\tra = Rs_[i]\n\t\t#kernel = put_sphere(np.zeros((Rmx,Rmx,Rmx)), [Rmx/2.,Rmx/2.,Rmx/2.], ra, label=1.)\n\t\tkernel = morphology.ball(ra)\n\t\tsmooth = fftconvolve(data, kernel/kernel.sum(), mode='same')\n\t\trad[smooth>=xth] = ra\n\t\tprint(\"Comepleted {0:.1f} %\".format(100*(i+1)/nscales))\t\n\tfor i in range(nscales): ins[i] = rad[rad==Rs_[i]].size\n\treturn Rs_, ins\n\n\ndef put_sphere(array, centre, radius, label=1, periodic=True, verbose=False):\n\tassert array.ndim == 3\n\tnx, ny, nz = array.shape\n\taw = np.argwhere(np.isfinite(array))\n\tRR = ((aw[:,0]-centre[0])**2 + (aw[:,1]-centre[1])**2 + (aw[:,2]-centre[2])**2).reshape(array.shape)\n\tarray[RR<=radius**2] = label\n\tif periodic: \n\t\tRR2 = ((aw[:,0]+nx-centre[0])**2 + (aw[:,1]+ny-centre[1])**2 + (aw[:,2]+nz-centre[2])**2).reshape(array.shape)\n\t\tarray[RR2<=radius**2] = label\n\t\tif verbose: print(\"Periodic circle of radius %d made at (%d,%d,%d)\"%(radius, centre[0], centre[1], centre[2]))\n\telse: \n\t\tif verbose: print(\"Non-periodic circle of radius %d made at (%d,%d,%d)\"%(radius, centre[0], centre[1], centre[2]))\n\treturn array\n\n", "id": "210929", "language": "Python", "matching_score": 1.2874491214752197, "max_stars_count": 1, "path": "t2c/spa_np.py" } ]
1.313672
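The NGillet record above stores 21-cm analysis helpers (halo lists, region identification, smoothing). A minimal sketch of the two simplest entry points shown in that record, assuming the code is importable as a package named t2c (as its relative imports suggest) with its remaining modules present; the 64x64 array below is synthetic test data, not part of the dataset:

import numpy as np
from t2c.identify_regions import bubbles_from_fixed_threshold
from t2c.smoothing import gauss_kernel

# Synthetic 2-D "brightness temperature" slice used purely for illustration.
field = np.random.normal(loc=0.0, scale=5.0, size=(64, 64))

# With upper_lim=True, pixels at or below the fixed threshold are flagged
# as regions of interest (a boolean array of the same shape is returned).
mask = bubbles_from_fixed_threshold(field, threshold=0, upper_lim=True)
print(mask.sum(), "pixels flagged out of", mask.size)

# Normalised 2-D Gaussian kernel of a given FWHM, as built inside smooth_gauss.
kernel = gauss_kernel(64, fwhm=4.0)
assert np.isclose(kernel.sum(), 1.0)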
GTmac
[ { "content": "from sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import f1_score\nimport torch\nimport numpy\n\n\nclass Str2id:\n def __init__(self):\n self.str2id_dict = dict()\n self.id2str_dict = dict()\n self.freeze = False\n\n def str2id(self, s):\n try:\n return self.str2id_dict[s]\n except:\n if self.freeze:\n raise Exception('Already Freezed')\n else:\n sid = len(self.str2id_dict)\n self.str2id_dict[s] = sid\n self.id2str_dict[sid] = s\n return self.str2id_dict[s]\n\n def id2str(self, sid):\n return self.id2str_dict[sid]\n\n\nclass NodeClassifier:\n def __init__(self, node_map, label_filename, used_label):\n self.Xid = []\n self.Y = []\n used_label = set(used_label)\n for line in open(label_filename):\n line = line.strip().split(' ')\n if int(line[1]) in used_label:\n self.Xid.append(node_map.str2id(line[0]))\n self.Y.append(int(line[1]))\n self.split(0.3, 41)\n\n def split(self, test_size, random_state):\n self.Xid_train, self.Xid_test, self.Y_train, self.Y_test = train_test_split(\n self.Xid, self.Y, test_size=test_size, random_state=random_state\n )\n\n def evaluate(self, model, use_cuda):\n u = model.forward(self.Xid_train, is_start=True, directed=True)\n v = model.forward(self.Xid_train, is_start=False, directed=True)\n if use_cuda:\n X_train = torch.cat((u, v), dim=1).cpu().data.numpy()\n else:\n X_train = torch.cat((u, v), dim=1).data.numpy()\n u = model.forward(self.Xid_test, is_start=True, directed=True)\n v = model.forward(self.Xid_test, is_start=False, directed=True)\n if use_cuda:\n X_test = torch.cat((u, v), dim=1).cpu().data.numpy()\n else:\n X_test = torch.cat((u, v), dim=1).data.numpy()\n y_train = [y for y in self.Y_train]\n y_test = [y for y in self.Y_test]\n clf = LogisticRegression(C=1e5, class_weight='balanced')\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n return f1_score(y_test, y_pred, pos_label=None, average='macro')\n\n\ndef get_word_embeddings(word_embedding_filename, word_map):\n f = open(word_embedding_filename)\n first = f.readline()\n first = first.strip().split(' ')\n word_emb_dim = int(first[1])\n numpy.random.seed(71)\n word_embs = numpy.random.uniform(\n low=-0.5 / word_emb_dim,\n high=0.5 / word_emb_dim,\n size=(len(word_map.str2id_dict) + 1, word_emb_dim)\n )\n for line in f:\n line = line.strip().split(' ')\n if line[0] in word_map.str2id_dict:\n word_embs[word_map.str2id(line[0])] = [float(e) for e in line[1:]]\n return word_embs\n", "id": "10001606", "language": "Python", "matching_score": 2.544297218322754, "max_stars_count": 8, "path": "utils.py" }, { "content": "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy\nimport copy\nimport os\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence\nimport numpy as np\nfrom utils import Str2id\nfrom collections import Counter\nfrom utils import NodeClassifier, get_word_embeddings\nfrom tqdm import tqdm_notebook as tqdm\nfrom tqdm import tnrange\nfrom datasets import NormalEdgeDataset, NodeTextEdgeDataset, MultipleLableDataset\n\n\nclass SemiSkipGram(nn.Module):\n def __init__(self, emb_dim, label_dim, use_cuda):\n super(SemiSkipGram, self).__init__()\n self.use_cuda = use_cuda\n self.ranking = nn.Sequential(nn.Linear(emb_dim * 2, label_dim))\n self.criteria = 
nn.BCEWithLogitsLoss(size_average=False)\n\n def stucture_loss(self, emb_u, emb_v, neg_emb_v):\n losses = []\n score = torch.mul(emb_u, emb_v).squeeze()\n score = torch.sum(score, dim=1)\n score = F.logsigmoid(score)\n neg_score = torch.bmm(neg_emb_v, emb_u.unsqueeze(2)).squeeze().view(-1)\n neg_score = F.logsigmoid(-1 * neg_score)\n return -1 * (torch.sum(score) + torch.sum(neg_score))\n\n def edge_label_loss(self, emb_u, emb_v, labels):\n embeddings = torch.cat((emb_u, emb_v), dim=1)\n ranking = self.ranking(embeddings)\n loss = self.criteria(ranking, labels)\n return loss\n\n def predicate_edge_label(self, emb_u, emb_v):\n embeddings = torch.cat((emb_u, emb_v), dim=1)\n ranking = self.ranking(embeddings)\n return ranking\n\n\nclass NodeRepresentation(nn.Module):\n def __init__(self, emb_size, emb_dim, use_cuda):\n super(NodeRepresentation, self).__init__()\n self.emb_size = emb_size\n self.emb_dim = emb_dim\n self.u_embeddings = nn.Embedding(emb_size, emb_dim, sparse=True)\n self.v_embeddings = nn.Embedding(emb_size, emb_dim, sparse=True)\n self.init_emb()\n self.use_cuda = use_cuda\n if use_cuda:\n self.cuda()\n\n def init_node_embedding(self, node_embs):\n if self.use_cuda:\n self.v_embeddings.weight.data = torch.from_numpy(\n node_embs[:, self.emb_dim:]\n ).cuda()\n else:\n self.u_embeddings.weight.data = torch.from_numpy(\n node_embs[:, :self.emb_dim]\n )\n self.v_embeddings.weight.data = torch.from_numpy(\n node_embs[:, self.emb_dim:]\n )\n\n def init_emb(self):\n initrange = 0.5 / self.emb_dim\n self.u_embeddings.weight.data.uniform_(-initrange, initrange)\n self.v_embeddings.weight.data.uniform_(-0, 0)\n\n def forward(self, nids, is_start, directed):\n losses = []\n if self.use_cuda:\n nids = Variable(torch.cuda.LongTensor(nids))\n else:\n nids = Variable(torch.LongTensor(nids))\n if directed:\n if is_start:\n return self.u_embeddings(nids)\n else:\n return self.v_embeddings(nids)\n else:\n return torch.cat(\n (self.u_embeddings(nids), self.v_embeddings(nids)), dim=1\n )\n\n def save_embedding(self, node_map, file_name, use_cuda):\n embeddings = self.u_embeddings.weight\n embeddings = embeddings.cpu().data.numpy()\n fout = open(file_name, 'w')\n fout.write(\"%d %d\\n\" % (self.emb_size, self.emb_dim))\n for wid in range(self.emb_size):\n e = embeddings[wid]\n w = node_map.id2str(wid)\n e = ' '.join(map(lambda x: str(x), e))\n fout.write('%s %s\\n' % (w, e))\n", "id": "1596157", "language": "Python", "matching_score": 2.8697524070739746, "max_stars_count": 8, "path": "model.py" }, { "content": "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy\nimport copy\nimport matplotlib\nimport os\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport torch.nn.functional as F\nsns.set(style=\"ticks\")\nfrom torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence\nimport numpy as np\nfrom utils import Str2id\nfrom collections import Counter\nfrom utils import NodeClassifier\n#from tqdm import tqdm_notebook as tqdm\nfrom tqdm import tqdm as tqdm\nfrom tqdm import tnrange\n\n\nclass NormalEdgeDataset(Dataset):\n \"\"\"Edge dataset.\"\"\"\n\n def __init__(\n self,\n node_map1,\n node_map2,\n filename,\n neg_sampling_size,\n from_random_walk=False,\n window_size=None\n ):\n \"\"\"\n Args:\n edge_list (list): A list containts all edges. 
Each edge is a tuple\n \"\"\"\n if from_random_walk:\n assert window_size is not None\n self.random_walk_to_edge_list(\n node_map1, node_map2, filename, neg_sampling_size, window_size\n )\n else:\n self.init_from_edge_list_file(\n node_map1, node_map2, filename, neg_sampling_size\n )\n\n def random_walk_to_edge_list(\n self, node_map1, node_map2, in_file_path, neg_sampling_size, window_size\n ):\n self.edge_list = []\n self.node_list = []\n self.graph = []\n for line in tqdm(open(in_file_path)):\n line = line.strip().split(' ')\n n = len(line)\n for i in range(len(line)):\n left = max(0, i - window_size)\n right = min(i + window_size, n)\n for j in range(left, right):\n if j == i:\n continue\n u = node_map1.str2id(line[i])\n v = node_map2.str2id(line[j])\n if u >= len(self.graph):\n for _ in range(u - len(self.graph) + 1):\n self.graph.append(set())\n self.graph[u].add(v)\n self.edge_list.append([u, v, []])\n self.node_list.append(v)\n self.init_sample_table()\n self.neg_sampling_size = neg_sampling_size\n\n def init_from_edge_list_file(\n self, node_map1, node_map2, edge_list_file, neg_sampling_size\n ):\n self.edge_list = []\n self.node_list = []\n self.graph = []\n for line in tqdm(open(edge_list_file)):\n line = line.strip().split(' ')\n line[0] = node_map1.str2id(line[0])\n line[1] = node_map2.str2id(line[1])\n if line[0] >= len(self.graph):\n for _ in range(line[0] - len(self.graph) + 1):\n self.graph.append(set())\n self.graph[line[0]].add(line[1])\n self.edge_list.append([line[0], line[1], []])\n self.node_list.append(line[1])\n self.init_sample_table()\n self.neg_sampling_size = neg_sampling_size\n\n def init_sample_table(self):\n self.sample_table = []\n sample_table_size = 1e8\n node_frequency = list(Counter(self.node_list).items())\n nids = [f[0] for f in node_frequency]\n values = [f[1] for f in node_frequency]\n pow_frequency = numpy.array(values)**0.75\n nodes_pow = numpy.sum(pow_frequency)\n ratio = pow_frequency / nodes_pow\n count = numpy.round(ratio * sample_table_size)\n for index, c in enumerate(count):\n self.sample_table += [nids[index]] * int(c)\n self.sample_table = numpy.array(self.sample_table)\n\n def negative_sampling1(self):\n #numpy.random.seed(41)\n neg = numpy.random.choice(\n self.sample_table,\n size=(len(self.edge_list), self.neg_sampling_size)\n )\n # neg=numpy.random.choice(list(set(self.node_list)),size=(len(self.edge_list),self.neg_sampling_size))\n for i in range(len(self.edge_list)):\n self.edge_list[i][2] = list(neg[i])\n\n def negative_sampling2(self):\n #numpy.random.seed(41)\n for i in range(len(self.edge_list)):\n neg = numpy.random.choice(\n self.sample_table, size=(self.neg_sampling_size)\n )\n self.edge_list[i][2] = list(neg)\n\n def negative_sampling(self):\n #numpy.random.seed(41)\n negs = numpy.random.choice(\n self.sample_table,\n size=(len(self.edge_list), self.neg_sampling_size)\n )\n for i in tqdm(range(len(self.edge_list))):\n u = self.edge_list[i][0]\n v = self.edge_list[i][1]\n neg = list(filter(lambda x: x not in self.graph[u], negs[i]))\n while len(neg) < self.neg_sampling_size:\n nid = numpy.random.choice(self.sample_table)\n if nid in self.graph[u]:\n continue\n neg.append(nid)\n self.edge_list[i][2] = neg\n\n def __len__(self):\n return len(self.edge_list)\n\n def __getitem__(self, idx):\n return self.edge_list[idx]\n\n\nclass NodeTextEdgeDataset(Dataset):\n \"\"\"Edge dataset.\"\"\"\n\n def __init__(\n self, node_map1, node_map2, word_map, edge_list_file, text_file,\n neg_sampling_size\n ):\n \"\"\"\n Args:\n edge_list 
(list): A list containts all edges. Each edge is a tuple\n \"\"\"\n self.edge_list = []\n self.node_list = set()\n self.text_list = dict()\n for line in open(edge_list_file):\n line = line.strip().split(' ')\n line[0] = node_map1.str2id(line[0])\n line[1] = node_map2.str2id(line[1])\n self.edge_list.append([line[0], line[1], []])\n self.node_list.add(line[1])\n self.node_list = list(self.node_list)\n for line in open(text_file):\n line = line.replace('||||', ' ').strip().split(' ')\n self.text_list[node_map2.str2id(line[0])] = [\n word_map.str2id(w) for w in line[1:]\n ]\n self.neg_sampling_size = neg_sampling_size\n self.negative_sampling()\n\n def negative_sampling(self):\n numpy.random.seed(41)\n neg = numpy.random.choice(\n list(self.node_list),\n size=(len(self.edge_list), self.neg_sampling_size)\n )\n for i in range(len(self.edge_list)):\n self.edge_list[i][2] = list(neg[i])\n\n def __len__(self):\n return len(self.edge_list)\n\n def __getitem__(self, idx):\n node, text, neg_text = self.edge_list[idx]\n text = self.text_list[text]\n neg_text = [self.text_list[t] for t in neg_text]\n return node, text, neg_text\n\n\nclass MultipleLableDataset(Dataset):\n \"\"\"\n Multiple label dataset\n Input is a file, where each line contains:\n source_node, target_node and some labels\n \"\"\"\n\n def __init__(\n self,\n u_map,\n v_map,\n label_filename,\n total_label_count,\n ratio=1.0,\n topk=-1,\n use_all_zeros=False\n ):\n \"\"\"\n Args:\n edge_list (list): A list containts all edges. Each edge is a tuple\n \"\"\"\n print('Use %0.2f labels' % ratio)\n self.labels = []\n fin = open(label_filename)\n fin.readline()\n not_count_topk = 0\n for idx, line in enumerate(fin):\n if numpy.random.rand() > ratio:\n continue\n line = line.strip().split(' ')\n line[0] = u_map.str2id(line[0])\n line[1] = v_map.str2id(line[1])\n label = [float(x) for x in line[2:]]\n label = numpy.array(label)\n if (label == 0).all():\n if not use_all_zeros:\n continue\n if topk > 0:\n if len(set(label) - {0, 1}) > 0:\n s = numpy.argsort(label)\n label[s[-topk:]] = 1.0\n label[s[:-topk]] = 0.0\n else:\n not_count_topk += 1\n self.labels.append([line[0], line[1], label])\n print('not count topk', not_count_topk)\n\n def __len__(self):\n return len(self.labels)\n\n def __getitem__(self, idx):\n return self.labels[idx]\n\n\nif __name__ == '__main__':\n edge_list_path = \"/mnt/store1/plus1lab/multilabel-data/v1_aminer_10k_1/original-edgelist.txt\"\n edge_label_path = \"/mnt/store1/plus1lab/multilabel-model/aegcn/v1_aminer_10k_1/prop-mat.txt\"\n node_map = Str2id()\n label_map = Str2id()\n edges = []\n NormalEdgeDataset(\n node_map,\n node_map,\n edge_list_path,\n 10,\n from_random_walk=True,\n window_size=10\n )\n node_map.freeze = True\n d = MultipleLableDataset(node_map, node_map, edge_label_path, 100, 1.0, 5)\n print(d[999])\n", "id": "2814801", "language": "Python", "matching_score": 1.5267219543457031, "max_stars_count": 8, "path": "datasets.py" }, { "content": "\"\"\"scoring.py: Script that demonstrates the multi-label classification used.\"\"\"\n\nimport numpy\nimport sys\nimport os\n\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nfrom collections import defaultdict\nfrom gensim.models import Word2Vec, KeyedVectors\nfrom six import iteritems\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import f1_score, confusion_matrix\nfrom scipy.io import loadmat\nfrom sklearn.utils import shuffle as skshuffle\nfrom 
sklearn.preprocessing import MultiLabelBinarizer\n\nclass TopKRanker(OneVsRestClassifier):\n def predict(self, X, top_k_list):\n assert X.shape[0] == len(top_k_list)\n probs = numpy.asarray(super(TopKRanker, self).predict_proba(X))\n all_labels = []\n for i, k in enumerate(top_k_list):\n probs_ = probs[i, :]\n labels = self.classes_[probs_.argsort()[-k:]].tolist()\n all_labels.append(labels)\n return all_labels\n\ndef sparse2graph(x):\n G = defaultdict(lambda: set())\n cx = x.tocoo()\n for i,j,v in zip(cx.row, cx.col, cx.data):\n G[i].add(j)\n return {str(k): [str(x) for x in v] for k,v in iteritems(G)}\n\ndef main(predefined_args=None):\n parser = ArgumentParser(\"scoring\",\n formatter_class=ArgumentDefaultsHelpFormatter,\n conflict_handler='resolve')\n parser.add_argument(\"--emb\", required=True, help='Embeddings file, could be in word2vec or Matlab format.')\n parser.add_argument(\"--network\", required=True,\n help='A .mat file containing the adjacency matrix and node labels of the input network.')\n parser.add_argument(\"--adj-matrix-name\", default='network',\n help='Variable name of the adjacency matrix inside the .mat file.')\n parser.add_argument(\"--label-matrix-name\", default='group',\n help='Variable name of the labels matrix inside the .mat file.')\n parser.add_argument(\"--num-shuffles\", default=2, type=int, help='Number of shuffles.')\n parser.add_argument(\"--all\", default=False, action='store_true',\n help='''The embeddings are evaluated on all training percents\n from 10 to 90 when this flag is set to true.\n By default, only training percents of 10, 20 and 50 are used.''')\n if predefined_args:\n args = parser.parse_args(predefined_args)\n else:\n args = parser.parse_args()\n # 0. Files\n embeddings_file = args.emb\n matfile = args.network\n\n # 1. Load labels\n mat = loadmat(matfile)\n labels_matrix = mat[args.label_matrix_name]\n labels_count = labels_matrix.shape[1]\n mlb = MultiLabelBinarizer(range(labels_count))\n\n # 2. Load Embeddings\n _, file_extension = os.path.splitext(embeddings_file)\n # assume that the only key in the .mat file is the embeddings matrix\n if file_extension == '.mat':\n emb_mat = loadmat(embeddings_file)\n feature_key = [y for y in emb_mat.keys() if y not in ('__header__', '__version__', '__globals__')]\n assert len(feature_key) == 1\n features_matrix = emb_mat[feature_key[0]]\n else:\n model = KeyedVectors.load_word2vec_format(embeddings_file, binary=False)\n # Map nodes to their features (note: assumes nodes are labeled as integers 0:N-1)\n N = len(model.vocab)\n features_matrix = numpy.asarray([model[str(node)] for node in range(N)])\n\n # 2. Shuffle, to create train/test groups\n shuffles = []\n for x in range(args.num_shuffles):\n shuffles.append(skshuffle(features_matrix, labels_matrix))\n\n # 3. 
to score each train/test group\n all_results = defaultdict(list)\n\n if args.all:\n training_percents = numpy.asarray(range(1, 10)) * .1\n else:\n training_percents = [0.1, 0.2, 0.5]\n for train_percent in training_percents:\n for shuf in shuffles:\n\n X, y = shuf\n\n training_size = int(train_percent * X.shape[0])\n\n X_train = X[:training_size, :]\n y_train_ = y[:training_size]\n\n y_train = [[] for x in range(y_train_.shape[0])]\n\n cy = y_train_.tocoo()\n for i, j in zip(cy.row, cy.col):\n y_train[i].append(j)\n\n assert sum(len(l) for l in y_train) == y_train_.nnz\n\n X_test = X[training_size:, :]\n y_test_ = y[training_size:]\n\n y_test = [[] for _ in range(y_test_.shape[0])]\n\n cy = y_test_.tocoo()\n for i, j in zip(cy.row, cy.col):\n y_test[i].append(j)\n\n clf = TopKRanker(LogisticRegression())\n clf.fit(X_train, y_train_)\n\n # find out how many labels should be predicted\n top_k_list = [len(l) for l in y_test]\n preds = clf.predict(X_test, top_k_list)\n\n results = {}\n averages = [\"micro\", \"macro\"]\n for average in averages:\n results[average] = f1_score(mlb.fit_transform(y_test), mlb.fit_transform(preds), average=average)\n\n all_results[train_percent].append(results)\n\n print ('Results, using embeddings of dimensionality', X.shape[1])\n print ('-------------------')\n for train_percent in sorted(all_results.keys()):\n print ('Train percent:', train_percent)\n for index, result in enumerate(all_results[train_percent]):\n print ('Shuffle #%d: ' % (index + 1), result)\n avg_score = defaultdict(float)\n for score_dict in all_results[train_percent]:\n for metric, score in iteritems(score_dict):\n avg_score[metric] += score\n for metric in avg_score:\n avg_score[metric] /= len(all_results[train_percent])\n print ('Average score:', dict(avg_score))\n print ('-------------------')\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "id": "2960149", "language": "Python", "matching_score": 3.6257152557373047, "max_stars_count": 1, "path": "src/scoring.py" }, { "content": "import logging\nimport os\nimport sys\n\nimport numpy as np\n\nfrom argparse import ArgumentParser, FileType, ArgumentDefaultsHelpFormatter\nfrom scipy.io import mmread, mmwrite, loadmat, savemat\nfrom scipy.sparse import csc_matrix, spdiags\nfrom sklearn import random_projection\n\ndef randne_projection(A, q=3, dim=128):\n transformer = random_projection.GaussianRandomProjection(n_components=dim, random_state=42)\n # Random projection for A\n cur_U = transformer.fit_transform(A)\n U_list = [cur_U]\n\n for i in range(2, q + 1):\n cur_U = A @ cur_U\n U_list.append(cur_U)\n return U_list\n\ndef randne_merge(U_list, weights):\n U = np.zeros_like(U_list[0])\n for cur_U, weight in zip(U_list, weights):\n U += cur_U * weight\n return U\n\ndef main():\n parser = ArgumentParser('randne',\n formatter_class=ArgumentDefaultsHelpFormatter,\n conflict_handler='resolve')\n parser.add_argument('--input', nargs='?', required=True,\n help='Input graph file')\n parser.add_argument('--matfile-variable-name', default='network',\n help='Variable name of adjacency matrix inside a .mat file')\n parser.add_argument('--output', required=True,\n help='Output representation file')\n parser.add_argument('--use-trans-matrix', default=False, action='store_true',\n help='''The input matrix for RandNE. 
Adjacency matrix is used by default;\n set this flag to use the transition matrix instead.''')\n parser.add_argument('-q', '--order', default=3, type=int,\n help='Maximum order of adjacency matrix.')\n parser.add_argument('-d', '--representation-size', default=128, type=int,\n help='Number of latent dimensions to learn for each node.')\n parser.add_argument('--weights', nargs='+', required=True,\n help='Weights for each power of the adjacency matrix (or transition matrix).')\n args = parser.parse_args()\n\n # Process args\n mat_obj = loadmat(args.input)\n A = mat_obj[args.matfile_variable_name]\n if args.use_trans_matrix:\n N = A.shape[0]\n normalizer = spdiags(np.squeeze(1.0 / csc_matrix.sum(A, axis=1) ), 0, N, N)\n input_mat = normalizer @ A\n else:\n input_mat = A\n weights = list(map(float, args.weights))\n\n # Start RandNE\n U_list = randne_projection(input_mat, q=args.order, dim=args.representation_size)\n U = randne_merge(U_list, weights)\n\n savemat(args.output, {'emb': U})\n\nif __name__ == '__main__':\n sys.exit(main())\n", "id": "8760746", "language": "Python", "matching_score": 3.506652593612671, "max_stars_count": 1, "path": "src/randne.py" }, { "content": "import csv\nimport itertools\nimport math\nimport matplotlib\nimport time\nimport logging\nimport sys\nimport os\nimport random\nimport warnings\nimport gensim\nimport pandas as pd\nimport numpy as np\n\nfrom tqdm import tqdm_notebook as tqdm\nfrom collections import Counter, defaultdict\n\nfrom pathlib import Path\nfrom sklearn import random_projection\nfrom sklearn.preprocessing import normalize, scale, MultiLabelBinarizer\nfrom scipy.sparse import coo_matrix, csr_matrix, csc_matrix, spdiags\n\n# projection method: choose from Gaussian and Sparse\n# input matrix: choose from adjacency and transition matrix\n# alpha adjusts the weighting of nodes according to their degree\ndef fastrp_projection(A, q=3, dim=128, projection_method='gaussian', input_matrix='adj', alpha=None):\n assert input_matrix == 'adj' or input_matrix == 'trans'\n assert projection_method == 'gaussian' or projection_method == 'sparse'\n \n if input_matrix == 'adj':\n M = A\n else:\n N = A.shape[0]\n normalizer = spdiags(np.squeeze(1.0 / csc_matrix.sum(A, axis=1) ), 0, N, N)\n M = normalizer @ A\n # Gaussian projection matrix\n if projection_method == 'gaussian':\n transformer = random_projection.GaussianRandomProjection(n_components=dim, random_state=42)\n # Sparse projection matrix\n else:\n transformer = random_projection.SparseRandomProjection(n_components=dim, random_state=42)\n Y = transformer.fit(M)\n # Random projection for A\n if alpha is not None:\n Y.components_ = Y.components_ @ spdiags( \\\n np.squeeze(np.power(csc_matrix.sum(A, axis=1), alpha)), 0, N, N)\n cur_U = transformer.transform(M)\n U_list = [cur_U]\n \n for i in range(2, q + 1):\n cur_U = M @ cur_U\n U_list.append(cur_U)\n return U_list\n\n# When weights is None, concatenate instead of linearly combines the embeddings from different powers of A\ndef fastrp_merge(U_list, weights, normalization=False):\n dense_U_list = [_U.todense() for _U in U_list] if type(U_list[0]) == csc_matrix else U_list\n _U_list = [normalize(_U, norm='l2', axis=1) for _U in dense_U_list] if normalization else dense_U_list\n\n if weights is None:\n return np.concatenate(_U_list, axis=1)\n U = np.zeros_like(_U_list[0])\n for cur_U, weight in zip(_U_list, weights):\n U += cur_U * weight\n # U = scale(U.todense())\n # U = normalize(U.todense(), norm='l2', axis=1)\n return scale(U.todense()) if type(U) == 
csr_matrix else scale(U)\n\n# A is always the adjacency matrix\n# the choice between adj matrix and trans matrix is decided in the conf\ndef fastrp_wrapper(A, conf):\n U_list = fastrp_projection(A,\n q=len(conf['weights']),\n dim=conf['dim'],\n projection_method=conf['projection_method'],\n input_matrix=conf['input_matrix'],\n alpha=conf['alpha'],\n )\n U = fastrp_merge(U_list, conf['weights'], conf['normalization'])\n return U\n\ndef get_emb_filename(prefix, conf):\n return prefix + '-dim=' + str(conf['dim']) + ',projection_method=' + conf['projection_method'] \\\n + ',input_matrix=' + conf['input_matrix'] + ',normalization=' + str(conf['normalization']) \\\n + ',weights=' + (','.join(map(str, conf['weights'])) if conf['weights'] is not None else 'None') \\\n + ',alpha=' + (str(conf['alpha']) if 'alpha' in conf else '') \\\n + ',C=' + (str(conf['C']) if 'alpha' in conf else '1.0') \\\n + '.mat'\n\n", "id": "7556545", "language": "Python", "matching_score": 2.6510872840881348, "max_stars_count": 31, "path": "fastrp.py" } ]
2.76042
jashmehta3300
[ { "content": "from rest_framework import permissions\nfrom .models import *\nfrom django.contrib.auth.models import User\nfrom requests.api import request\n\n\nclass Permit(permissions.BasePermission):\n message = \"Access Denied\"\n\n def has_permission(self, request, view):\n try:\n m = Manager.objects.get(user_ref=request.user)\n return True\n except:\n return False\n\n\nfrom rest_framework import permissions\n\n\nclass IsOwnerOrReadOnly(permissions.BasePermission):\n def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n return obj.owner == request.user\n\n\nclass Permit2(permissions.BasePermission):\n message = \"Access Denied\"\n\n def has_permission(self, request, view):\n try:\n s = Salesperson.objects.get(User_ref=request.user)\n return True\n except:\n return False\n", "id": "7450237", "language": "Python", "matching_score": 2.54923415184021, "max_stars_count": 4, "path": "salespersonTrackerREST/permissions.py" }, { "content": "from rest_framework import permissions\n\n\nclass IsUserOrReadOnly(permissions.BasePermission):\n def has_permission(self, request, view):\n return request.method in permissions.SAFE_METHODS or request.user\n\n\nclass Permit(permissions.BasePermission):\n message: \"Not allowed To Access\"\n\n def has_permission(self, request, view):\n if request.user.is_authenticated:\n return True\n else:\n return False\n\n # def has_object_permission(self, request, view, obj):\n # return obj.user == request.user\n", "id": "720219", "language": "Python", "matching_score": 0.0874260887503624, "max_stars_count": 7, "path": "BEProjectsApp/permissions.py" }, { "content": "from rest_framework import serializers\nfrom .models import *\nfrom django.contrib.auth.models import User\n\n\nclass BillSerializer(serializers.ModelSerializer):\n class Meta:\n model = Bill\n fields = \"__all__\"\n\n\nclass DailyTargetSerializer(serializers.ModelSerializer):\n class Meta:\n model = DailyTarget\n fields = \"__all__\"\n\n\nclass Userserializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = [\"username\"]\n\n\nclass ManagerSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Manager\n fields = [\"user_ref\", \"Name\", \"Photo\", \"Age\"]\n\n\nclass SalespersonSerializer(serializers.ModelSerializer):\n User_ref = Userserializer(many=False, read_only=True)\n\n class Meta:\n model = Salesperson\n fields = [\n \"id\",\n \"User_ref\",\n \"Managed_By\",\n \"Name\",\n \"Photo\",\n \"Age\",\n \"last_location_lat\",\n \"last_location_long\",\n \"isLoggedin\",\n ]\n\n\nclass ItemAssignSerializer(serializers.ModelSerializer):\n class Meta:\n model = ItemAssign\n fields = [\n \"Item_Ref\",\n \"Assign_Date\",\n \"Assign_Time\",\n \"Assigned_By\",\n \"Assigned_To\",\n \"assign_quantity\",\n ]\n\n\nclass WarehouseSerializer(serializers.ModelSerializer):\n class Meta:\n model = Warehouse\n fields = [\n \"Item_Group_Code\",\n \"Company_Item_code\",\n \"Company_Code\",\n \"Name\",\n \"Description\",\n \"Quantity\",\n \"pk\"\n # \"Photo\",\n ]\n\n\nclass InventorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Inventory\n fields = \"__all__\"\n\n\nclass WarehouseUpdateSerializer(serializers.FileField):\n file = serializers.FileField()\n fields = [\"file\"]\n", "id": "5600241", "language": "Python", "matching_score": 2.371736526489258, "max_stars_count": 0, "path": "salespersonTrackerREST/serializers.py" }, { "content": "from django.contrib.auth import password_validation\nfrom .models 
import Teacher, Project, Contributor, User\nfrom rest_framework import serializers\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = [\n \"id\",\n \"username\",\n \"first_name\",\n \"last_name\",\n \"email\",\n \"is_contributor\",\n \"is_teacher\",\n ]\n\n\nclass ContributorSerializer(serializers.ModelSerializer):\n user = UserSerializer(many=False, read_only=True)\n\n class Meta:\n model = Contributor\n fields = \"__all__\"\n\n\nclass TeacherSerializer1(serializers.ModelSerializer):\n user = UserSerializer(many=False, read_only=True)\n\n class Meta:\n model = Teacher\n fields = \"__all__\"\n\n\nclass AllProjectSerializer(serializers.ModelSerializer):\n # teacher = serializers.HyperlinkedIdentityField(\n # many=False, view_name=\"BEProjectsApp:teacher-detail\", read_only=True\n # )\n # contributor = serializers.HyperlinkedRelatedField(\n # many=True, view_name=\"api:contributor-detail\", read_only=True\n # )\n\n contributors = ContributorSerializer(many=True, read_only=True)\n teacher = TeacherSerializer1(many=False, read_only=True)\n\n class Meta:\n model = Project\n fields = \"__all__\"\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n contributors = ContributorSerializer(many=True, read_only=True)\n teacher = TeacherSerializer1(many=False, read_only=True)\n\n class Meta:\n model = Project\n exclude = [\n \"report\",\n \"executable\",\n \"github_repo\",\n \"demo_video\",\n \"company\",\n \"supervisor\",\n \"journal\",\n ]\n\n\nclass TeacherSerializer(serializers.ModelSerializer):\n # project = serializers.HyperlinkedRelatedField(\n # many=True, view_name=\"api:project-detail\", read_only=True\n # )\n project = ProjectSerializer(many=True, read_only=True)\n user = UserSerializer(read_only=False)\n\n url = serializers.HyperlinkedIdentityField(view_name=\"api:teacher-detail\")\n\n class Meta:\n model = Teacher\n fields = (\"pk\", \"url\", \"subject\", \"project\", \"user\")\n\n # def create(self, validated_data):\n # user = User(\n # first_name=validated_data[\"user\"][\"first_name\"],\n # last_name=validated_data[\"user\"][\"last_name\"],\n # email=validated_data[\"user\"][\"email\"],\n # username=validated_data[\"user\"][\"username\"],\n # )\n # user.set_password(validated_data[\"user\"][\"password\"])\n # user.save()\n # teacher = Teacher(user=user, subject=validated_data[\"subject\"])\n # teacher.save()\n # return teacher\n\n\nclass LoginSerializer(serializers.Serializer):\n username = serializers.CharField(max_length=10)\n password = serializers.CharField(style={\"input_type\": \"password\"})\n\n\nclass UpdateProjectSerializer(serializers.ModelSerializer):\n class Meta:\n model = Project\n fields = \"__all__\"\n\n\nclass UpdateProjectReportSerializer(serializers.ModelSerializer):\n class Meta:\n model = Project\n fields = [\"report\"]\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n current_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def validate_new_password(self, value):\n password_validation.validate_password(value)\n return value\n", "id": "2708214", "language": "Python", "matching_score": 4.2262420654296875, "max_stars_count": 7, "path": "BEProjectsApp/serializers.py" }, { "content": "from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import AbstractUser\nfrom datetime import date\n\nDOMAIN_CHOICES = [\n (\"Data Mining and Analytics\", (\"Data Mining and Analytics\")),\n (\"Machine Learning\", (\"Machine 
Learning\")),\n (\"Deep Learning\", (\"Deep Learning\")),\n (\"Image Processing / Computer Vision\", (\"Image Processing / Computer Vision\")),\n (\n \"Natural Language Processing / Artificial Intelligence\",\n (\"Natural Language Processing / Artificial Intelligence\"),\n ),\n (\"Networking / Security\", (\"Networking / Security\")),\n (\"Internet of Things(IOT)\", (\"Internet of Things(IOT)\")),\n (\"Mobile Computing\", (\"Mobile Computing\")),\n (\"Big Data\", (\"Big Data\")),\n (\"Cloud Computing\", (\"Cloud Computing\")),\n (\n \"Computer Vision and Artificial Intelligence\",\n (\"Computer Vision and Artificial Intelligence\"),\n ),\n (\"Blockchain\", (\"Blockchain\")),\n (\"Operating Systems\", (\"Operating Systems\")),\n (\"GAN's\", (\"GAN's\")),\n (\"Audio Processing\", (\"Audio Processing\")),\n (\"Video Processing\", (\"Video Processing\")),\n (\"Cryptography\", (\"Cryptography\")),\n]\n\nYEAR_CHOICES = [(\"FE\", (\"FE\")), (\"SE\", (\"SE\")), (\"TE\", (\"TE\")), (\"BE\", (\"BE\"))]\n\nDIVISION_CHOICES = [(\"A\", (\"A\")), (\"B\", (\"B\"))]\n\n\nclass User(AbstractUser):\n is_contributor = models.BooleanField(default=False)\n is_teacher = models.BooleanField(default=False)\n\n\nclass Teacher(models.Model):\n user = models.OneToOneField(\n User, related_name=\"teacher_user\", on_delete=models.CASCADE, primary_key=True\n )\n\n # additional attributes\n subject = models.CharField(max_length=150)\n\n @property\n def user__username(self):\n return self.user.username\n\n @property\n def user__email(self):\n return self.user.email\n\n @property\n def user__first_name(self):\n return self.user.first_name\n\n @property\n def user__last_name(self):\n return self.user.last_name\n\n def __str__(self):\n return self.user.get_full_name()\n\n\nclass Contributor(models.Model):\n user = models.OneToOneField(\n User,\n related_name=\"contributor_user\",\n on_delete=models.CASCADE,\n primary_key=True,\n )\n\n year = models.CharField(\n choices=YEAR_CHOICES, default=\"None\", null=False, blank=False, max_length=3\n )\n division = models.CharField(\n choices=DIVISION_CHOICES, default=\"None\", null=False, blank=False, max_length=2\n )\n github_id = models.URLField(blank=True)\n\n @property\n def user__username(self):\n return self.user.username\n\n @property\n def user__email(self):\n return self.user.email\n\n @property\n def user__first_name(self):\n return self.user.first_name\n\n @property\n def user__last_name(self):\n return self.user.last_name\n\n def __str__(self):\n return self.user.get_full_name()\n\n\nclass Project(models.Model):\n # Project title\n title = models.CharField(max_length=100, null=False, blank=False)\n\n # ID of the teacher mentoring the project\n teacher = models.ForeignKey(Teacher, on_delete=models.SET_NULL, null=True)\n\n # Project description\n description = models.TextField(null=False, blank=False)\n\n # Project abstract\n abstract = models.TextField(null=False, blank=False)\n\n # Year created\n year_created = models.PositiveSmallIntegerField(\n null=False, blank=False, default=int(str(date.today())[:4])\n )\n\n # Contributor year\n contributor_year = models.CharField(\n choices=YEAR_CHOICES, default=\"None\", null=False, blank=False, max_length=3\n )\n\n # Is this a BE project?\n is_BE_project = models.BooleanField(null=False, blank=False, default=False)\n\n # Project ID (only for BE projects) - Format: <Year_GroupID> eg. 
2020_69\n BE_project_id = models.CharField(max_length=10, default=\"None\")\n\n # Domain list\n domain = models.CharField(\n choices=DOMAIN_CHOICES, default=\"None\", null=False, blank=False, max_length=100\n )\n\n # PDF to be uploaded\n report = models.FileField(null=True, blank=True)\n\n # Executable to be uploaded\n executable = models.FileField(null=True, blank=True)\n\n # GitHub repo link\n github_repo = models.CharField(max_length=200, null=True, blank=True, default=\"\")\n\n # URL of the video demo\n demo_video = models.CharField(max_length=200, null=True, blank=True, default=\"\")\n\n # To check whether project is approved or not\n approved = models.BooleanField(default=False)\n\n # Boolean field to check whether the project is inhouse or outhouse\n is_inhouse = models.BooleanField(null=False, blank=False, default=True)\n\n awards = models.TextField(null=True, blank=True, default=\"None\")\n\n # Property of an outhouse project\n company = models.CharField(max_length=100, null=True, blank=True, default=\"None\")\n\n supervisor = models.CharField(max_length=100, null=True, blank=True, default=\"None\")\n\n journal = models.CharField(max_length=100, null=True, blank=True, default=\"None\")\n\n contributors = models.ManyToManyField(Contributor)\n\n def publish(self):\n self.approved = True\n self.save()\n\n def __str__(self):\n return self.title\n", "id": "4712554", "language": "Python", "matching_score": 3.5508148670196533, "max_stars_count": 7, "path": "BEProjectsApp/models.py" }, { "content": "from django_filters.rest_framework import DjangoFilterBackend\nimport django_filters\nfrom .models import *\n\n\nclass BrowseProjectFilter(DjangoFilterBackend):\n def filter_queryset(self, request, queryset, view):\n filter_class = self.get_filter_class(view, queryset)\n\n if filter_class:\n return filter_class(\n request.query_params, queryset=queryset, request=request\n ).qs\n return queryset\n\n\nclass ProjectFilter(django_filters.FilterSet):\n description = django_filters.CharFilter(lookup_expr=\"icontains\")\n teacher__user__username = django_filters.CharFilter(lookup_expr=\"icontains\")\n contributors__user__username = django_filters.CharFilter(lookup_expr=\"icontains\")\n\n class Meta:\n model = Project\n fields = (\n \"domain\",\n \"approved\",\n \"year_created\",\n \"teacher__user__id\",\n \"is_inhouse\",\n \"contributors__user__username\",\n \"is_BE_project\",\n \"contributor_year\",\n )\n", "id": "11941252", "language": "Python", "matching_score": 0.7616775631904602, "max_stars_count": 7, "path": "BEProjectsApp/filters.py" }, { "content": "from django.contrib import admin\nfrom django.urls import include, path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . 
import views\nfrom rest_framework import routers\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r\"daily_target\", views.DailyTargetView, basename=\"daily_target\")\nrouter.register(r\"bill\", views.BillView, basename=\"bill\")\nrouter.register(r\"manager\", views.ManagerViewSet)\nrouter.register(r\"user\", views.UserViewSet)\nrouter.register(r\"salesperson\", views.SalespersonViewSet)\nrouter.register(r\"warehouse\", views.WarehouseViewSet)\nrouter.register(r\"inventory\", views.InventoryViewSet)\n# router.register(r'targets_completed', TargetsCompletedView)\n\n\nurlpatterns = [\n path(\"SignIn\", views.SignIn, name=\"SignIn\"),\n # path('VerifyChangePassword/<slug:timestamp/<slug:username>/',views.VerifyChangePassword,name='VerifyChangePassowrd'),\n path(\"ChangePassword\", views.ChangePassword, name=\"ChangePassword\"),\n path(\"Logout\", views.Logout, name=\"Logout\"),\n path(\"AddSalesperson\", views.AddSalesperson.as_view(), name=\"AddSalesPerson\"),\n path(\"GetCoordinates\", views.GetCoordinates.as_view(), name=\"GetCoordinates\"),\n path(\n \"GetSalespersonData\", views.SalespersonData.as_view(), name=\"GetSalespersonData\"\n ),\n # path(\"Test\", views.Test, name=\"Test\"),\n # path(\"accept\", views.accept, name=\"accept\"),\n # path('daily_target',DailyTargetView.as_view(),name='daily_target'),\n # path('bill', BillView.as_view(),name='bill'),\n path(\n \"Salesperson/<int:pk>/Add\",\n views.AddToInventory.as_view(),\n name=\"AddToInventory\",\n ),\n path(\"InventoryList\", views.InventoryList.as_view(), name=\"InventoryList\"),\n path(\"ManagerPopulate\", views.ManagerPopulate, name=\"ManagerPopulate\"),\n path(\"SalespersonPopulate\", views.SalespersonPopulate, name=\"SalespersonPopulate\"),\n # path(\"UpdateCoordinates\", views.UpdateCoordinates.as_view(), name=\"UpdateCoordinates\"),\n path(\"warehouse\", views.WarehouseView.as_view(), name=\"WarehouseView\"),\n path(\"ItemAssign\", views.ItemAssignView.as_view(), name=\"ItemAssignView\"),\n]\n\nurlpatterns += router.urls\n", "id": "3038469", "language": "Python", "matching_score": 1.238577961921692, "max_stars_count": 4, "path": "salespersonTrackerREST/urls.py" }, { "content": "from django.contrib import admin\nfrom .models import Teacher, Project, Contributor, User\nfrom django.contrib.auth.admin import UserAdmin\nfrom datetime import date\n\nfrom datetime import date\n\n\nclass UserAdmin(UserAdmin):\n\n fieldsets = UserAdmin.fieldsets + (\n (None, {\"fields\": (\"is_contributor\", \"is_teacher\")}),\n )\n\n\n# Register your models here.\n\n\nadmin.site.register(Teacher)\nadmin.site.register(Project)\nadmin.site.register(Contributor)\nadmin.site.register(User, UserAdmin)\n", "id": "1225949", "language": "Python", "matching_score": 0.5173519253730774, "max_stars_count": 7, "path": "BEProjectsApp/admin.py" } ]
1.805157
SurajAlamoni
[ { "content": "# PyPassword Generator\nimport random\nimport streamlit as st\n\nalphabets = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z','A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\nnumbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\nchars = ['%','!','#','$','*']\n\nvalue = int(input(\"how many alphabets do u want\"))\nnumbs = int(input(\"how many numbers do u want\"))\nch = int(input(\"how many symbols do u want\"))\n\n# value = 4\n# numbs = 2\n# ch = 2\n\npassword = []\nfor n in range(1,value+1):\n password += random.choice(alphabets)\n\nfor n in range(1,numbs+1):\n password += random.choice(numbers)\n\nfor n in range(1,ch+1):\n password += random.choice(chars)\n\nrandom.shuffle(password)\nprint(password)\n\nfinal = \"\"\nfor c in password:\n final += c\n \nst.write(final)\n", "id": "3517508", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "PasswordGenerator.py" } ]
0
KanoComputing
[ { "content": "{\n \"targets\": [\n {\n \"target_name\": \"GRT\",\n \"cflags_cc\": [\"-std=c++11\", \"-fexceptions\", \"-frtti\"],\n \"sources\": [\"src/GRT.cc\", \"src/NodeTimeSeriesClassificationData.cc\", \"src/NodeDTW.cc\", \"src/NodeHMM.cc\", \"src/NodeKMeansQuantizer.cc\"],\n \"include_dirs\" : [\n \"<!(node -e \\\"require('nan')\\\")\",\n \t \t\t\t\"./include\",\n\t\t\t],\n \"conditions\": [\n ['OS==\"mac\"',\n {\n \"libraries\": [\n \"<!(pwd)/lib/libgrt.a\"\n ],\n \"xcode_settings\": {\n \"GCC_ENABLE_CPP_EXCEPTIONS\": \"YES\",\n \"GCC_ENABLE_CPP_RTTI\": \"YES\",\n },\n }\n ],\n ['OS==\"mac\" and <!(node -e \"console.log(parseInt(os.release()) > 15 ? 1 : 0)\")',\n {\n \"xcode_settings\": {\n \"OTHER_CFLAGS\": [\"-std=c++11\", \"-stdlib=libc++\"]\n }\n }\n ],\n ['OS==\"win\"',\n {\n \"libraries\": [\n \"<!(cd)/lib/grt.lib\",\n ],\n \"msvs_settings\": {\n \"VCCLCompilerTool\": {\n \"RuntimeTypeInfo\": 'true',\n 'RuntimeLibrary': 'false',\n \"AdditionalOptions\": [\n \"/D NOMINMAX\",\n \"/D GRT_STATIC_LIB\",\n ],\n },\n }\n }\n ]\n ]\n }\n ],\n}\n", "id": "6494748", "language": "Python", "matching_score": 0, "max_stars_count": 4, "path": "binding.gyp" } ]
0
chandrakanth-jp
[ { "content": "# -*- coding: utf-8 -*-\n\"\"\"\n@author: chandrakanth\n\"\"\"\n\nimport os\nimport numpy as np\nfrom utilities_motion_corruption import load_image, manipulate_kspace_columns, image_reconstruction, write_image, generate_motion_trajectory\n\n\ninput_dir = '/directory' # Enter path to directory containing dicom image volumes, each image volume is expected to be in a separate directory\n\nfor root,dirnames,filenames in os.walk(input_dir):\n for image in sorted(filenames):\n filepath = os.path.join(root, image)\n if '.dcm' in filepath: \n image, array = load_image(filepath)\n imagesize = image.GetSize()\n print(filepath)\n print(imagesize)\n motion_table=generate_motion_trajectory(imagesize)\n np.save(root+'/motion_table',motion_table)\n corrupted_data = manipulate_kspace_columns(motion_table,filepath,imagesize)\n corrupted_image = image_reconstruction(corrupted_data)\n write_image(root,corrupted_image)\n \n", "id": "507199", "language": "Python", "matching_score": 0, "max_stars_count": 7, "path": "motion_corruption.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\n@author: chandrakanth\n\"\"\"\n\nimport numpy as np\nimport SimpleITK as sitk\nfrom nibabel import quaternions\nimport random\n\n\ndef load_image(filepath):\n \"\"\" Loads an 3D image\n Args:\n param1: path to the image file\n \n Returns:\n The image volume, corresponding numpy array \n \"\"\"\n input_img = sitk.ReadImage(filepath)\n input_array = sitk.GetArrayFromImage(input_img)\n return input_img, input_array\n\ndef affine_translate(transform, x_translation, y_translation,z_translation): \n \"\"\" Creates a transform for 3D translation\n \n Args:\n param1: 3D transform object\n param2: translation in x-axis\n param3: translation in y-axis\n param4: translation in z-axis\n \n Returns:\n Affine translation object for 3D translation\n \"\"\"\n new_transform = sitk.AffineTransform(transform)\n new_transform.SetTranslation((x_translation, y_translation,z_translation))\n return new_transform\n\ndef affine_rotate(transform, rotation_matrix,dimension=3):\n \"\"\" Creates a transform object for 3D rotation, combines 3D translation and rotation transforms\n \n Args:\n param1: Affine translation object for 3D translation\n param2: rotation_matrix for 3D rotation\n param3: dimension of transformation\n \n Returns: \n Transformation object which combines 3D translation and rotation\n \"\"\"\n \n matrix = np.array(transform.GetMatrix()).reshape((dimension,dimension))\n new_matrix = np.dot(rotation_matrix,matrix)\n transform.SetMatrix(new_matrix.ravel())\n return transform\n\ndef create_transform(table,slice,row):\n \"\"\" Iterates through the motion data table and generates transformation matrices\n \n Args:\n param1: motion_trajectory_data (numpy array)\n param2: slice number\n param3: row number (k-space line)\n \n Returns:\n transformation object corresponding to a 4x4 transformation matrix\n \"\"\"\n shift = table[slice,row,0]\n angle = (table[slice,row,1])\n axis = table[slice,row,2]\n rot_matrix = quaternions.angle_axis2mat(angle,axis)\n affine_transform = affine_translate(sitk.AffineTransform(3), shift[0], shift[1], shift[2])\n combined_transform = affine_rotate(affine_transform, rot_matrix)\n return combined_transform\n\ndef affine_transformation(input_img, transform_matrix):\n \"\"\" Performs affine(rigid) transformation on the input image volume\n \n Args:\n param1: input image volume\n param2: transformation object corresponding to a 4x4 transformation matrix\n \n Returns:\n Transformed image\n \n \"\"\"\n 
center=input_img.TransformContinuousIndexToPhysicalPoint((input_img.GetSize()[0]/2,input_img.GetSize()[1]/2,input_img.GetSize()[2]/2))\n transform_matrix.SetCenter(center)\n transformed_image = sitk.Resample(input_img,input_img.GetSize(),transform_matrix,sitk.sitkLinear,input_img.GetOrigin(),input_img.GetSpacing(), input_img.GetDirection())\n return transformed_image\n\n\n\n \ndef manipulate_kspace_columns(table,filepath,imagesize):\n \"\"\" Generates a k-space corresponding to a motion corrupted acquisition by of merging the k-space lines\n from the transfromed image with that of the original uncorrupted image\n \n Args:\n param1: motion trajectory data/motion table (numpy array)\n param2: file path of the image volume\n param3: size of the input image\n \n Returns:\n motion corrupted 3D k-space\n \n \"\"\"\n \n input_img, input_array = load_image(filepath) # Load image and array\n \n print(\"Generating K-Space of original image\")\n print('-'*30)\n \n img_fft = np.fft.fftn(input_array,axes=(-2,-1))\n img_fft = np.fft.fftshift(img_fft,axes=(-2, -1))\n print(\"K-Space Generated\") \n\n print(\"Image transformations and K-Space manipulation....\")\n\n for slice in range(np.shape(table)[0]):\n for row in range(np.shape(table)[1]):\n shift = table[slice,row,0]\n\n if np.sum(abs(shift[0]+shift[1]+shift[2]))!=0:\n transform = create_transform(table,slice,row)\n transformed_img = affine_transformation(input_img, transform)\n transformed_array = sitk.GetArrayFromImage(transformed_img)\n\n coil1_dist_array = transformed_array*np.ones((imagesize[1],imagesize[0]))\n coil1_dist_kspace = np.fft.fft2(coil1_dist_array[slice])\n coil1_dist_kspace = np.fft.fftshift(coil1_dist_kspace,axes=(-2, -1))\n img_fft[slice,:,row] = coil1_dist_kspace[:,row] # Substitute Original K-space lines \n \n \n if slice % 10 == 0:\n print('Done: {0}/{1} Slices'.format(slice, np.shape(input_array)[0]))\n print(\"K-Space manipulation complete\")\n return img_fft \n\n\n\n\ndef image_reconstruction(corrupted_k_space):\n \"\"\"Reconstructs motion corrupted 3D image from corrupted k-space\n \n Args:\n param1: 3D corrupted k-space\n \n Returns:\n Motion corrupted 3D image\n \"\"\"\n img_array=np.zeros((np.shape(corrupted_k_space)))\n for i in range(np.shape(corrupted_k_space)[0]):\n img = np.fft.ifft2(corrupted_k_space[i])\n img = np.abs(img)\n img_array[i] = img\n return img_array\n\ndef write_image(folder, image):\n \"\"\" Writes image to directory\n \n Args:\n param1: path to image folder\n param2: image \n \n \"\"\"\n coil1_img = sitk.GetImageFromArray(image)\n castFilter = sitk.CastImageFilter()\n castFilter.SetOutputPixelType(sitk.sitkUInt16)\n corrupted_img = castFilter.Execute(coil1_img)\n sitk.WriteImage(corrupted_img, folder+\"/corrupted_image.dcm\")\n \ndef generate_motion_trajectory(imagesize, x_shift=4, y_shift=4, z_shift=0, rotation=np.pi/60 ):\n \"\"\" Generates a 3D random motion trajectory, each row specifies the transformation parameters\n for each k-space line\n \n Args:\n param1: image size(tuple)\n param2: maximum shift in x-axis in mm\n param3: maximum shift in y-axis in mm\n param3: maximum shift in z-axis in mm\n param4: maximum angle of rotation \n \n Returns:\n Numpy array, motion trajectory data\n \"\"\"\n motion_table = np.array([np.zeros(3),np.zeros(1),np.zeros(3)]) # creating a template for entering parmeter values for affine transformation\n trajectory = [[motion_table],]*imagesize[2]*imagesize[0]\n trajectory = np.array(trajectory)\n trajectory = np.reshape(trajectory,(imagesize[2],imagesize[0],3))\n 
\n trajectory[:,:,2] = trajectory[:,:,2]+1 # Adding 1 to avoid creation of invalid transformation \n for slice in range(imagesize[2]):\n num = list(range(0,int(imagesize[0]/2-10))) + list(range(int(imagesize[0]/2+10),imagesize[0])) \n motion_events = np.random.randint(2,high=6)\n k_space_lines = random.sample(num,motion_events) # Select random k-space lines for manipulation of raw-data\n for row in k_space_lines:\n shift = np.array([np.random.normal(loc=0,scale=x_shift),np.random.normal(loc=0,scale=y_shift),z_shift])\n rot = np.random.normal(loc=0, scale=rotation)\n axes =np.array([0,0,1])\n \n trajectory[slice,row,0] = shift\n trajectory[slice,row,1] = rot\n trajectory[slice,row,2] = axes\n \n return trajectory\n \n \n\n ", "id": "10852857", "language": "Python", "matching_score": 0, "max_stars_count": 7, "path": "utilities_motion_corruption.py" } ]
0
hunkimForks
[ { "content": "import os\nimport json\nimport glob\nimport time\n \nimport mxnet as mx\nimport gluonnlp as nlp\n\nfrom gluonnlp.data import SentencepieceTokenizer\nfrom kogpt2.model.gpt import GPT2Model as MXGPT2Model\nfrom kogpt2.utils import get_tokenizer\n\ndef get_kogpt2_model(model_file,\n vocab_file,\n ctx=mx.cpu(0)):\n vocab_b_obj = nlp.vocab.BERTVocab.from_sentencepiece(vocab_file,\n mask_token=None,\n sep_token=None,\n cls_token=None,\n unknown_token='<unk>',\n padding_token='<pad>',\n bos_token='<s>',\n eos_token='</s>')\n mxmodel = MXGPT2Model(units=768,\n max_length=1024,\n num_heads=12,\n num_layers=12,\n dropout=0.1,\n vocab_size=len(vocab_b_obj))\n mxmodel.load_parameters(model_file, ctx=ctx)\n \n return (mxmodel, vocab_b_obj)\n\ndef model_fn(model_dir): \n voc_file_name = glob.glob('{}/*.spiece'.format(model_dir))[0]\n model_param_file_name = glob.glob('{}/*.params'.format(model_dir))[0]\n \n # check if GPU is available\n if mx.context.num_gpus() > 0:\n ctx = mx.gpu()\n else:\n ctx = mx.cpu()\n \n model, vocab = get_kogpt2_model(model_param_file_name, voc_file_name, ctx)\n tok = SentencepieceTokenizer(voc_file_name)\n \n return model, vocab, tok, ctx\n\ndef transform_fn(model, request_body, content_type, accept_type):\n model, vocab, tok, ctx = model\n \n sent = request_body.encode('utf-8')\n sent = sent.decode('unicode_escape')[1:]\n sent = sent[:-1]\n toked = tok(sent)\n \n t0 = time.time()\n inference_count = 0\n while 1:\n input_ids = mx.nd.array([vocab[vocab.bos_token]] + vocab[toked]).expand_dims(axis=0)\n pred = model(input_ids.as_in_context(ctx))[0]\n gen = vocab.to_tokens(mx.nd.argmax(pred, axis=-1).squeeze().astype('int').asnumpy().tolist())[-1]\n if gen == '</s>':\n break\n sent += gen.replace('▁', ' ')\n toked = tok(sent)\n inference_count += 1\n \n response_body = json.dumps([sent, inference_count, time.time() - t0])\n \n return response_body, content_type", "id": "11939175", "language": "Python", "matching_score": 0, "max_stars_count": 24, "path": "gpt2-inference.py" }, { "content": "import csv\nimport sys\nimport gzip\nimport os\nimport csv\nimport numpy as np\nimport random\n\n\"\"\"\nNote:\nUse this script to generate the necessary data sets to run udc_predict/test/train.py scripts.\n\nCommand:\n$python [Command] input_file_path [output_file_path_if_required]\n\n1. Process the raw FB export so that each line consists of a \"context\" and \"utterance.\"\n$python prepare_fb_data.py process_raw_export raw_export_file_path output_file_path\nex) $python ./scripts/prepare_fb_data.py process_raw_export ./data/tf.txt.gz ./data/tf_processed.csv\n\n2. 
Generate training/test/valid sets using the processed data from the above step\n$python prepare_fb_data.py generate_data_sets processed_file_path\nex) $python ./scripts/prepare_fb_data.py generate_data_sets ./data/tf_processed.csv\n\"\"\"\n\nPROCESS_RAW_EXPORT = 'process_raw_export'\nGENERATE_DATA_SETS = 'generate_data_sets'\n\n# NOTE: we don't make use of num_likes at all\ndef _parse_info_from_line(line):\n line = line.decode('utf-8')\n raw = line.split(\"\\t\")\n post = {'num_likes': raw[0], 'body': raw[1].rstrip()}\n raw_comments = raw[2:]\n comments = []\n for i in range(0, len(raw_comments), 2):\n comments.append({'num_likes': raw_comments[i], 'body': raw_comments[i+1].rstrip()})\n return {'post': post, 'comments': comments}\n\n# Process the raw data so each row contains a post and its reply.\n# Note that \"post\" == \"context\" and \"reply\" == \"utterance\".\ndef process_raw_export(filename, output):\n with gzip.open(filename) as f1:\n with open(output, 'w') as f2:\n fieldnames = ['Context', 'Utterance']\n writer = csv.DictWriter(f2, fieldnames=fieldnames, delimiter='\\t')\n writer.writeheader()\n\n # NOTE: if a post has many comments, we add multiple rows with the same post body\n for line in f1:\n info = _parse_info_from_line(line)\n for comment in info['comments']:\n writer.writerow({\n 'Context': info['post']['body'].encode('utf-8'),\n 'Utterance': comment['body'].encode('utf-8')\n })\n\n# With the processed_data from above, we have to make train.csv, test.csv, and valid.csv.\n# Let's separate it so that the ratio is 2:1:1 respectively.\n# columns for train.csv: [Context, Utterance, Label]\n# columns for test/valid.csv: [Context, Utterance, Distractor_0, .., Distractor_n]\ndef generate_data_sets(path_to_processed_data):\n with open(path_to_processed_data) as f1:\n rows = [r.decode('utf-8') for r in f1.read().splitlines()[1:]]\n rows = np.array(rows)\n np.random.shuffle(rows)\n\n # NOTE: we don't care if they are not exactly separated four ways -- close enough\n four_way_separated = np.array_split(rows, 4)\n train_rows = four_way_separated[0].tolist() + four_way_separated[1].tolist()\n test_rows = four_way_separated[2].tolist()\n valid_rows = four_way_separated[3].tolist()\n make_train_set(train_rows, './data/train_set.csv')\n make_test_set(test_rows, './data/test_set.csv')\n make_valid_set(valid_rows, './data/valid_set.csv')\n\n# Get all the rows that do not have the given context\n# i.e. 
get all the other posts\ndef _get_rows_with_different_context(context, rows):\n rows_with_different_context = []\n for row in rows:\n row_info = row.split('\\t')\n if row_info[0] != context:\n rows_with_different_context.append(row)\n return rows_with_different_context\n\n# Input: [Context, Utterance]\n# Output: [Context, Utterance, Label]\n# Label == 1 if Utterance belongs to the right Context\n# Label == 0 otherwise.\n# Make one set correctly labeled and the other set incorrectly labeled\n# The output's size will be twice the input size\ndef make_train_set(train_rows, output):\n correct_rows = ['\\t'.join([r, '1']) for r in train_rows]\n\n # assign a wrong utterance for each context\n incorrect_rows = []\n for row in train_rows[:]:\n row_info = row.split('\\t')\n differently_contexted_rows = _get_rows_with_different_context(row_info[0], train_rows)\n random_row = random.choice(differently_contexted_rows)\n wrong_utterance = random_row.split('\\t')[1]\n row_info[1] = wrong_utterance\n row_info.append('0')\n incorrect_rows.append('\\t'.join(row_info))\n\n combined = correct_rows + incorrect_rows\n np.random.shuffle(combined)\n\n with open(output, 'w') as f2:\n fieldnames = ['Context', 'Utterance', 'Label']\n writer = csv.DictWriter(f2, fieldnames=fieldnames, delimiter='\\t')\n writer.writeheader()\n\n for row in combined:\n row_info = row.split('\\t')\n writer.writerow({\n 'Context': row_info[0].encode('utf-8'),\n 'Utterance': row_info[1].encode('utf-8'),\n 'Label': row_info[2].encode('utf-8'),\n })\n\n# Input: [Context, Utterance]\n# Output: [Context, Utterance, Distractor_0, Distractor_1, Distractor_2]\ndef make_test_set(test_rows, output, num_distractors=3):\n output_rows = []\n\n for row in test_rows:\n row_info = row.split('\\t')\n differently_contexted_rows = _get_rows_with_different_context(row_info[0], test_rows)\n distractor_rows = random.sample(differently_contexted_rows, num_distractors)\n distractor_utterances = [r.split('\\t')[1] for r in distractor_rows]\n test_row = row_info + distractor_utterances\n output_rows.append('\\t'.join(test_row))\n\n with open(output, 'w') as f2:\n fieldnames = ['Context', 'Utterance'] + [('Distractor_%d' % i) for i in range(num_distractors)]\n writer = csv.DictWriter(f2, fieldnames=fieldnames, delimiter='\\t')\n writer.writeheader()\n\n for row in output_rows:\n row_info = row.split('\\t')\n writer.writerow({\n 'Context': row_info[0].encode('utf-8'),\n 'Utterance': row_info[1].encode('utf-8'),\n 'Distractor_0': row_info[2].encode('utf-8'),\n 'Distractor_1': row_info[3].encode('utf-8'),\n 'Distractor_2': row_info[4].encode('utf-8')\n })\n\n# Note: redundant wrapper to express that we make valid_set and test_set the same way\ndef make_valid_set(valid_rows, output, num_distractors=3):\n make_test_set(valid_rows, output, num_distractors)\n\nif __name__ == \"__main__\":\n cmd = sys.argv[1]\n if (cmd == PROCESS_RAW_EXPORT):\n print('Processing the raw export: ' + sys.argv[2])\n process_raw_export(sys.argv[2], sys.argv[3])\n elif (cmd == GENERATE_DATA_SETS):\n print('Generating data sets with: ' + sys.argv[2])\n generate_data_sets(sys.argv[2])\n", "id": "7639492", "language": "Python", "matching_score": 0, "max_stars_count": 3, "path": "scripts/prepare_fb_data.py" } ]
0
layal20-meet
[ { "content": "import turtle\nturtle.goto(0,0)\n\nUP = 0\nRIGHT =1 \ndirection = None\ndef up():\n global direction\n print(\"you pressed the up key\")\n direction = UP\n on_move()\n\ndef right():\n print(\"you pressed the right key\")\n global direction\n direction = RIGHT\n on_move()\nturtle.listen()\n\n", "id": "6512314", "language": "Python", "matching_score": 1.372941493988037, "max_stars_count": 0, "path": "meet-coding/fun2.py" }, { "content": "import turtle\nturtle.penup()\nturtle.goto(-200,-100)\nturtle.pendown()\n#draw the M\nturtle.goto(-200,-100+200)\nturtle.goto(-200+50,-100)\nturtle.goto(-200+100,-100+200)\nturtle.goto(-200+100,-100)\n#draw the E\nturtle.penup()\nturtle.goto(-200+150,-100+200)\nturtle.pendown()\nturtle.goto(-200+150,-100)\nturtle.goto(+50,-100)\nturtle.penup()\nturtle.goto(-200+150,0)\nturtle.pendown()\nturtle.goto(-200+250,0)\nturtle.mainloop()\n\n\n", "id": "2976066", "language": "Python", "matching_score": 1.208735466003418, "max_stars_count": 0, "path": "meet-coding/MEETinTurtle.py" }, { "content": "import turtle\nimport random\n\nt = turtle.clone()\n\nwords = ('entrepreneurship','meet','nature','global warming','animals','recycle','tree','plastic', ' pollution')\nunderscore_position = []\n\n#user_word_input = input('enter a letter')\n#user_input = input()\nt.penup()\nt.goto(-400,0)\n#functions here\n#secret_word chooses a random word\nsecret_word = random.choice(words)\nprint(secret_word)\ndef words_length_underscore():\n for letter in secret_word:\n underscore_position.append(t.pos())\n t.pendown()\n t.forward(20)\n t.penup()\n t.forward(20)\n print(underscore_position)\nwords_length_underscore()\n\nturtle.penup()\nturtle.goto (400,-300)\nturtle.pendown()\n\n#hanger\nturtle.goto (400, -300)\nturtle.goto (300, -300)\nturtle.goto (300, 300)\nturtle.goto (0, 300)\nturtle.goto (0, 220)\nglobal guess\nguess = input('what letter would you like to guess')\n\ndef head():\n#head\n\tturtle.circle(-50, 360)\ndef body():\n#for body\n\tturtle.penup()\n\tturtle.goto (0, 120)\n\tturtle.pendown()\n\t#body\n\tturtle.goto (0, -110)\ndef hand1():\n#for hand1\n\tturtle.penup()\n\tturtle.goto (0, 120)\n\tturtle.pendown()\n#hand1\n\tturtle.goto (50, 40)\ndef hand2():\n#for hand2\n\tturtle.penup()\n\tturtle.goto (0,120)\n\tturtle.pendown()\n\n\t#hand2\n\tturtle.goto (-50, 40)\ndef leg1():\n#for leg1\n\tturtle.penup ()\n\tturtle.goto (0, -110)\n\tturtle.pendown ()\n\n#leg1\n\tturtle.goto (50, -190)\ndef leg2():\n#for leg2\n\tturtle.penup()\n\tturtle.goto ( 0, -110)\n\tturtle.pendown()\n\n#leg2\n\tturtle.goto (-50, -190)\ndef eye1():\n#for eye1\n\tturtle.penup()\n\tturtle.goto (-20, 190)\n\tturtle.pendown()\n\n#eye1\n\tturtle.circle(-5, 360)\ndef eye2():\n#for eye2\n\tturtle.penup()\n\tturtle.goto (20, 190)\n\tturtle.pendown()\n\n#eye2\n\tturtle.circle(-5, 360)\n\ndef mouth():\n#for mouth\n\tturtle.penup()\n\tturtle.goto (-20, 145)\n\tturtle.pendown()\n\tturtle.right(-90)\n\tturtle.circle(-20, 180)\n\n#mouth_data = (-20, 145, -90, -20, 180)\n\n#without refactor:\n#draw_part_functions = [eye1, eye2]\n\ni = -1\ndef check_letter_index():\n#for letter in secret_word:\n global guess\n for index_of_letter in range(0,len(secret_word)):\n letter = secret_word[index_of_letter]\n print('checking letter: '+letter)\n if letter == guess:\n position_tuple_to_go_to = underscore_position[index_of_letter]\n \n t.goto(position_tuple_to_go_to)\n t.write(letter)\n guess = input('enter another letter')\n elif letter != guess:\n head()\n guess =input()\n print('here')\n elif letter != guess:\n 
body()\n guess = input()\n elif letter != guess:\n hand1()\n guess = input('enter a letter')\n elif letter != guess: \n hand2()\n guess = input('enter a letter')\n elif letter != guess:\n leg1()\n guess = input('enter a letter')\n elif letter != guess:\n leg2()\n guess = input('enter a letter')\n elif letter != guess:\n eye1()\n guess = input('enter a letter')\n elif letter != guess:\n eye2()\n guess = input('enter a letter')\n elif letter != guess:\n mouth()\n guess = input('enter a letter')\n else:\n print('you`re right')\n \ndef printhi(thing_to_print):\n\tprint(thing_to_print)\ncheck_letter_index()\n\n\n", "id": "3908034", "language": "Python", "matching_score": 1.895796775817871, "max_stars_count": 0, "path": "meet-coding/layal20-hangman.py" }, { "content": "\nimport turtle, random\ndiretoria='/home/bcasaleiro/Documents/WorkPlace/Python/Hangman/wordlist.txt'\nstage=[0]\n\ndef wordlist():\n\tficheiro=open(diretoria, 'r')\n\tlst=ficheiro.readlines()\n\tficheiro.close()\n\treturn lst[random.randint(0,len(lst))]\n\t\ndef go_to(x, y, p):\n\tturtle.hideturtle()\n\tturtle.penup()\n\tturtle.goto(x,y)\n\tturtle.setheading(p)\n\tturtle.pendown()\n\t\ndef hang():\n\tturtle.speed(0)\n\tif stage[0]==0:\n\t\tgo_to(-300,0,0)\n\t\tturtle.forward(600)\n\t\tgo_to(-100,0, 90)\n\t\tturtle.forward(200)\n\t\tturtle.right(90)\n\t\tturtle.forward(100)\n\t\tturtle.right(90)\n\t\tturtle.forward(25)\n\telif stage[0]==1:\n\t\tgo_to(0, 150, 0)\n\t\tturtle.circle(12.5)\n\telif stage[0]==2:\n\t\tgo_to(0,150, -90)\n\t\tturtle.forward(50)\n\telif stage[0]==3:\n\t\tgo_to(0,140, -45)\n\t\tturtle.forward(25)\n\t\tgo_to(0,140, -135)\n\t\tturtle.forward(25)\n\telif stage[0]==4:\n\t\tgo_to(0,100, -45)\n\t\tturtle.forward(25)\n\t\tgo_to(0,100, -135)\n\t\tturtle.forward(25)\n\tstage[0]+=1\n\treturn 0\n\ndef spaces(word):\n\tl=len(word)\n\tif l %2 !=0:\n\t\tgo_to(-5-(l//2*20) - (l//2*10), -150, 0)\n\t\tfor i in range(l):\n\t\t\tturtle.forward(20)\n\t\t\tturtle.penup()\n\t\t\tturtle.forward(10)\n\t\t\tturtle.pendown()\n\telse:\n\t\tgo_to(-(l//2*20) - (l//2*10), -150, 0)\n\t\tfor i in range(l):\n\t\t\tturtle.forward(20)\n\t\t\tturtle.penup()\n\t\t\tturtle.forward(10)\n\t\t\tturtle.pendown()\n\ndef error(word, char):\n\tgo_to(-5-(len(word)//2*20) - (len(word)//2*10), -200, 0)\n\tturtle.penup()\n\tfor j in range(stage[0]):\n\t\tturtle.forward(20)\n\tturtle.pendown()\n\tturtle.write(char, align='center', font=(\"Arial\", 8, \"normal\"))\n\thang()\n\t\n\t\n\t\ndef point(word, char, i):\n\tgo_to(-5-(len(word)//2*20) - (len(word)//2*10), -150, 0)\n\tturtle.penup()\n\tfor j in range(i):\n\t\tturtle.forward(20)\n\t\tturtle.forward(10)\n\tturtle.forward(10)\n\tturtle.pendown()\n\tturtle.write(char, align='center', font=(\"Arial\", 24, \"normal\"))\n\ndef play(word, out):\n\tch=raw_input('Character? ')\n\tkey=''\n\tif ch in word:\n\t\tfor i in range(len(word)):\n\t\t\tif ch==word[i]:\n\t\t\t\tkey+=ch\n\t\t\t\tpoint(word, ch, i)\n\t\t\telse:\n\t\t\t\tkey+=out[i]\n\t\treturn key\n\telse:\n\t\terror(word, ch)\n\t\treturn out\n\ndef main():\n\tword=wordlist().strip('\\n').lower()\n\tprint(word)\n\tspaces(word)\n\tout=''\n\tfor i in range(len(word)):\n\t\tout+='_'\n\twhile out != word and stage[0]<=4:\n\t\tprint(out)\n\t\tout=play(word, out)\n\tif stage[0] > 4:\n\t\tprint('DEAD!')\n\t\tturtle.bgcolor('red')\n\t\tturtle.exitonclick()\n\telse:\n\t\tprint('CONGRATS!!!! 
The word was ' + word + '!')\n\t\tturtle.exitonclick()\n\nif __name__ == '__main__':\n main()\n\t\n", "id": "1475161", "language": "Python", "matching_score": 1.5908232927322388, "max_stars_count": 0, "path": "meet-coding/layal20-try.py" }, { "content": "import turtle\nloadWindow=turtle.screen()\nturtle.speed(2)\n\nfor i in range:\n turtle.circle(5*i)\n turtle.circle(-5*i)\n turtle.left(i)\nturtle.exitonclick()\n", "id": "4025942", "language": "Python", "matching_score": 0.9606548547744751, "max_stars_count": 0, "path": "meet-coding/layal20-spiral1.py" }, { "content": "import turtle\nbgcolor.('purple')\nlayal=turtle.Turtle()\nlayal.shape('circle')\nlayal.pencolor('yellow')\nlayal.left(90)\nlayal.forward(150)\nlayal.left(90)\nlayal.forward(75)\nlayal.pencolor(white)\nlayal.left(90)\nlayal.forward(150)\nturtle.mainloop\n", "id": "6177744", "language": "Python", "matching_score": 2.020195484161377, "max_stars_count": 0, "path": "meet-coding/funturtle.py" }, { "content": "from turtle import *\nspeed(0)\npencolor('white')\nbgcolor('black')\nx=0\nup()\nrt(45)\nfd(90)\nrt(135)\n\ndown()\nwhile x <120:\n fd(200)\n rt(61)\n fd(200)\n rt(61)\n fd(200)\n rt(61)\n fd(200)\n rt(61)\n fd(200)\n rt(61)\n fd(200)\n rt(61)\n\n rt(11.1111)\n x=x+1\nexitonclick()\nturtle.mainloop\n", "id": "9067082", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "meet-coding/layal20-spiral.py" }, { "content": "strawberries=50\nweekend=True\n", "id": "4438740", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "meet-coding/turtle_party.py" }, { "content": "language=\"italian\"\ntranslation=\"ciao mondo\"\nprint(\"the translation of hello world in\" + (language)+ \"is\" + (translation))\n \nlanguage= \" esperanto\"\ntranslation= \"saluton mondo\"\nprint(\"the translation of hello world in \" + (language) + \"is \" + (translation))\n", "id": "1093540", "language": "Python", "matching_score": 0.10173582285642624, "max_stars_count": 0, "path": "meet-coding/HelloWold.py" }, { "content": "my_name=input(\"what is your name?\")\nmy_name=my_name.capitalize()\nprint(\"hello there,\" + my_name)\nlength=\" 5 \"\nprint(\"your name is\" + length + \"letters long!\")\nprint(\"the first letter of your name is \" + my_name[0] + \" and the last letter of your name is \" + my_name[4])\n", "id": "8372410", "language": "Python", "matching_score": 1.4683270454406738, "max_stars_count": 0, "path": "meet-coding/greeting.py" }, { "content": "Python 3.6.5 (default, Apr 1 2018, 05:46:30) \n[GCC 7.3.0] on linux\nType \"copyright\", \"credits\" or \"license()\" for more information.\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nTraceback (most recent call last):\n File \"/home/student/greeting.py\", line 2, in <module>\n my_name=layal\nNameError: name 'layal' is not defined\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nTraceback (most recent call last):\n File \"/home/student/greeting.py\", line 2, in <module>\n name=layal\nNameError: name 'layal' is not defined\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,layal\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,layal\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,layal\n>>> \n===================== 
RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,name\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,layal\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,layal\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,layal\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,layal\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,layal\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,layal\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,layal\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,Layal\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,Layal\nyour name is 5letters long!\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,Layal\nyour name is 5 letters long!\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,Layal\nyour name is 5 letters long!\nthe first letter of your name isL and the last letter of your name isl\n>>> \n===================== RESTART: /home/student/greeting.py =====================\nwhat is your name?layal\nhello there,Layal\nyour name is 5 letters long!\nthe first letter of your name is L and the last letter of your name is l\n>>> var=\"my name is Claire!\"\n>>> print(var[21:27])\n\n>>> var=\"Hi there, my name is Claire. 
Nice to meet you!\"\n>>> print(var[21:27])\nClaire\n>>> \n", "id": "7385924", "language": "Python", "matching_score": 4.360788822174072, "max_stars_count": 0, "path": "meet-coding/my name.py" }, { "content": "Python 3.6.5 (default, Apr 1 2018, 05:46:30) \n[GCC 7.3.0] on linux\nType \"copyright\", \"credits\" or \"license()\" for more information.\n>>> a=['she', 'selld', 'sea', 'shells', 'by', 'the', 'sea', 'shore']\n>>> b=\"selfish shellfish\"\n>>> c=[1, 1, 2, 3, 5, 8, 13]\n>>> b[3:4]\n'f'\n>>> c[:-2]\n[1, 1, 2, 3, 5]\n>>> a[2]\n'sea'\n>>> a[2:4]\n['sea', 'shells']\n>>> c[1]\n1\n>>> c[1]+c[2]\n3\n>>> a[3]\n'shells'\n>>> \" \"+a[3]\n' shells'\n>>> a*3\n['she', 'selld', 'sea', 'shells', 'by', 'the', 'sea', 'shore', 'she', 'selld', 'sea', 'shells', 'by', 'the', 'sea', 'shore', 'she', 'selld', 'sea', 'shells', 'by', 'the', 'sea', 'shore']\n>>> 'self'in b\nTrue\n>>> one=[1,2,3,4]\n>>> two=[7,6,5,4]\n>>> three=[\"y1\", \"friends\", \"fun\"]\n>>> print(one=two)\nTraceback (most recent call last):\n File \"<pyshell#17>\", line 1, in <module>\n print(one=two)\nTypeError: 'one' is an invalid keyword argument for this function\n>>> print(one+two)\n[1, 2, 3, 4, 7, 6, 5, 4]\n>>> print(one[3])\n4\n>>> one.remove(4)\n>>> print(one)\n[1, 2, 3]\n>>> one.append(4)\n>>> print(one)\n[1, 2, 3, 4]\n>>> \n", "id": "7915584", "language": "Python", "matching_score": 1.2826429605484009, "max_stars_count": 0, "path": "meet-coding/list.py" }, { "content": "new_fruit=input('what fruit am I sorting?')\nif new_fruit==\"apple\":\n print('Bin 1')\nelif new_fruit==\"oranges\":\n print ('Bin 2')\nelif new_fruit==\"olives\":\n print('Bin 3')\nelse:\n print('Error! I do not recognize this fruit!')\n \n", "id": "7621846", "language": "Python", "matching_score": 0.5148708820343018, "max_stars_count": 0, "path": "meet-coding/fruit_sorter.py" }, { "content": "print(\"instrusctor Alex asks,\\\"what are you learning today?\\\" the students reply, \\\"we are learning how to print!\"\n", "id": "7369123", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "meet-coding/specialCharacters.py" }, { "content": "indention=False\nif indention:\n print('chocolate')\n print('indentions are cool!')\n\n \n", "id": "8813998", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "meet-coding/indentation.py" }, { "content": "\ndef add_number(start, end):\n c=0\n for number in range(start,end):\n c=c+number\n return c\n\ntest1 = add_number(333,777)\nprint(test1)\n\n \n \n \n", "id": "1227378", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "meet-coding/fun1.py" }, { "content": "for i in range (100):\n count=i+1\n if count %3==0 and count%5==0:\n print(\"fizzbuzz\")\n elif count%3==0:\n print(\"fizz\")\n elif count%5==0:\n print(\"buzz\")\n else:\n print(count)\n", "id": "3120876", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "meet-coding/fizzbuzz.py" } ]
0.960655
athob
[ { "content": "import numpy as np\nfrom scipy import interpolate,linalg\n\n\ndef cumsummedian(a,weights=None):\n \"\"\"\n Compute the weighted median.\n\n Returns the median of the array elements.\n\n Parameters\n ----------\n a : array_like, shape (n, )\n Input array or object that can be converted to an array.\n weights : {array_like, shape (n, ), None}, optional\n Input array or object that can be converted to an array.\n\n Returns\n -------\n median : float\n\n \"\"\"\n if weights is None:\n weights=np.ones(np.array(a).shape)\n A = np.array(a).astype('float')\n W = np.array(weights).astype('float')\n if not(np.product(np.isnan(A))):\n I = np.argsort(A)\n cumweight = np.hstack([0,np.cumsum(W[I])])\n X = np.hstack([0,(cumweight[:-1]+cumweight[1:])/(2*cumweight[-1]),1])\n Y = np.hstack([np.min(A),A[I],np.max(A)])\n P = interpolate.interp1d(X,Y)(0.5)\n return float(P)\n else:\n return np.nan\n\ndef center_and_unloop(XYZ,XYZ0,BoxL=np.inf):\n \"\"\"\n Center and unloop the input coordinates.\n\n Returns the centered and unlooped coordinates.\n\n Parameters\n ----------\n XYZ : array_like of dtype float, shape (n, 3)\n Particles coordinates (in unit of length L) such that XYZ[:,0] = X,\n XYZ[:,1] = Y & XYZ[:,2] = Z\n XYZ0 : array_like of dtype float, shape (3, )\n Centre coordinates (in unit of length L) such that XYZ0[0] = X0,\n XYZ0[1] = Y0 & XYZ0[2] = Z0\n BoxL : float, optional\n Length of the looping cubical box. Default is infinity\n\n Returns\n -------\n XYZ_out : array of dtype float, shape (n, 3)\n Centered and unlooped particles coordinates (in unit of length L) such\n that XYZ[:,0] = X, XYZ[:,1] = Y & XYZ[:,2] = Z\n\n \"\"\"\n XYZ_out = XYZ.copy()\n XYZ_out-=XYZ0\n if np.isfinite(BoxL):\n XYZ_out+=BoxL/2.\n XYZ_out%=BoxL\n XYZ_out-=BoxL/2.\n return XYZ_out\n\ndef kinematics_diagnostics(XYZ,mass,Vxyz,PBE,aperture=0.03,CoMvelocity=True):\n \"\"\"\n Compute the various kinematics diagnostics.\n\n Returns the kinematics diagnostics for the input particles.\n\n Parameters\n ----------\n XYZ : array_like of dtype float, shape (n, 3)\n Particles coordinates (in unit of length L) such that XYZ[:,0] = X,\n XYZ[:,1] = Y & XYZ[:,2] = Z\n mass : array_like of dtype float, shape (n, )\n Particles masses (in unit of mass M)\n Vxyz : array_like of dtype float, shape (n, 3)\n Particles coordinates (in unit of velocity V) such that Vxyz[:,0] = Vx,\n Vxyz[:,1] = Vy & Vxyz[:,2] = Vz\n PBE : array_like of dtype float, shape (n, )\n Particles specific binding energies\n aperture : float, optional\n Aperture (in unit of length L) for the computation. Default is 0.03 L\n CoMvelocity : bool, optional\n Boolean to allow the centering of velocities by the considered particles\n centre-of-mass velocity. 
Default to True\n\n Returns\n -------\n kappa : float\n The kinetic energy fraction invested in co-rotation.\n discfrac : float\n The disc-to-total mass fraction estimated from the counter-rotating\n bulge.\n orbi : float\n The median orbital circularity of the particles values.\n vrotsig : float\n The rotation-to-dispersion ratio .\n delta : float\n The dispersion anisotropy.\n zaxis : array of dtype float, shape (3, )\n The unit vector of the momentum axis (pointing along the momentum direction).\n Momentum : float\n The momentum magnitude (in unit M.L.V).\n\n \"\"\"\n particlesall = np.vstack([XYZ.T,mass,Vxyz.T,PBE]).T\n # Compute distances\n distancesall = np.linalg.norm(particlesall[:,:3],axis=1)\n # Restrict particles\n extract = (distancesall<aperture)\n particles = particlesall[extract].copy()\n distances = distancesall[extract].copy()\n Mass = np.sum(particles[:,3])\n if CoMvelocity:\n # Compute CoM velocty & correct\n dvVmass = np.nan_to_num(np.sum(particles[:,3][:,np.newaxis]*particles[:,4:7],axis=0)/Mass)\n particlesall[:,4:7]-=dvVmass\n particles[:,4:7]-=dvVmass\n # Compute momentum\n smomentums = np.cross(particles[:,:3],particles[:,4:7])\n momentum = np.sum(particles[:,3][:,np.newaxis]*smomentums,axis=0)\n Momentum = np.linalg.norm(momentum)\n # Compute cylindrical quantities\n zaxis = (momentum/Momentum)\n zheight = np.sum(zaxis*particles[:,:3],axis=1)\n cylposition = particles[:,:3]-zheight[:,np.newaxis]*[zaxis]\n cyldistances = np.sqrt(distances**2-zheight**2)\n smomentumz = np.sum(zaxis*smomentums,axis=1)\n vrots = smomentumz/cyldistances\n vrads = np.sum(cylposition*particles[:,4:7]/cyldistances[:,np.newaxis],axis=1)\n vheis = np.sum(zaxis*particles[:,4:7],axis=1)\n # Compute co-rotational kinetic energy fraction\n Mvrot2 = np.sum((particles[:,3]*vrots**2)[vrots>0])\n kappa = Mvrot2/np.sum(particles[:,3]*(np.linalg.norm(particles[:,4:7],axis=1))**2)\n # Compute disc-to-total ratio\n discfrac = 1-2*np.sum(particles[vrots<=0,3])/Mass\n # Compute orbital circularity\n sbindingenergy = particles[:,7]; sortE = np.argsort(sbindingenergy); unsortE = np.argsort(sortE)\n jzE = np.vstack([sbindingenergy,smomentumz]).T[sortE]\n orbital = (jzE[:,1]/np.maximum.accumulate(np.abs(jzE[:,1])))[unsortE]\n orbi = np.median(orbital)\n # Compute rotation-to-dispersion and dispersion anisotropy\n Vrot = np.abs(cumsummedian(vrots,weights=particles[:,3]))\n SigmaXY = np.sqrt(np.average(np.sum(particles[:,[3]]*np.vstack([vrads,vrots]).T**2,axis=0)/Mass))#\n SigmaO = np.sqrt(SigmaXY**2-.5*Vrot**2)\n SigmaZ = np.sqrt(np.average(vheis**2,weights=particles[:,3]))\n vrotsig = Vrot/SigmaO\n delta = 1-(SigmaZ/SigmaO)**2\n # Return\n return kappa,discfrac,orbi,vrotsig,delta,zaxis,Momentum\n\ndef morphological_diagnostics(XYZ,mass,Vxyz,aperture=0.03,CoMvelocity=True,reduced_structure=True):\n \"\"\"\n Compute the morphological diagnostics through the (reduced or not) inertia tensor.\n\n Returns the morphological diagnostics for the input particles.\n\n Parameters\n ----------\n ----------\n XYZ : array_like of dtype float, shape (n, 3)\n Particles coordinates (in unit of length L) such that XYZ[:,0] = X,\n XYZ[:,1] = Y & XYZ[:,2] = Z\n mass : array_like of dtype float, shape (n, )\n Particles masses (in unit of mass M)\n Vxyz : array_like of dtype float, shape (n, 3)\n Particles coordinates (in unit of velocity V) such that Vxyz[:,0] = Vx,\n Vxyz[:,1] = Vy & Vxyz[:,2] = Vz\n aperture : float, optional\n Aperture (in unit of length L) for the computation. 
Default is 0.03 L\n CoMvelocity : bool, optional\n Boolean to allow the centering of velocities by the considered particles\n centre-of-mass velocity. Default to True\n reduced_structure : bool, optional\n Boolean to allow the computation to adopt the iterative reduced form of the\n inertia tensor. Default to True\n\n Returns\n -------\n ellip : float\n The ellipticity parameter 1-c/a.\n triax : float\n The triaxiality parameter (a^2-b^2)/(a^2-c^2).\n Transform : array of dtype float, shape (3, 3)\n The orthogonal matrix representing the 3 axes as unit vectors: in real-world\n coordinates, Transform[0] = major, Transform[1] = inter, Transform[2] = minor. \n abc : array of dtype float, shape (3, )\n The corresponding (a,b,c) lengths (in unit of length L).\n\n \"\"\"\n particlesall = np.vstack([XYZ.T,mass,Vxyz.T]).T\n # Compute distances\n distancesall = np.linalg.norm(particlesall[:,:3],axis=1)\n # Restrict particles\n extract = (distancesall<aperture)\n particles = particlesall[extract].copy()\n distances = distancesall[extract].copy()\n Mass = np.sum(particles[:,3])\n # Compute kinematic diagnostics\n if CoMvelocity:\n # Compute CoM velocty, correct\n dvVmass = np.nan_to_num(np.sum(particles[:,3][:,np.newaxis]*particles[:,4:7],axis=0)/Mass)\n particlesall[:,4:7]-=dvVmass\n particles[:,4:7]-=dvVmass\n # Compute momentum\n smomentums = np.cross(particlesall[:,:3],particlesall[:,4:7])\n momentum = np.sum(particles[:,3][:,np.newaxis]*smomentums[extract],axis=0)\n # Compute morphological diagnostics\n s = 1; q = 1; Rsphall = 1+reduced_structure*(distancesall-1); stop = False\n while not('structure' in locals()) or (reduced_structure and not(stop)):\n particles = particlesall[extract].copy()\n Rsph = Rsphall[extract]; Rsph/=np.median(Rsph)\n # Compute structure tensor\n structure = np.sum((particles[:,3]/Rsph**2)[:,np.newaxis,np.newaxis]*(np.matmul(particles[:,:3,np.newaxis],particles[:,np.newaxis,:3])),axis=0)/np.sum(particles[:,3]/Rsph**2)\n # Diagonalise structure tensor\n eigval,eigvec = linalg.eigh(structure)\n # Get structure direct oriented orthonormal base\n eigvec[:,2]*=np.round(np.sum(np.cross(eigvec[:,0],eigvec[:,1])*eigvec[:,2]))\n # Return minor axe\n structmainaxe = eigvec[:,np.argmin(eigval)].copy()\n # Permute base and align Y axis with minor axis in momentum direction\n sign = int(np.sign(np.sum(momentum*structmainaxe)+np.finfo(float).tiny))\n structmainaxe *= sign\n temp = np.array([1,sign,1])*(eigvec[:,(np.argmin(eigval)+np.array([(3+sign)/2,0,(3-sign)/2]))%3])\n eigval = eigval[(np.argmin(eigval)+np.array([(3+sign)/2,0,(3-sign)/2]))%3]\n # Permute base to align Z axis with major axis\n foo = (np.argmax(eigval)/2)*2\n temp = np.array([(-1)**(1+foo/2),1,1])*(temp[:,[2-foo,1,foo]])\n eigval = eigval[[2-foo,1,foo]]\n # Compute change of basis matrix\n transform = linalg.inv(temp)\n stop = (np.max((1-np.sqrt(eigval[:2]/eigval[2])/np.array([q,s]))**2)<1e-4)\n if (reduced_structure and not(stop)):\n q,s = np.sqrt(eigval[:2]/eigval[2])\n Rsphall = linalg.norm(np.matmul(transform,particlesall[:,:3,np.newaxis])[:,:,0]/np.array([q,s,1]),axis=1)\n extract = (Rsphall<aperture/(q*s)**(1/3.))\n Transform = transform.copy()\n ellip = 1-np.sqrt(eigval[1]/eigval[2])\n triax = (1-eigval[0]/eigval[2])/(1-eigval[1]/eigval[2])\n Transform = Transform[...,[2,0,1],:]#so that transform[0] = major, transform[1] = inter, transform[2] = minor\n abc = np.sqrt(eigval[[2,0,1]])\n # Return\n return ellip,triax,Transform,abc\n\n", "id": "8352656", "language": "Python", "matching_score": 0, 
"max_stars_count": 1, "path": "morphokinematicsdiagnostics.py" } ]
0
jrenslin
[ { "content": "#! /usr/bin/env python3\n# ^\n\n######## Import ########\nimport os, sys, uuid, json, pwd, copy\nfrom datetime import datetime\n\n######## Functions ########\ndef json_dump (inp):\n\treturn json.dumps(inp, sort_keys=True, indent=2)\n\ndef create_json_file (filename):\n\thandle = open (filename + '.json', \"w\")\n\thandle.write(\"{}\")\n\thandle.close()\n\ndef read_json (filename):\n\thandle = open (filename + '.json', \"r\")\n\tcontent = json.loads(handle.read())\n\thandle.close()\n\treturn content\n\ndef write_json_file (filename, content):\n\thandle = open (filename + '.json', \"w\")\n\thandle.write(json_dump(content))\n\thandle.close()\n\ndef print_help ():\n\tprint (\"Help for pyclitr\\n\")\n\t\n\tprint(\"{:<20} {:<20} {:<35}\".format(\"Command\", \"\", \"Description\"))\n\tprint(\"{:<20} {:<20} {:<35}\".format(\"init\", \"\", \"Inititalize pyclitr for the current directory\"))\n\tprint(\"{:<20} {:<20} {:<35}\".format(\"ls\", \"\", \"List all pending issues\"))\n\tprint(\"{:<20} {:<20} {:<35}\".format(\"pending\", \"\", \"alias for ls\"))\n\tprint(\"{:<20} {:<20} {:<35}\".format(\"completed\", \"\", \"List all completed issues\"))\n\tprint(\"{:<20} {:<20} {:<35}\".format(\"add\", \"<>\", \"Adds a new issue with the given name. Additional values can be set, e.g. with project:test.\"))\n\tprint(\"{:<20} {:<20} {:<35}\".format(\"delete\", \"<uuid>\", \"Delete issue with specified uuid\"))\n\tprint(\"{:<20} {:<20} {:<35}\".format(\"modify\", \"<uuid>\", \"Modify issue with specified uuid\"))\n\tprint(\"{:<20} {:<20} {:<35}\".format(\"complete\", \"<uuid>\", \"Mark issue with specified uuid as completed\"))\n\t\n\ndef note_edit (uuid, old, new):\n\tglobal pyclitr_dir\n\tedits = read_json(pyclitr_dir + 'edits')\n\tif not uuid in edits:\n\t\tedits[uuid] = []\n\tedits[uuid].append({'editor' : pwd.getpwuid(os.getuid()).pw_name, 'time' : datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'from' : old, 'to' : new})\n\t\n\twrite_json_file(pyclitr_dir + \"edits\", edits)\n\ndef dict_changes (first, second):\n\tfor key, value in second.items():\n\t\tif key not in first:\n\t\t\tprint (\"Added \" + key + str(value))\n\t\telif value != first[key]:\n\t\t\tprint (\"Changed \" + key + \" from '\" + str(first[key]) + \"' to '\" + str(value) + \"'\")\n\nif True == False:\n\tif sys.argv[2] in issues:\n\t\tissue = issues[sys.argv[2]]\n\telse:\n\t\tstatus = 'completed'\n\t\tissues = read_json(pyclitr_dir + status)\n\t\tif sys.argv[2] in issues:\n\t\t\tissue = issues[sys.argv[2]]\n\t\telse:\n\t\t\tsys.exit(\"No item with this uuid has been found.\")\n\n# Set basic variables\ncwd = os.getcwd() + '/'\npyclitr_dir = cwd + '.pyclitr/'\n\n# Check if pyclitr has been initialized for this directory\nif os.path.isdir (pyclitr_dir):\n\tinitialized = True\nelse:\n\tinitialized = False\n\n\n\nif initialized == False:\n\n\tif not len(sys.argv) > 1:\n\t\tprint (\"Hidden directory .pyclitr does not exist. To set up pyclitr, use 'pyclitr init'\\n\\n\")\n\t\tprint_help()\n\n\telif not sys.argv[1] == 'init':\n\t\tprint (\"Hidden directory .pyclitr does not exist. 
To set up pyclitr, use 'pyclitr init'\\n\\n\")\n\t\tprint_help()\n\nelif len(sys.argv) == 1 or len(sys.argv) == 2 and sys.argv[1] == 'pending' or len(sys.argv) == 2 and sys.argv[1] == 'completed':\n\t\n\tif len(sys.argv) == 2 and sys.argv[1] == 'completed':\n\t\tissues = read_json(pyclitr_dir + \"completed\")\n\telse:\n\t\tissues = read_json(pyclitr_dir + \"pending\")\n\n\torder = ['entry', 'description', 'creator', 'project', 'uuid', 'assign', 'due']\n\tprint(\"\\033[4m{:<10}\\033[0m \\033[4m{:<45}\\033[0m \\033[4m{:<10}\\033[0m \\033[4m{:<15}\\033[0m \\033[4m{:<36}\\033[0m \\033[4m{:<10}\\033[0m \\033[4m{:<10}\\033[0m\".format(order[0], order[1], order[2], order[3], order[4], order[5], order[6]))\n\tfor iuuid, issue in issues.items():\n\t\tprint(\"{:<10} {:<45} {:<10} {:<15} {:<36} {:<10} {:<10}\".format(issue[order[0]][0:10], issue[order[1]], issue[order[2]], issue[order[3]], iuuid, issue[order[5]], issue[order[6]]))\n\nif len(sys.argv) == 2:\n\n\tif sys.argv[1] == 'init':\n\n\t\tos.mkdir (pyclitr_dir)\n\t\topen(pyclitr_dir + 'config', 'a').close()\n\t\tcreate_json_file (pyclitr_dir + 'pending')\n\t\tcreate_json_file (pyclitr_dir + 'completed')\n\t\tcreate_json_file (pyclitr_dir + 'edits')\n\n\t\tprint (\"Initialized at .pyclitr\")\n\n\telif sys.argv[1] == 'help':\n\t\tprint_help()\n\nif len(sys.argv) == 3 and sys.argv[1] == 'show':\n\n\tissues = read_json(pyclitr_dir + 'pending')\n\n\tif sys.argv[2] in issues:\n\t\tissue = issues[sys.argv[2]]\n\telse:\n\t\tissues = read_json(pyclitr_dir + 'completed')\n\t\tif sys.argv[2] in issues:\n\t\t\tissue = issues[sys.argv[2]]\n\t\telse:\n\t\t\tsys.exit(\"No item with this uuid has been found.\")\n\n\tprint(\"\\033[4m{:<20}\\033[0m \\033[4m{:<35}\\033[0m\".format(\"Name\", \"Value\"))\n\tprint(\"{:<20} {:<35}\".format(\"UUID\", sys.argv[2]))\n\tfor key, value in issue.items():\n\t\tif key != 'annotation' and value != '': \n\t\t\tprint(\"{:<20} {:<35}\".format(key, value))\n\n\t# Display edits if there are any\n\tedits = read_json (pyclitr_dir + \"edits\")\n\tif sys.argv[2] in edits:\n\t\tfor i in edits[sys.argv[2]]:\n\t\t\tprint (\"\\n\\033[1m\" + i['editor'] + \"\\033[0m edited this task on \\033[4m\" + i['time'] + \"\\033[0m:\")\n\n\t\t\tdict_changes (i['from'], i['to'])\n\nif len(sys.argv) > 2 and sys.argv[1] == 'add':\n\n\tpending = read_json(pyclitr_dir + 'pending')\n\tiuuid = str(uuid.uuid1())\n\n\tissue = {\"entry\" : datetime.now().strftime('%Y-%m-%d %H:%M:%S'), \"creator\" : pwd.getpwuid(os.getuid()).pw_name, 'annotation' : [], 'project' : '', 'status' : 'pending', 'assign' : '', 'due' : ''}\n\n\targs = sys.argv[2:]\n\n\ttitle = ''\n\tfor i in args:\n\t\tattr = i.split(\":\")\n\t\tif (len(attr) == 2):\t\t\n\t\t\tissue[str(attr[0])] = str(attr[1])\n\t\telse:\n\t\t\ttitle = title + i + \" \"\n\tissue['description'] = str(title).strip(\" \")\n\n\tpending[iuuid] = issue\n\twrite_json_file (pyclitr_dir + \"pending\", pending)\n\tprint (\"Issue \\033[1m\" + title + \"\\033[0m added\")\n\nif len(sys.argv) > 2 and sys.argv[1] == 'modify':\n\n\t# Check whether this issue is pending or completed\n\tstatus = 'pending'\n\tissues = read_json(pyclitr_dir + status)\n\n\tif sys.argv[2] in issues:\n\t\tissue = issues[sys.argv[2]]\n\telse:\n\t\tstatus = 'completed'\n\t\tissues = read_json(pyclitr_dir + status)\n\t\tif sys.argv[2] in issues:\n\t\t\tissue = issues[sys.argv[2]]\n\t\telse:\n\t\t\tsys.exit(\"No item with this uuid has been found.\")\n\n\toriginal = copy.copy(issue)\n\n\targs = sys.argv[3:]\n\ttitle = ''\n\tfor i in args:\n\t\tattr = 
i.split(\":\")\n\t\tif (len(attr) == 2):\n\t\t\tissue[str(attr[0])] = str(attr[1])\n\t\telse:\n\t\t\ttitle = title + i + \" \"\n\tif title != '':\n\t\tissue['description'] = str(title).strip(\" \")\n\n\tissues[sys.argv[2]] = issue\n\twrite_json_file (pyclitr_dir + status, issues)\n\tprint (\"Issue \\033[1m\" + sys.argv[2] + \" (\" + issue['description'] + \")\" + \"\\033[0m modified\")\n\n\tnote_edit(sys.argv[2], original, issue)\n\nif len(sys.argv) > 2 and sys.argv[1] == 'complete':\n\n\t# Check whether this issue is pending or completed\n\tstatus = 'pending'\n\tissues = read_json(pyclitr_dir + status)\n\n\tif sys.argv[2] in issues:\n\t\tissue = issues[sys.argv[2]]\n\telse:\n\t\tstatus = 'completed'\n\t\tissues = read_json(pyclitr_dir + status)\n\t\tif sys.argv[2] in issues:\n\t\t\tissue = issues[sys.argv[2]]\n\t\telse:\n\t\t\tsys.exit(\"No item with this uuid has been found.\")\n\n\toriginal = copy.copy(issue)\n\n\tissue['status'] = 'completed'\n\n\tcompleted = read_json(pyclitr_dir + \"completed\")\n\tcompleted[sys.argv[2]] = issue\n\twrite_json_file (pyclitr_dir + \"completed\", completed)\n\n\tdel issues[sys.argv[2]]\n\twrite_json_file (pyclitr_dir + status, issues)\n\tprint (\"Issue \\033[1m\" + sys.argv[2] + \" (\" + issue['description'] + \")\" + \"\\033[0m moved to completed\")\n\n\tnote_edit(sys.argv[2], original, issue)\n\nif len(sys.argv) == 3 and sys.argv[1] == 'delete':\n\n\t# Check whether this issue is pending or completed\n\tstatus = 'pending'\n\tissues = read_json(pyclitr_dir + status)\n\n\tif sys.argv[2] in issues:\n\t\tissue = issues[sys.argv[2]]\n\telse:\n\t\tstatus = 'completed'\n\t\tissues = read_json(pyclitr_dir + status)\n\t\tif sys.argv[2] in issues:\n\t\t\tissue = issues[sys.argv[2]]\n\t\telse:\n\t\t\tsys.exit(\"No item with this uuid has been found.\")\n\n\tdel issues[sys.argv[2]]\n\n\twrite_json_file (pyclitr_dir + status, issues)\n\n", "id": "6623565", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "pyclitr.py" } ]
0
A2hari
[ { "content": "\n\nimport subprocess\nimport optparse\n\n\ndef change_mac(interface, mac_address):\n subprocess.call(['ifconfig', interface, 'down'])\n subprocess.call(['ifconfig', interface, 'hw', 'ether', mac_address])\n subprocess.call(['ifconfig', interface, 'up'])\n print('[+] Interface = ' + options.interface + '\\n[+] New Mac Address =' + options.mac_address)\n\n\ndef get_arguments(): #optarse part code used to get input as arguments in a single line\n parse = optparse.OptionParser()\n parse.add_option('-i', '--interface', dest=\"interface\", help='Interface of the module to change mac addres')\n parse.add_option('-m', '--mac', dest=\"mac_address\", help=\"mac address to be changed\")\n (options,arguments)= parse.parse_args()\n if not options.interface:\n parse.error('[-] please specify a valid interface. For help check --help command')\n elif not options.mac_address:\n parse.error('[-] please specify a valid mac address. For help check --help command')\n return options\n\n\n\n# interface = input('\\tInterface(wlan0/eth0/other) > ') #to change the user interface un comment this and comment the entire optparse and its code\n# mac_address = input('\\tNew Mac > ')\n\nprint('[+] Mac Address changing program ') #messages to be print\nprint('[+] NOTE::Use python 3 to run this program(recomended) ')\n\noptions = get_arguments()\n\ninterface = options.interface #interface = options.(destination(dest in parse.add_option) file name)\nmac = options.mac_address #mac= new variable\n\nchange_mac(interface, mac) #function call to change mac address\n", "id": "2659801", "language": "Python", "matching_score": 0.6042352914810181, "max_stars_count": 0, "path": "mac_changer.py" }, { "content": "from googlesearch import search\nimport os\nimport argparse\nfrom http import cookiejar\nimport requests\nimport random\n\nclass BlockAll(cookiejar.CookiePolicy):\n return_ok = set_ok = domain_return_ok = path_return_ok = lambda self, *args, **kwargs: False\n netscape = True\n rfc2965 = hide_cookie2 = False\n\ndef random_tld(tld_file,current_dir): # select a random tld\n try:\n r_lines = open(tld_file).read().splitlines()\n tld_sel = random.choice(r_lines)\n return tld_sel\n except:\n print(\"\\n\")\n print(\"[+] looks like you lost data in tld.txt file or file may be missing\")\n print(\"[+] don't panic we are here to solve.\")\n print(\"[+] download the tld.txt file from the our github project repository and paste it in \" + current_dir + \"/bin/ or simply re-clone the project\")\n\n\n\ndef google_search(file, dk, out,tld_file,current_dir): #google search function\n if(file==\"\"):\n g_dork=[\"\"]\n else:\n g_dork = open(file)\n for i in g_dork:\n count=0\n flag=0\n while (count <= 6 and flag != 1):\n TLD = random_tld(tld_file,current_dir)\n query = dk + \" \" + i\n print(query)\n output = open(out, \"a\")\n print(\"\\n[+] Fetching results for the dork => \" + query)\n print(\"[+] Fetching results using tld => \" + TLD)\n output.write(\"\\n[+] Fetching results for the dork => \" + query + \"\\n\")\n output.write(\"[+] Fetching results using tld => \" + TLD + \"\\n \\n\")\n count+=1 #This is to ensure to check using 6 different tld if triggered error while search\n try:\n for results in search(query, tld=TLD, num=10, stop=10, pause=random.choice([2, 3, 4])):\n print(results)\n output.write(results + \"\\n\")\n flag = 1\n except:\n print(\"\\n[+] Fetching results for the dork => \" + query)\n print(\"[+] Fetching results using tld => \" + TLD)\n output.write(\"\\n[+] Fetching results for the dork => \" + query 
+ \"\\n\")\n output.write(\"[+] Fetching results using tld => \" + TLD + \"\\n \\n\")\n output.close()\n output = open(out, \"a\")\n print(\"\\n-------------------------------------------------------------------------\")\n output.write(\"\\n-------------------------------------------------------------------------\")\n output.close()\ndef main():\n\n version = 1.3\n current_dir = os.getcwd()\n\n tld_file = os.path.join(current_dir, \"bin/tld.txt\") # Setting the path location for tld file.\n logo_load_loc = os.path.join(current_dir, \"bin/logo\")\n random_logo = random.choice(os.listdir(logo_load_loc))\n random_logo_file = os.path.join(logo_load_loc, random_logo) # select random file from the bin/logo directory\n os.system('cat ' + random_logo_file) # code for printing the logo\n print(\"\\n\")\n print(\" Version %.1f\\n\" % (version))\n\n s = requests.Session()\n s.cookies.set_policy(BlockAll())\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--file\", type=str, help=\"File with manually entered dorks\")\n parser.add_argument(\"-o\", \"--output\", type=str, help=\"Output file name\")\n args = parser.parse_args()\n if (args.file is not None):\n output = open(args.output, \"w\")\n output.close()\n\n if (args.output is None): # non empty parameter pass\n print(\"[+] For help press -h or --help\")\n print(\"[+] Read the documentation from https://github.com/A2hari/Xplore for more help\")\n exit()\n if (args.file is not None):\n try:\n if (os.stat(args.file).st_size == 0):\n print(\"\\n[+] \" + args.file + \" is empty \")\n print(\"[+] Read the documentation from https://github.com/A2hari/Xplore for more help\")\n exit()\n except FileNotFoundError:\n print(\"[+] No such file or directory :\" + args.file)\n print(\"[+] Read the documentation from https://github.com/A2hari/Xplore for more help\")\n exit()\n\n dork = input(\"Enter the target url/file dork : \")\n if (args.file is not None):\n google_search(args.file, dork, args.output,tld_file,current_dir)\n else:\n google_search(\"\", dork, args.output,tld_file,current_dir)\n\nif __name__==\"__main__\":\n main()", "id": "1213641", "language": "Python", "matching_score": 1.0467898845672607, "max_stars_count": 1, "path": "xplore.py" }, { "content": "#!/usr/bin/env python3\n# Copyright (c) 2018 <NAME> (@LeapSecurity)\n\nimport argparse, sys, os\nfrom lib.workbench import *\nfrom lib.soup import *\nfrom lib.export import *\nfrom lib.logger import *\n\nhunterapi = \"\" #insert hunterio api key here\n\nif not hunterapi:\n\tprint(\"[+] Your hunter api key is Empty\")\n\tprint(\"[+] Hunter Api Key is required please fill the hunter api key opening the InSpy.py File\")\n\tsys.exit(404)\n\nVersion =\"4.0\"\n\nparser = argparse.ArgumentParser(description='InSpy - A LinkedIn enumeration tool by <NAME>(TheCyberMonster)\\n A forked project of InSpy 3.0')\nparser.add_argument('company', help=\"Company name to use for tasks.\")\nparser.add_argument('--domain', help=\"Company domain to use for searching.\")\nparser.add_argument('--email', help=\"Email format to create email addresses with. [Accepted Formats: <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>, <EMAIL>]\")\nparser.add_argument('--titles', metavar='file', default=\"wordlists/title-list-small.txt\", nargs='?', help=\"Discover employees by title and/or department. Titles and departments are imported from a new line delimited file. 
[Default: title-list-small.txt]\")\noutgroup = parser.add_argument_group(title=\"Output Options\")\noutgroup.add_argument('--html', metavar='file', help=\"Print results in HTML file.\")\noutgroup.add_argument('--csv', metavar='file', help=\"Print results in CSV format.\")\noutgroup.add_argument('--json', metavar='file', help=\"Print results in JSON.\")\noutgroup.add_argument('--xml', metavar='file', help=\"Print results in XML.\")\n\nif len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\nargs = parser.parse_args()\nstart_logger(args.company)\n\nemail = args.email\ndomain = args.domain\n\nprint(\"\\nInSpy {}\".format(Version))\n\ntry:\n\tif domain and not email: #search hunter.io for email format\n\t\temail = get_email_format(args.domain, hunterapi)\n\tif email and not domain: #search clearbit for domain\n\t\tdomain = get_domain(args.company)\t\n\tif not email and not domain: #no domain or email provided - fully automate it\n\t\tdomain = get_domain(args.company)\n\t\tif domain:\n\t\t\temail = get_email_format(domain, hunterapi)\n\n\tif email and domain:\n\n\t\temail = email.replace(\"{\", \"\").replace(\"}\",\"\")\n\n\t\tprint(\"Domain: {}, Email Format: {}\".format(domain, email))\n\n\t\temployees = {}\n\n\t\tif os.path.exists(os.path.abspath(args.titles)):\n\t\t\tfor response in search_linkedin(args.company, os.path.abspath(args.titles)):\n\t\t\t\tfor name, title in get_employees(soupify(response)).items():\n\t\t\t\t\tif args.company.lower() in title.lower():\n\t\t\t\t\t\tif not name in employees:\n\t\t\t\t\t\t\temployees[name] = title\n\t\t\tprint(\"{} Employees identified\".format(len(employees.keys())))\n\t\telse:\n\t\t\tprint(os.path.abspath(args.titles))\n\t\t\tprint(\"No such file or directory: '{}'\".format(args.titles))\n\t\temails=[]\n\t\tif employees:\n\t\t\t#output employees\n\t\t\tfor name, title in employees.items():\n\t\t\t\tprint(\"{} {}\".format(name, title[:50].replace('&amp;', '&')))\n\t\t\t\n\t\t\t#craft emails\n\t\t\temails = create_emails(employees, domain, email)\n\n\n\t\t\tif emails:\n\t\t\t\t#output emails\n\t\t\t\tprint(\"Emails crafted\".format(len(emails.keys())))\n\t\t\t\tfor name, email in emails.items():\n\t\t\t\t\tprint(email)\n\n\t\t#export results\n\t\tif args.html:\n\t\t\toutput(\"html\", args.html, args.company, domain, employees, emails)\n\t\tif args.xml:\n\t\t\toutput(\"xml\", args.xml, args.company, domain, employees, emails)\n\t\tif args.json:\n\t\t\toutput(\"json\", args.json, args.company, domain, employees, emails)\n\t\tif args.csv:\n\t\t\toutput(\"csv\", args.csv, args.company, domain, employees, emails)\nexcept (KeyboardInterrupt, SystemExit):\n\tprint(\"Terminated script.\")\n", "id": "6642730", "language": "Python", "matching_score": 2.315121650695801, "max_stars_count": 1, "path": "InSpy.py" }, { "content": "import json, os, time, csv\nimport xml.dom.minidom\nfrom xml.etree.ElementTree import Element, SubElement, tostring\n\ndef output(format, file, company, domain, employees, emails):\n\tif format == \"xml\":\n\t\toxml(file, company, domain, employees, emails)\n\tif format == \"csv\":\n\t\tocsv(file, company, domain, employees, emails)\n\tif format == \"html\":\n\t\tohtml(file, company, domain, employees, emails)\n\tif format == \"json\":\n\t\tojson(file, company, domain, employees, emails)\n\n#CSV\ndef ocsv(filename, company, domain, employees, emails):\n\twith open(os.path.abspath(filename), 'a') as csvfile:\n\t\tfieldnames = [\"Employee Name\", \"Title\", \"Email\"]\n\t\twriter = csv.DictWriter(csvfile, 
fieldnames=fieldnames)\n\t\twriter.writeheader()\n\t\tfor name, title in employees.items():\n\t\t\twriter.writerow({\"Employee Name\": name, \"Title\": title.replace('&amp;', '&'), \"Email\": emails[name]})\n\n#JSON\ndef ojson(file, company, domain, employees, emails):\n\temployee_json = []\n\n\tfor name, title in employees.items():\n\t\temployee_json.append({\"name\": name, \"title\": title.replace('&amp;', '&'), \"email\": emails[name]})\n\n\tfull_json = {\n\t\"company\": {\"name\":company, \"domain\": domain},\n\t\"employees\": employee_json\n\t}\n\n\twith open(os.path.abspath(file), 'w') as f:\n\t\tf.write(json.dumps(full_json))\n\n#XML\ndef oxml(file, company, domain, employees, emails):\n\ttop = Element('InSpy')\n\tcxml = SubElement(top, 'Company')\n\n\t#company name\n\tcnxml = SubElement(cxml, \"Name\")\n\tcnxml.text = company\n\t#company domain\n\tcdxml = SubElement(cxml, \"Domain\")\n\tcdxml.text = domain\n\n\techild = SubElement(top, 'Employees')\n\n\tfor name, title in employees.items():\n\t\t\n\t\temployee = SubElement(echild, \"Employee\")\t\t\n\t\t#name\n\t\tnxml = SubElement(employee, \"Name\")\n\t\tnxml.text = name\n\t\t#title\n\t\ttxml = SubElement(employee, \"Title\")\n\t\ttxml.text = title.replace(\"&amp;\", \"&\")\n\t\t#email\n\t\texml = SubElement(employee, \"Email\")\n\t\texml.text = emails[name]\n\n\tfxml = xml.dom.minidom.parseString(tostring(top))\n\n\twith open(os.path.abspath(file), 'w') as f:\n\t\tf.write(fxml.toprettyxml())\n\n#HTML\ndef ohtml(file, company, domain, employees, emails):\n\temployee_html = []\n\n\tfor name, title in employees.items():\n\t\temployee_html.append(\"<tr><td>{name}</td><td>{title}</td><td>{email}</td></tr>\".format(name=name, title=title, email=emails[name]))\n\n\tpage = \"\"\"\n\t<html>\n\t<head><title>InSpy - {company}</title>\n\t<meta charset=\"UTF-8\">\n\t</head>\n\t<body style='font-family: arial, sans-serif; font-size: 14px; margin: 10px 0 0 20px;'>\n\t<h2>InSpy</h2>\n\t<p>Company: {company}</p><p>Date: {time}</p>\n\t<table border='1'>\n\t\t<tr style='background-color: #0057b8; color: #fff;'>\n\t\t\t<th>Employee Name</th>\n\t\t\t<th>Title</th>\n\t\t\t<th>E-mail</th>\n\t\t</tr>\n\t\t{html}\n\t</table>\n\t<br/>\n\t</body>\n\t</html>\n\t\"\"\".format(company=company, time=time.strftime(\"%Y/%m/%d %H:%M:%S\"), html=employee_html)\n\n\twith open(os.path.abspath(file), 'w') as f:\n\t\tf.write(page)\n", "id": "2449845", "language": "Python", "matching_score": 2.051103115081787, "max_stars_count": 1, "path": "lib/export.py" } ]
1.548946
Elaina-Alex
[ { "content": "from instance import *\r\nfrom API import HttpUtil, UrlConstants\r\n\r\nclass Login:\r\n\r\n def __init__(self, username, password):\r\n self.username = username\r\n self.password = password\r\n\r\n\r\n def Login_account(self):\r\n user_data = {'account': self.username, 'pwd': self.password}\r\n login_user = HttpUtil.post(UrlConstants.USER_LOGIN, data=user_data)\r\n # print(login_user)\r\n login_info, login_code, login_msg = (\r\n login_user.get('data'), login_user.get('code'),\r\n login_user.get('msg'))\r\n if login_code == 1 and login_msg == 'ok':\r\n user_id, nickname, user_account, user_sex, user_token, user_img = (\r\n login_info['user_id'], str(login_info['nickname']),\r\n login_info['user_account'], login_info['user_sex'],\r\n login_info['user_token'], login_info['user_img'])\r\n Vars.cfg.data['nickname'] = nickname\r\n Vars.cfg.data['user_token'] = user_token\r\n Vars.cfg.data['user_id'] = user_id\r\n Vars.cfg.save()\r\n print(\"{} login successfully!\".format(nickname))\r\n \r\n elif login_code == 0 and login_msg == '账号或密码错误!':\r\n print(login_msg, '自动注册')\r\n self.register()\r\n\r\n\r\n\r\n def register(self):\r\n user_data = {'account': self.username, 'pwd': self.password}\r\n register_info = HttpUtil.post(UrlConstants.USER_REGISTER, data=user_data)\r\n register_msg, register_code, register_data = (\r\n register_info.get('msg'), register_info.get('code'), register_info.get('data'))\r\n \r\n if register_msg == '该账号已存在!' or register_code == 0:\r\n print(register_msg, ',不在进行注册,请检查账号或密码是否正确')\r\n \r\n elif register_data != []:\r\n user_id, nickname, user_token = (\r\n register_data['user_id'], str(register_data['nickname']),\r\n register_data['user_token']\r\n )\r\n Vars.cfg.data['nickname'] = nickname\r\n Vars.cfg.data['user_token'] = user_token\r\n Vars.cfg.data['user_id'] = user_id\r\n Vars.cfg.data['password'] = <PASSWORD>\r\n Vars.cfg.save()\r\n print(\"{} login successfully!\".format(nickname))", "id": "6801726", "language": "Python", "matching_score": 1.900902509689331, "max_stars_count": 2, "path": "function/userlogin.py" }, { "content": "BOOK_INDEX = 'novel/txt/0/{}/index.html'\nUSER_LOGIN = 'user/login'\nCHAP_CONTENT = 'novel/txt/{}/{}/{}.html'\nSERCH_BOOK = 'Search/index?key={}&page={}'\nUSER_REGISTER = 'user/register'\nCATEGOR_URL = 'novel/lists?order=0&status=0&sex=1&page={}&type={}'\n\n\n\nWEB_SITE = 'https://api.laomaoxs.com/'", "id": "2516623", "language": "Python", "matching_score": 0.5756215453147888, "max_stars_count": 2, "path": "API/UrlConstants.py" }, { "content": "import re\n\nimport requests\nfrom lxml import etree\n\n\ndef str_mid(string: str, left: str, right: str, start=None, end=None):\n pos1 = string.find(left, start, end)\n if pos1 > -1:\n pos2 = string.find(right, pos1 + len(left), end)\n if pos2 > -1:\n return string[pos1 + 1: pos2]\n return ''\n\n\nheaders = {\n 'pragma': 'no-cache',\n\n 'cache-control': 'no-cache',\n 'upgrade-insecure-requests': '1',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 '\n 'YaBrowser/19.7.0.1635 Yowser/2.5 Safari/537.36',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'\n 'application/signed-exchange;v=b3;q=0.9',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'zh-CN,zh-TW;q=0.9,zh;q=0.8,en-US;q=0.7,en;q=0.6',\n}\npage = ''\nbookid = ''\ndef content_downlaod():\n response = requests.get(f'https://fu44.pw/e/action/ShowInfo.php?classid={page}&id={bookid}', headers=headers)\n 
response.encoding = 'GBK'\n # 解析HTML文档,返回根节点对象\n html = etree.HTML(response.text)\n\n book_name = str_mid(html.xpath('/html/body/main/div/div[2]/div[1]/h2/text()')[0], '【', '】')\n content = html.xpath(\"/html/body/main/div/div[2]/div[1]/div[1]/text()\")\n print(book_name)\n\n content_list = []\n for line in content:\n if book_name in line:\n continue\n if '作者' in line:\n author_name = re.sub('作者:', '', line)\n continue\n content_line = re.sub('「', '“', line)\n content_line = re.sub('」', '”\\n  ', content_line)\n content_list.append(content_line + '\\n')\n\n file = open(f'{book_name}.txt', 'a', encoding='utf-8')\n file.write(''.join(content_list))\n", "id": "7512041", "language": "Python", "matching_score": 3.2477662563323975, "max_stars_count": 0, "path": "run.py" }, { "content": "import requests\r\nfrom rich import print\r\nimport re\r\n\r\n\r\ndef str_mid(string: str, left: str, right: str, start=None, end=None):\r\n pos1 = string.find(left, start, end)\r\n if pos1 > -1:\r\n pos2 = string.find(right, pos1 + len(left), end)\r\n if pos2 > -1:\r\n return string[pos1 + len(left): pos2]\r\n return ''\r\n\r\n\r\nheaders = {\r\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) '\r\n 'Chrome/96.0.4664.110 Mobile Safari/537.36 Edg/96.0.1054.62 '\r\n}\r\n\r\n\r\ndef get(url, **kwargs):\r\n params = kwargs.get(\"params\")\r\n try:\r\n return requests.get(url, params=params, headers=headers).text\r\n except Exception as e:\r\n print(\"get请求错误: %s\" % e)\r\n\r\n\r\nchapterurl_list = []\r\nresult = get('https://book.sfacg.com/Novel/492364/MainIndex/')\r\n# print(result)\r\ndir_ = str_mid(result, '<ul class=\"mulu_list\">', '<div class=\"bottom_menu\">')\r\nhtml_list = dir_.replace('\\r\\n<a href=\"/c/', '/\"><li>').split('/\"><li>')\r\nfor chapterid in html_list:\r\n if chapterid.isdigit():\r\n chapterurl_list.append('https://book.sfacg.com/Novel/492364/659995/{}'.format(chapterid))\r\n# print(chapterurl_list)\r\n\r\nfor url in chapterurl_list:\r\n chapter_result = get(url)\r\n title = str_mid(chapter_result, '返回</a></li><li>', '</li>')\r\n content = str_mid(chapter_result, '<div style=\"text-indent: 2em;\">', '</p></div>')\r\n content = re.sub('</p>', '\\n', content)\r\n content = re.sub('<p>', '  ', content)\r\n print(title)\r\n print(content)", "id": "8894539", "language": "Python", "matching_score": 1.9980498552322388, "max_stars_count": 1, "path": "boluobao_web.py" }, { "content": "# coding=utf-8\nfrom config import *\nfrom rich import print\n\n\nclass Vars:\n cfg = Config('Settings - Config.json', os.getcwd())\n\n\ndef str_mid(string: str, left: str, right: str, start=None, end=None):\n pos1 = string.find(left, start, end)\n if pos1 > -1:\n pos2 = string.find(right, pos1 + len(left), end)\n if pos2 > -1:\n return string[pos1 + len(left): pos2]\n return ''\n\n\ndef isCN(info):\n cn_no = 0\n for ch in str(info):\n if '\\u4e00' <= ch <= '\\u9fff':\n cn_no += 1\n return 20 - cn_no\n\n\ndef re_book_name(novel_name: str):\n return re.sub(r'[??*|“<>:/]', '', novel_name)\n\n\ndef input_(prompt, default=None):\n while True:\n ret = input(prompt)\n if ret != '':\n return ret\n elif default is not None:\n return default\n\n\nclass obj(object):\n def __init__(self, d):\n for a, b in d.items():\n if isinstance(b, (list, tuple)):\n setattr(self, a, [obj(x) if isinstance(\n x, dict) else x for x in b])\n else:\n setattr(self, a, obj(b) if isinstance(b, dict) else b)\n\n\ndef write(path: str, mode: str, info=None):\n if info is not None:\n try:\n with 
open(path, f'{mode}', encoding='UTF-8', newline='') as file:\n file.writelines(info)\n except (UnicodeEncodeError, UnicodeDecodeError) as e:\n print(e)\n with open(path, f'{mode}', encoding='gbk', newline='') as file:\n file.writelines(info)\n else:\n try:\n return open(path, f'{mode}', encoding='UTF-8')\n except (UnicodeEncodeError, UnicodeDecodeError) as error:\n print(error)\n return open(path, f'{mode}', encoding='gbk')\n\n\ndef mkdir(path: str):\n if not os.path.exists(path):\n os.mkdir(path)\n\n\ndef makedirs(path: str):\n if not os.path.exists(path):\n os.makedirs(path)\n", "id": "2303670", "language": "Python", "matching_score": 2.637089490890503, "max_stars_count": 0, "path": "instance.py" }, { "content": "import re\nimport time\nfrom config import *\n\n\nclass Vars:\n cfg = Config('Config.json', os.getcwd())\n book_info = None\n epub_info = None\n\n\nclass Msgs:\n msg_help = [\n \"输入指令, 输入首字母即可 | 爱下电子书网址:https://m.aixdzs.com/\",\n \"d | bookid\\t\\t\\t\\t\\t———输入书籍序号下载单本小说\",\n \"t | tagid\\t\\t\\t\\t\\t———输入分类号批量下载分类小说\",\n \"n | bookname\\t\\t\\t\\t\\t———下载单本小说\",\n \"h | help\\t\\t\\t\\t\\t———获取使用程序帮助\",\n \"q | quit\\t\\t\\t\\t\\t———退出运行的程序\",\n \"m | method\\t\\t\\t\\t\\t———切换多线程和多进程\",\n \"p | pool\\t\\t\\t\\t\\t———改变线程数目\",\n \"u | updata\\t\\t\\t\\t\\t———下载指定文本中的bookid \",\n ]\n msg_agree_terms = '是否以仔细阅读且同意LICENSE中叙述免责声明\\n如果同意声明,请输入英文 \\\"yes\\\" 或者中文 \\\"同意\\\" \"\\\n \"后按Enter建,如果不同意请关闭此程式'\n msg_tag = {1: '玄幻', 2: '奇幻', 3: '武侠', 4: '仙侠', 5: '都市', 6: '职场', 7: '历史',\n 8: '军事', 9: '游戏', 10: '竞技', 11: '科幻', 12: '灵异', 13: '同人', 14: '轻小说'}\n\n\ndef mkdir(file_path: str):\n if not os.path.exists(file_path):\n os.mkdir(file_path)\n\n\ndef makedirs(file_path: str):\n if not os.path.exists(os.path.join(file_path)):\n os.makedirs(os.path.join(file_path))\n\n\ndef isCN(book_name):\n cn_no = 0\n for ch in book_name:\n if '\\u4e00' <= ch <= '\\u9fff':\n cn_no += 1\n return 40 - cn_no\n\n\ndef inputs_(prompt, default=None):\n while True:\n ret = input(prompt)\n if ret != '':\n return ret\n elif default is not None:\n return default\n\n\ndef content_(content: str):\n return ''.join([re.sub(r'^\\s*', \"\\n  \", content)\n for content in content.split(\"\\n\") if re.search(r'\\S', content) is not None])\n\n\ndef write(path: str, mode: str, info=None):\n if info is not None:\n try:\n with open(path, f'{mode}', encoding='utf-8', newline='') as file:\n file.write(info)\n except (UnicodeEncodeError, UnicodeDecodeError) as error:\n print(\"error:\", error)\n with open(path, f'{mode}', encoding='gbk', newline='') as file:\n file.write(info)\n else:\n try:\n return open(path, f'{mode}', encoding='utf-8')\n except (UnicodeEncodeError, UnicodeDecodeError) as e:\n return open(path, f'{mode}', encoding='gbk')\n\n\ndef setup_config():\n Vars.cfg.load()\n config_change = False\n if type(Vars.cfg.data.get('save_book')) is not str or Vars.cfg.data.get('save_book') == \"\":\n Vars.cfg.data['save_book'] = 'novel'\n config_change = True\n if type(Vars.cfg.data.get('config_book')) is not str or Vars.cfg.data.get('config_book') == \"\":\n Vars.cfg.data['config_book'] = 'config'\n config_change = True\n if type(Vars.cfg.data.get('max_threads')) is not int or Vars.cfg.data.get('max_threads') == \"\":\n Vars.cfg.data['max_threads'] = 12\n config_change = True\n\n if type(Vars.cfg.data.get('real_time_cache')) is not bool:\n Vars.cfg.data['real_time_cache'] = False\n config_change = True\n if config_change:\n Vars.cfg.save()\n if not os.path.exists(Vars.cfg.data.get('save_book')):\n 
mkdir(Vars.cfg.data.get('save_book'))\n if not os.path.exists(Vars.cfg.data.get('config_book')):\n mkdir(Vars.cfg.data.get('config_book'))\n", "id": "7888637", "language": "Python", "matching_score": 4.054113864898682, "max_stars_count": 1, "path": "instance.py" }, { "content": "from config import *\nimport os\nimport re\nimport time\nfrom rich import print\n\n\nclass Vars:\n cfg = Config('Config.json', os.getcwd())\n current_bookshelf = None\n current_book = None\n\n\ndef mkdir(file_path):\n if not os.path.exists(file_path):\n os.mkdir(file_path)\n print(f'已创建{file_path}文件夹')\n else:\n pass\n\n\ndef makedirs(file_path, file_name):\n if not os.path.exists(os.path.join(file_path, file_name)):\n os.makedirs(os.path.join(file_path, file_name))\n\n\ndef time_(_time_):\n if type(_time_) is not int:\n _time_ = int(_time_)\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(_time_))\n\n\ndef get(prompt, default=None):\n while True:\n ret = input(prompt)\n if ret != '':\n return ret\n elif default is not None:\n return default\n\n\ndef del_title(title):\n \"\"\"删去windowns不规范字符\"\"\"\n return re.sub(r'[??\\*|“<>:/\\\\]', '', title)\n\n\ndef content_(content):\n return ''.join([re.sub(r'^\\s*', \"\\n  \", content)\n for content in content.split(\"\\n\") if re.search(r'\\S', content) != None])\n\n\ndef write(PATH, mode, info=None):\n if info is not None:\n try:\n with open(PATH, f'{mode}', encoding='UTF-8', newline='') as file:\n file.writelines(info)\n except (UnicodeEncodeError, UnicodeDecodeError)as e:\n print(e)\n with open(PATH, f'{mode}', encoding='gbk', newline='') as file:\n file.writelines(info)\n else:\n try:\n file = open(PATH, f'{mode}', encoding='UTF-8')\n return file\n except (UnicodeEncodeError, UnicodeDecodeError) as e:\n print(e)\n file = open(PATH, f'{mode}', encoding='gbk')\n return file\n", "id": "603728", "language": "Python", "matching_score": 2.0702614784240723, "max_stars_count": 2, "path": "instance.py" }, { "content": "import os\r\nimport re\r\n\r\n\r\ndef inputs() -> str:\r\n while True:\r\n info = input(\"输入文件夹名称:\")\r\n if info != \"\" and info is not None:\r\n return info\r\n else:\r\n print(\"请输入文件夹名!\")\r\n\r\n\r\ndef file_list(file_path: str) -> list:\r\n dir_list = os.listdir(file_path)\r\n return sorted(dir_list, key=lambda x: os.path.getmtime(os.path.join(file_path, x))) \\\r\n if dir_list else []\r\n\r\n\r\ndef merge_file(files_name: str):\r\n file = open(\"out_file.txt\", \"a\", encoding=\"utf-8\")\r\n for file_name in file_list(files_name):\r\n with open(os.path.join(files_name, file_name), \"r\", encoding=\"utf-8\", newline=\"\") as f:\r\n content = \"\"\r\n for line in f.read().splitlines():\r\n line = line.replace(\"\\u3000\", \"\")\r\n if re.compile(r'\\S').search(line) is not None:\r\n content += re.sub(r\"^(\\s*)\", \"\\n  \", line)\r\n file.write(\"\\n\\n\\n\\n\" + file_name.replace(\".txt\", \"\\n\") + content)\r\n file.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n merge_file(inputs())\r\n", "id": "12148201", "language": "Python", "matching_score": 1.6595498323440552, "max_stars_count": 0, "path": "run.py" }, { "content": "# coding=utf-8\nimport json\nimport os\nimport re\nimport time\nimport sys\n\n\nclass Config:\n file_path = None\n dir_path = None\n data = None\n\n def __init__(self, file_path, dir_path):\n self.file_path = file_path\n self.dir_path = dir_path\n self.data = {}\n\n def load(self):\n try:\n with open(self.file_path, 'r', encoding='utf-8') as f:\n self.data = json.load(f) or {}\n except FileNotFoundError:\n try:\n if not 
os.path.isdir(self.dir_path):\n os.makedirs(self.dir_path)\n with open(self.file_path, 'w'):\n pass\n except Exception as e:\n print('error: ', e)\n print('error: while creating config file: ' + self.file_path)\n except Exception as e:\n pass\n # print('error: ', e)\n # print('error: while reading config file: ' + self.file_path)\n\n def save(self):\n try:\n if not os.path.isdir(self.dir_path):\n os.makedirs(self.dir_path)\n with open(self.file_path, 'w', encoding='utf-8') as f:\n json.dump(self.data, f, indent=4, ensure_ascii=False)\n except Exception as e:\n print('error: ', e)\n print('error: while saving config file: ' + self.file_path)\n", "id": "5555865", "language": "Python", "matching_score": 0.4434141516685486, "max_stars_count": 0, "path": "config.py" }, { "content": "import requests\r\nfrom instance import *\r\nimport urllib\r\nimport re\r\nimport os\r\nimport zipfile\r\nimport shutil\r\n\r\n\r\nVars.cfg.load()\r\n\r\n\r\nclass Epub:\r\n\r\n def __init__(self, book_name, book_id, author_name, tag_name, book_description, lastUpdateTime):\r\n self.book_name = book_name\r\n self.bookid = book_id\r\n self.book_description = book_description\r\n self.author_name = author_name\r\n self.tag_name = tag_name\r\n self.lastUpdateTime = lastUpdateTime\r\n\r\n def create_mimetype(self):\r\n write(os.path.join(Vars.cfg.data.get('save_dir'),\r\n self.book_name, 'mimetype'), 'w', 'application/epub+zip')\r\n\r\n def set_cover(self, url: str, png_name=None):\r\n if png_name is None:\r\n image_path = self.tempdir + '/OEBPS/Images/cover.jpg'\r\n else:\r\n image_path = self.tempdir + '/OEBPS/Images/' + png_name\r\n if os.path.exists(image_path):\r\n if os.path.getsize(image_path) != 0:\r\n return\r\n for retry in range(10):\r\n try:\r\n urllib.request.urlretrieve(url, image_path)\r\n return\r\n except OSError as e:\r\n print('下载封面图片失败')\r\n\r\n def create_content(self):\r\n chaptrt_content = ''\r\n chaptrt_content += \"<?xml version='1.0' encoding='utf-8'?>\\r\\n\"\r\n chaptrt_content += '<!DOCTYPE html>\\r\\n'\r\n chaptrt_content += '<html xmlns=\"http://www.w3.org/1999/xhtml\" xmlns:epub=\"http://www.idpf.org/2007/ops\" epub:prefix=\"z3998: http://www.daisy.org/z3998/2012/vocab/structure/#\" lang=\"zh-CN\" xml:lang=\"zh-CN\">\\r\\n'\r\n chaptrt_content += '<head>\\r\\n<title>{self.chapter_title}</title>\\r\\n'\r\n chaptrt_content += '<link href=\"style/default.css\" rel=\"stylesheet\" type=\"text/css\"/>\\r\\n</head>\\r\\n'\r\n chaptrt_content += f\"<body>\\r\\n<h1>{self.chapter_title}</h1>\\r\\n<p></p>\\r\\n\"\r\n content_line = self.content.split('\\n')\r\n for line in content_line:\r\n chaptrt_content += f\"<p>{line}</p>\\r\\n\"\r\n chaptrt_content += \"\\r\\n</body>\\r\\n</html>\"\r\n self.file_chapter_name = str(self.number).rjust(\r\n 4, \"0\") + '-' + f'{self.chapter_title}'\r\n path = os.path.join(self.book_name, 'OEBPS', 'Text',\r\n self.file_chapter_name + '.xhtml')\r\n\r\n with open(path, 'w', encoding='utf-8') as file:\r\n file.write(chaptrt_content)\r\n\r\n def style_flie(self):\r\n style_flie_path = os.path.join(Vars.cfg.data.get(\r\n 'save_dir'), self.book_name, 'OEBPS', 'style')\r\n if not os.path.exists(style_flie_path):\r\n os.mkdir(style_flie_path)\r\n \"\"\"book_name/OEBPS/style/nav.css\"\"\"\r\n nav_css = ''\r\n nav_css += 'body {font-family: Auto;}\\r\\n'\r\n nav_css += 'p{font-family: Auto;\\r\\ntext-indent: 2em;}\\r\\n'\r\n nav_css += 'h2 {text-align: left;\\r\\ntext-transform: uppercase;\\r\\nfont-weight: 200;}\\r\\n'\r\n nav_css += 'ol {list-style-type: none;}\\r\\n'\r\n 
nav_css += 'ol > li:first-child {margin-top: 0.3em;}\\r\\n'\r\n nav_css += \"nav[epub|type~='toc'] > ol > li > ol {list-style-type:square;}\\r\\n\"\r\n nav_css += \"nav[epub|type~='toc'] > ol > li > ol > li {margin-top: 0.3em;}\\r\\n\"\r\n write(os.path.join(style_flie_path, 'nav.css'), 'w', nav_css)\r\n\r\n \"\"\"book_name/OEBPS/style/default.css\"\"\"\r\n default_css = ''\r\n default_css += \"body {font-size:100%;}\\r\\n\"\r\n default_css += \"p{font-family: Auto;\\r\\ntext-indent: 2em;}\\r\\n\"\r\n default_css += \"h1{font-style: normal;\\r\\nfont-size: 20px;\\r\\nfont-family: Auto;}\\r\\n\"\r\n write(os.path.join(style_flie_path, 'default.css'), 'w', default_css)\r\n\r\n def create_info(self):\r\n path_cover = os.path.join(Vars.cfg.data.get(\r\n 'save_dir'), self.book_name, \"OEBPS\", \"Text\")\r\n if not os.path.exists(path_cover):\r\n os.makedirs(path_cover)\r\n intro_cover = ''\r\n intro_cover += \"<?xml version='1.0' encoding='utf-8'?>\\r\\n<!DOCTYPE html>\"\r\n intro_cover += '<html xmlns=\"http://www.w3.org/1999/xhtml\" xmlns:epub=\"http://www.idpf.org/2007/ops\" epub:prefix=\"z3998: http://www.daisy.org/z3998/2012/vocab/structure/#\" lang=\"zh-CN\" xml:lang=\"zh-CN\">'\r\n intro_cover += '<head>\\r\\n<title>书籍封面</title>\\r\\n</head>'\r\n \"\"\"图片路径../Images/cover.png\"\"\"\r\n intro_cover += '<body>\\r\\n<div style=\"text-align: center; padding: 0pt; margin: 0pt;\">\\r\\n'\r\n intro_cover += '<svg xmlns=\"http://www.w3.org/2000/svg\" height=\"100%\" preserveAspectRatio=\"xMidYMid meet\" version=\"1.1\" viewBox=\"0 0 179 248\" width=\"100%\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">'\r\n intro_cover += '<image height=\"248\" width=\"179\" xlink:href=\"../Images/cover.jpg\"></image>\\r\\n</svg>\\r\\n'\r\n intro_cover += '</div>\\r\\n</body>\\r\\n</html>'\r\n text = f'<h1>书名:{self.book_name}</h1>\\r\\n' + \\\r\n f'<h3>序号:{self.bookid}</h3>\\r\\n' + \\\r\n f'<h3>作者:{self.author_name}</h3>\\r\\n' + \\\r\n f'<h3>更新:{self.lastUpdateTime}</h3>\\r\\n' + \\\r\n f'<h3>标签:{self.tag_name}</h3>\\r\\n' + \\\r\n f'<h3>简介:{self.book_description}</h3>'\r\n text = re.sub('</body>\\r\\n</html>', text +\r\n '\\r\\n</body>\\r\\n</html>', intro_cover)\r\n write(os.path.join(path_cover, 'cover.xhtml'), 'w', text)\r\n\r\n def create_container(self):\r\n \"\"\"bookname/META-INF/container.xml\"\"\"\r\n container_infp = ''\r\n container_infp += \"<?xml version='1.0' encoding='utf-8'?>\\r\\n\"\r\n container_infp += '<container xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\" version=\"1.0\">\\r\\n'\r\n container_infp += '<rootfiles>\\r\\n<rootfile media-type=\"application/oebps-package+xml\" full-path=\"OEBPS/content.opf\"/>'\r\n container_infp += '</rootfiles>\\r\\n</container>'\r\n container_flie_path = os.path.join(\r\n Vars.cfg.data.get('save_dir'), self.book_name, 'META-INF')\r\n if not os.path.exists(container_flie_path):\r\n os.mkdir(container_flie_path)\r\n write(os.path.join(container_flie_path,\r\n 'container.xml'), 'w', container_infp)\r\n\r\n def create_toc(self):\r\n nav = \"\"\r\n nav += \"<?xml version='1.0' encoding='utf-8'?>\\r\\n\"\r\n nav += '<ncx xmlns=\"http://www.daisy.org/z3986/2005/ncx/\" version=\"2005-1\">\\r\\n'\r\n nav += '<head>\\r\\n<meta content=\"{self.bookid}\" name=\"dtb:uid\"/>\\r\\n'\r\n nav += '<meta content=\"0\" name=\"dtb:depth\"/>\\r\\n'\r\n nav += '<meta content=\"0\" name=\"dtb:totalPageCount\"/>\\r\\n'\r\n nav += '<meta content=\"0\" name=\"dtb:maxPageNumber\"/>\\r\\n'\r\n nav += '</head>\\r\\n<docTitle>\\r\\n'\r\n nav += 
'<text>{self.book_name}</text>\\r\\n'\r\n nav += '</docTitle>\\r\\n<navMap>\\r\\n'\r\n chapter_nav = ''\r\n chapter_nav += '<navPoint id=\"${chapter_title}\">\\r\\n<navLabel>\\r\\n'\r\n chapter_nav += '<text>${chapter_title}</text>\\r\\n</navLabel>\\r\\n'\r\n chapter_nav += '<content src=\"${file_chapter_name}.xhtml\"/>\\r\\n</navPoint>\\r\\n'\r\n nav += chapter_nav + '</navMap>\\r\\n</ncx>'\r\n toc_file_path = os.path.join(Vars.cfg.data.get(\r\n 'save_dir'), self.book_name, \"OEBPS\")\r\n if not os.path.exists(os.path.join(toc_file_path, 'toc.ncxl')):\r\n write(os.path.join(toc_file_path, 'toc.ncxl'), 'a', nav)\r\n\r\n def add_toc(self, Volume, title, file_chapter_name):\r\n toc_file_path = os.path.join(Vars.cfg.data.get(\r\n 'save_dir'), self.book_name, \"OEBPS\", 'toc.ncxl')\r\n toc_file = write(os.path.join(toc_file_path), 'r').read()\r\n # print(toc_file)\r\n add_toc = toc_file.replace('${Volume_name}', Volume)\r\n add_toc = add_toc.replace('${chapter_title}', title)\r\n add_toc = add_toc.replace('${file_chapter_name}', file_chapter_name)\r\n chapter_nav = '<navPoint id=\"${chapter_title}\">\\r\\n<navLabel>\\r\\n'\r\n chapter_nav += '<text>${chapter_title}</text>\\r\\n</navLabel>\\r\\n'\r\n chapter_nav += '<content src=\"${file_chapter_name}.xhtml\"/>\\r\\n</navPoint>\\r\\n'\r\n add_toc = add_toc.replace('</navMap>', chapter_nav + '\\r\\n</navMap>')\r\n write(toc_file_path, 'w', add_toc)\r\n\r\n def create_content_opf(self):\r\n content_opf = \"<?xml version='1.0' encoding='utf-8'?>\\r\\n\"\r\n content_opf = '<package xmlns=\"http://www.idpf.org/2007/opf\" unique-identifier=\"id\" version=\"3.0\" prefix=\"rendition: http://www.idpf.org/vocab/rendition/#\">\\r\\n'\r\n content_opf += '<metadata xmlns:dc=\"http://purl.org/dc/elements/1.1/\" xmlns:opf=\"http://www.idpf.org/2007/opf\">\\r\\n'\r\n content_opf += '<meta property=\"dcterms:modified\">{self.lastUpdateTime}</meta>\\r\\n'\r\n content_opf += '<dc:identifier id=\"id\">{self.bookid}</dc:identifier>\\r\\n'\r\n content_opf += '<dc:title>{self.book_name}</dc:title>\\r\\n'\r\n content_opf += '<dc:language>zh-CN</dc:language>\\r\\n'\r\n content_opf += '<dc:creator id=\"creator\">{self.author_name}</dc:creator>\\r\\n'\r\n content_opf += '<meta name=\"generator\" content=\"Ebook-lib 0.17.1\"/>\\r\\n'\r\n content_opf += '<meta name=\"cover\" content=\"cover-img\"/>\\r\\n'\r\n content_opf += '</metadata>\\r\\n<manifest>\\r\\n'\r\n\r\n content_opf_href = '<item href=\"image/cover.png\" id=\"cover-img\" media-type=\"image/png\" properties=\"cover-image\"/>\\r\\n'\r\n content_opf_href += '<item href=\"Text/cover.xhtml\" id=\"cover\" media-type=\"application/x-dtbncx+xml\"/>\\r\\n'\r\n content_opf_href += '<item href=\"style/default.css\" id=\"style_default\" media-type=\"text/css\"/>\\r\\n'\r\n content_opf_href += '<item href=\"style/nav.css\" id=\"style_nav\" media-type=\"text/css\"/>\\r\\n'\r\n content_opf_href += '<item href=\"toc.ncx\" id=\"ncx\" media-type=\"application/x-dtbncx+xml\"/>\\r\\n'\r\n content_opf_href += '<item href=\"nav.xhtml\" id=\"nav\" media-type=\"application/xhtml+xml\" properties=\"nav\"/>\\r\\n'\r\n\r\n content_opf_href += '<item href=\"Text/${file_chapter_name}.xhtml\" id=\"${file_chapter_name}\" media-type=\"application/xhtml+xml\"/>\\r\\n'\r\n\r\n content_opf_href += '</manifest>\\r\\n'\r\n content_opf_href += '<spine toc=\"ncx\">\\r\\n'\r\n content_opf_href += '<itemref idref=\"nav\"/>\\r\\n'\r\n content_opf_href += '<itemref idref=\"$id{file_chapter_name}\"/>\\r\\n'\r\n content_opf_href += 
'</spine>\\r\\n</package>\\r\\n'\r\n opf_file_path = os.path.join(Vars.cfg.data.get('save_dir'), self.book_name, \"OEBPS\", 'content.opf')\r\n if not os.path.exists(opf_file_path):\r\n write(opf_file_path, 'w', content_opf + content_opf_href)\r\n\r\n def add_content_opf(self, file_chapter_name):\r\n opf_file_path = os.path.join(Vars.cfg.data.get(\r\n 'save_dir'), self.book_name, \"OEBPS\", 'content.opf')\r\n opf_file = write(os.path.join(opf_file_path), 'r').read()\r\n print(opf_file)\r\n add_opf = opf_file.replace('${file_chapter_name}', file_chapter_name)\r\n add_opf = add_opf.replace(\r\n '</manifest>', '<item href=\"Text/${file_chapter_name}.xhtml\" id=\"${file_chapter_name}\" media-type=\"application/xhtml+xml\"/>\\r\\n</manifest>')\r\n add_opf = add_opf.replace('$id{file_chapter_name}', file_chapter_name)\r\n add_opf = add_opf.replace(\r\n '</spine>', '<itemref idref=\"$id{file_chapter_name}\"/>\\r\\n</spine>')\r\n write(opf_file_path, 'w', add_opf)\r\n\r\n def create_nav(self):\r\n nav = \"<?xml version='1.0' encoding='utf-8'?>\"\r\n nav += '<!DOCTYPE html>'\r\n nav += '<html xmlns=\"http://www.w3.org/1999/xhtml\" xmlns:epub=\"http://www.idpf.org/2007/ops\" lang=\"zh-CN\" xml:lang=\"zh-CN\">\\r\\n'\r\n nav += '<head>\\r\\n'\r\n nav += '<title>{self.book_name}</title>\\r\\n'\r\n nav += '</head>\\r\\n<body>\\r\\n'\r\n nav += '<nav epub:type=\"toc\" id=\"id\" role=\"doc-toc\">\\r\\n'\r\n nav += '<h2>{self.book_name}</h2>\\r\\n'\r\n nav += '<ol>\\r\\n<li>\\r\\n'\r\n nav += '<a href=\"${file_chapter_name}.xhtml\">${chapter_title}</a>\\r\\n'\r\n nav += '</li>\\r\\n</ol>\\r\\n</nav>\\r\\n</body>\\r\\n</html>\\r\\n'\r\n nav_file_path = os.path.join(Vars.cfg.data.get('save_dir'), self.book_name, \"OEBPS\", 'nav.xhtml')\r\n if not os.path.exists(nav_file_path):\r\n write(nav_file_path, 'w', nav)\r\n\r\n def add_nav(self, file_chapter_name):\r\n nav_file_path = os.path.join(Vars.cfg.data.get('save_dir'), self.book_name, \"OEBPS\", 'nav.xhtml')\r\n nav_file = write(os.path.join(nav_file_path), 'r').read()\r\n # print(nav_file)\r\n add_nav = nav_file.replace('${file_chapter_name}', file_chapter_name).replace('${chapter_title}', self.chapter_title)\r\n add_nav = add_nav.replace(\r\n '</li>', '\\r\\n<a href=\"${file_chapter_name}.xhtml\">${chapter_title}</a>\\r\\n'+ '</li>')\r\n write(nav_file, 'w', add_nav)\r\n \r\n \r\nif __name__ == '__main__':\r\n Epubs = Epub('大师姐', '43534534', '可乐', '百合', '这是一个简介', '2021年')\r\n Epubs.create_info()\r\n Epubs.style_flie()\r\n Epubs.create_container()\r\n Epubs.create_mimetype()\r\n Epubs.create_toc()\r\n Epubs.add_toc('这是卷', '这是章节', '这是路径')\r\n Epubs.create_nav()\r\n Epubs.create_content_opf()\r\n", "id": "4736660", "language": "Python", "matching_score": 3.7744925022125244, "max_stars_count": 2, "path": "epub.py" }, { "content": "from epub_novel import epub\r\nfrom instance import *\r\nimport API\r\n\r\n\r\nclass EpubFile:\r\n def __init__(self, book_id, book_name, author_name):\r\n self.book_id = book_id\r\n self.book_name = book_name\r\n self.author_name = author_name\r\n self.epub = epub.EpubBook()\r\n self.EpubList = list()\r\n self.path = os.path.join\r\n self.epub.set_language('zh-CN')\r\n self.epub.set_identifier(book_id)\r\n self.epub.set_title(book_name)\r\n self.epub.add_author(author_name)\r\n\r\n def add_intro(self, author_name, up_time, up_chapter, intro, novel_tag):\r\n intro_config = epub.EpubHtml(title='简介信息', file_name='0000-000000-intro.xhtml', lang='zh-CN')\r\n intro_html = \"\"\"<html><head></head><body>\\n<img src=\"./{}.png\" 
alt=\"书籍封面\"/>\\n<h1>简介</h1>\r\n \\n<p>书籍书名:{}</p>\\n<p>书籍序号:{}</p>\\n<p>书籍作者:{}</p>\\n<p>更新时间:{}</p>\r\n \\n<p>最新章节:{}</p>\\n<p>系统标签:{}</p>\\n<p>简介信息:</p>\\n{}</body></html> \"\"\"\r\n intro_config.content = intro_html.format(\r\n self.book_name, self.book_name, self.book_id, author_name, up_time, up_chapter, novel_tag, intro\r\n )\r\n self.epub.add_item(intro_config)\r\n self.EpubList.append(intro_config)\r\n self.epub.set_cover(self.book_name + '.png', API.Cover.download_cover())\r\n\r\n def add_chapter(self, chapter_title: str, content: str, serial_number: str):\r\n chapter_serial = epub.EpubHtml(\r\n title=chapter_title, file_name=str(serial_number).rjust(4, \"0\") + '.xhtml',\r\n lang='zh-CN', uid='chapter_{}'.format(serial_number)\r\n )\r\n\r\n chapter_serial.content = content.replace('\\n', '</p>\\r\\n<p>')\r\n self.epub.add_item(chapter_serial)\r\n self.EpubList.append(chapter_serial)\r\n\r\n def save(self):\r\n self.epub.toc = tuple(self.EpubList)\r\n self.epub.spine = ['nav']\r\n self.epub.spine.extend(self.EpubList)\r\n self.epub.add_item(epub.EpubNcx())\r\n self.epub.add_item(epub.EpubNav())\r\n\r\n epub.write_epub(self.path(Vars.cfg.data.get('save_book'), self.book_name, self.book_name + '.epub'), self.epub,\r\n {})\r\n", "id": "7300760", "language": "Python", "matching_score": 1.7962987422943115, "max_stars_count": 1, "path": "epub.py" }, { "content": "import threading\r\nimport API\r\nfrom instance import *\r\n\r\n\r\ndef output_chapter_content(chapter_content, chapter_title=\"\", intro=False):\r\n content = \"\"\r\n if intro is True:\r\n for line in chapter_content.splitlines():\r\n chapter_line = line.strip(\" \").strip()\r\n if chapter_line != \"\":\r\n content += \"\\n\" + chapter_line[:60]\r\n return content\r\n for line in chapter_content.splitlines():\r\n chapter_line = line.strip(\" \").strip()\r\n if chapter_line != \"\" and len(chapter_line) > 2:\r\n if \"http\" in chapter_line:\r\n continue\r\n content += \"\\n  {}\".format(chapter_line)\r\n return f\"{chapter_title}\\n\\n{content}\"\r\n\r\n\r\nclass Book:\r\n\r\n def __init__(self, book_info: dict, index=None):\r\n self.index = index\r\n self.progress_bar = 1\r\n self.config_json = []\r\n self.chapter_id_list = []\r\n self.thread_list = list()\r\n self.pool_sema = threading.BoundedSemaphore(Vars.cfg.data.get('max_threads'))\r\n self.book_name = book_info.get('title')\r\n self.book_id = book_info.get('_id')\r\n self.author_name = book_info.get('author')\r\n self.book_intro = book_info.get('longIntro')\r\n self.book_state = book_info.get('zt')\r\n self.book_tag = book_info.get('cat')\r\n self.word_count = book_info.get('wordCount')\r\n self.book_updated = book_info.get('updated')\r\n self.last_chapter = book_info.get('lastChapter')\r\n self.book_config = f\"{Vars.cfg.data.get('config_book')}/{self.book_name}\" + '.json'\r\n\r\n def show_book_info(self) -> str:\r\n show_info = '作者:{0:<{2}}状态:{1}\\n'.format(self.author_name, self.book_state, isCN(self.author_name))\r\n show_info += '标签:{0:<{2}}字数:{1}\\n'.format(self.book_tag, self.word_count, isCN(self.book_tag))\r\n show_info += '最新:{0:<{2}}更新:{1}\\n'.format(self.last_chapter, self.book_updated, isCN(self.last_chapter))\r\n print(show_info)\r\n if not os.path.exists(self.book_config):\r\n open(self.book_config, \"a\").write(\"[]\")\r\n self.config_json = json.loads(open(self.book_config, 'r', encoding='utf-8').read())\r\n return '{}简介:\\n{}'.format(show_info, output_chapter_content(self.book_intro, intro=True))\r\n\r\n def start_downloading_novels(self):\r\n 
save_dir = os.path.join(Vars.cfg.data.get('save_book'), self.book_name, f'{self.book_name}.txt')\r\n if self.last_chapter is not None:\r\n write(save_dir, 'w', self.show_book_info())\r\n chapter_list = self.get_chapter_url()\r\n if len(chapter_list) == 0:\r\n print(\"没有需要下载的章节!\")\r\n else:\r\n self.download_chapter_threading(len(chapter_list), chapter_list)\r\n print('\\n下载完成!')\r\n self.output_text_and_epub(save_dir)\r\n print(self.book_name, '本地档案合并完毕')\r\n\r\n def progress_count(self, length):\r\n print('{}/{} 进度:{:^3.0f}%'.format(self.progress_bar, length, (self.progress_bar / length) * 100), end='\\r')\r\n self.progress_bar += 1\r\n\r\n def thread_download_content(self, chapter_url, chapter_index, download_length):\r\n self.pool_sema.acquire()\r\n chapter_title, chapter_content = API.Chapter.download_chapter(chapter_url)\r\n content_config = {\r\n 'index': chapter_index,\r\n 'title': chapter_title,\r\n 'content': output_chapter_content(chapter_content, chapter_title),\r\n }\r\n self.config_json.append(content_config)\r\n if Vars.cfg.data.get('real_time_cache'):\r\n with open(self.book_config, 'w', encoding='utf-8') as f:\r\n json.dump(self.config_json, f, ensure_ascii=False)\r\n self.progress_count(download_length)\r\n self.pool_sema.release()\r\n\r\n def output_text_and_epub(self, save_dir):\r\n self.config_json = sorted(self.config_json, key=lambda list1: int(list1[\"index\"])) # 按照数字顺序排序文本\r\n for config_info in self.config_json: # 遍历文件名\r\n Vars.epub_info.add_chapter(config_info['title'], config_info['content'], config_info['index'])\r\n\r\n write(save_dir, 'a', ''.join([\"\\n\\n\\n\" + config_info['content'] for config_info in self.config_json]))\r\n Vars.epub_info.save(), self.config_json.clear(), self.chapter_id_list.clear()\r\n\r\n def get_chapter_url(self):\r\n response = API.Book.catalogue(self.book_id)\r\n if response is None:\r\n return self.chapter_id_list\r\n config_tests = [chapters.get('title') for chapters in self.config_json]\r\n if len(self.config_json) == 0:\r\n link_list = [chapters.get('link') for chapters in response]\r\n return link_list\r\n for index, info in enumerate(response):\r\n if info['title'] in config_tests and info.get('content') != \"\":\r\n continue\r\n self.chapter_id_list.append(info['link'])\r\n\r\n return self.chapter_id_list\r\n\r\n def download_chapter_threading(self, download_length, chapter_list):\r\n if download_length == 0:\r\n return download_length\r\n\r\n for index, chapter_url in enumerate(chapter_list):\r\n self.thread_list.append(threading.Thread(\r\n target=self.thread_download_content, args=(chapter_url, chapter_url.split('/')[1], download_length,)\r\n ))\r\n\r\n for thread in self.thread_list:\r\n thread.start()\r\n\r\n for thread in self.thread_list:\r\n thread.join()\r\n self.thread_list.clear()\r\n with open(self.book_config, 'w', encoding='utf-8') as f:\r\n json.dump(self.config_json, f, ensure_ascii=False)\r\n", "id": "7258019", "language": "Python", "matching_score": 2.803912878036499, "max_stars_count": 1, "path": "book.py" }, { "content": "import threading\r\nimport PySimpleGUI as sg\r\nimport API\r\nimport book\r\nimport epub\r\nfrom instance import *\r\n\r\nsetup_config()\r\n\r\n\r\ndef start_downloading_novels():\r\n book_name = Vars.book_info.book_name\r\n Vars.epub_info = epub.EpubFile(Vars.book_info.book_id, book_name, Vars.book_info.author_name)\r\n Vars.epub_info.add_intro(\r\n Vars.book_info.author_name, Vars.book_info.book_updated, Vars.book_info.last_chapter,\r\n Vars.book_info.book_intro, 
Vars.book_info.book_tag\r\n )\r\n Vars.book_info.start_downloading_novels()\r\n\r\n\r\ndef download_tag(tag_id):\r\n if not Msgs.msg_tag.get(tag_id):\r\n print(f\"{tag_id} 标签号不存在\\n\")\r\n for key, Value in Msgs.msg_tag.items():\r\n print('{}:\\t\\t\\t{}'.format(key, Value))\r\n return\r\n page = 0\r\n while True:\r\n tag_name = Msgs.msg_tag[tag_id]\r\n response = API.Tag.tag_info(tag_id, tag_name, page)\r\n if response is None: break\r\n for index, tag_info_data in enumerate(response, start=1):\r\n print(\"\\n\\n{}分类 第{}本\\n\".format(tag_name, index))\r\n Vars.book_info = API.Book.novel_info(tag_info_data['_id'])\r\n if Vars.book_info is not None and isinstance(Vars.book_info, dict):\r\n Vars.book_info = book.Book(Vars.book_info)\r\n print('开始下载{}'.format(Vars.book_info.book_name))\r\n start_downloading_novels()\r\n else:\r\n print(\"获取失败\")\r\n page += 20\r\n\r\n\r\ndef main():\r\n sg.theme('Default1')\r\n\r\n layout = [\r\n [sg.Menu([['选项', ['使用帮助', '免责声明']]])],\r\n [sg.Text('book id'), sg.InputText(key='-BID-', size=(25, 1))],\r\n [sg.Text('tags id'), sg.InputText(key='-TID-', size=(25, 1))],\r\n [sg.B('download', key=\"_DOWNLOAD_\"), sg.B('tag', key=\"_TAG_\"), sg.Button('exit')]]\r\n\r\n window = sg.Window('Window Title', layout, finalize=True)\r\n while True: # Event Loop\r\n event, values = window.read()\r\n print(event, values)\r\n if event == sg.WIN_CLOSED or event == 'Exit':\r\n break\r\n elif event == '使用帮助':\r\n text = '使用帮助:\\n'\r\n sg.popup_scrolled(text, title='使用帮助')\r\n\r\n elif event == '免责声明':\r\n text = '本项目提供用于个人学习、研究或欣赏。通过使用项目随之而来的风险与作者无关'\r\n sg.popup(text, title='免责声明')\r\n\r\n elif event == \"_DOWNLOAD_\":\r\n print(values['-BID-'])\r\n if values['-BID-'] != \"\":\r\n Vars.book_info = API.Book.novel_info(values['-BID-'])\r\n if Vars.book_info is not None and isinstance(Vars.book_info, dict):\r\n Vars.book_info = book.Book(Vars.book_info)\r\n sg.popup_cancel('开始下载{}'.format(Vars.book_info.book_name))\r\n threading.Thread(target=start_downloading_novels).start()\r\n else:\r\n print(\"获取失败\")\r\n else:\r\n sg.popup_ok('获取书籍信息失败!', title='提醒')\r\n elif event == '_TAG_':\r\n if values['-TID-'] != \"\":\r\n threading.Thread(target=download_tag, args=(values['-TID-'],)).start()\r\n else:\r\n print(\"没有输入标签序号\")\r\n window.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "id": "5065286", "language": "Python", "matching_score": 3.337850570678711, "max_stars_count": 1, "path": "gui.py" }, { "content": "import API\r\nimport book\r\nimport epub\r\nfrom instance import *\r\n\r\n\r\ndef agreed_read_readme():\r\n if Vars.cfg.data.get('agreed_to_readme') != 'yes':\r\n print(Msgs.msg_agree_terms)\r\n confirm = inputs_('>').strip()\r\n if confirm == 'yes' or confirm == '同意':\r\n Vars.cfg.data['agreed_to_readme'] = 'yes'\r\n Vars.cfg.save()\r\n else:\r\n sys.exit()\r\n\r\n\r\ndef shell_book(inputs): # 通过小说ID下载单本小说\r\n if len(inputs) >= 2:\r\n Vars.book_info = API.Book.novel_info(inputs[1])\r\n if Vars.book_info is not None and isinstance(Vars.book_info, dict):\r\n Vars.book_info = book.Book(Vars.book_info)\r\n book_name = Vars.book_info.book_name\r\n Vars.epub_info = epub.EpubFile(Vars.book_info.book_id, book_name, Vars.book_info.author_name)\r\n Vars.epub_info.add_intro(\r\n Vars.book_info.author_name, Vars.book_info.book_updated, Vars.book_info.last_chapter,\r\n Vars.book_info.book_intro, Vars.book_info.book_tag\r\n )\r\n print(\"开始下载《{}》\".format(book_name))\r\n makedirs(Vars.cfg.data.get('save_book') + \"/\" + book_name)\r\n 
Vars.book_info.start_downloading_novels()\r\n else:\r\n print(\"获取书籍信息失败,请检查id或者重新尝试!\")\r\n else:\r\n print('未输入Book id')\r\n\r\n\r\ndef shell_search_book(inputs):\r\n if len(inputs) >= 2:\r\n start = time.time()\r\n response = API.Book.search_book(inputs[1])\r\n for index, books in enumerate(response):\r\n shell_book([index, books.get('_id')])\r\n print(f'下载耗时:{round(time.time() - start, 2)} 秒')\r\n else:\r\n print('未输入书名')\r\n\r\n\r\ndef get_pool(inputs):\r\n if len(inputs) >= 2:\r\n if inputs[1].isdigit():\r\n Vars.cfg.data['Thread_Pool'] = int(inputs[1])\r\n Vars.cfg.save(), print(\"线程已设置为\", Vars.cfg.data.get('Thread_Pool'))\r\n return\r\n print(\"设置失败,输入信息不是数字\")\r\n else:\r\n print(\"默认线程为\", Vars.cfg.data.get('Thread_Pool'))\r\n\r\n\r\ndef shell_tag(inputs):\r\n if len(inputs) >= 2 and inputs[1].isdigit():\r\n tag_id = int(inputs[1])\r\n if Msgs.msg_tag.get(tag_id) is None:\r\n print(f\"{tag_id} 标签号不存在\\n\")\r\n for key, Value in Msgs.msg_tag.items():\r\n print('{}:\\t\\t\\t{}'.format(key, Value))\r\n return\r\n page = 0\r\n while True:\r\n tag_name = Msgs.msg_tag[tag_id]\r\n response = API.Tag.tag_info(inputs[1], tag_name, page)\r\n if response is None: break\r\n for index, tag_info_data in enumerate(response, start=1):\r\n print(\"\\n\\n{}分类 第{}本\\n\".format(tag_name, index))\r\n shell_book([index, tag_info_data.get('_id')])\r\n page += 20\r\n else:\r\n print(API.Tag.get_type())\r\n\r\n\r\ndef shell_ranking(inputs):\r\n if len(inputs) >= 2:\r\n novel_list = []\r\n for data in API.Tag.ranking(inputs[1])['ranking']['books']:\r\n for key, Value in data.items():\r\n if key == 'title':\r\n print('\\n\\n{}:\\t\\t\\t{}'.format(key, Value))\r\n continue\r\n book_info = '{}:\\t\\t\\t{}'.format(key, Value) if len(\r\n key) <= 6 else '{}:\\t\\t{}'.format(key, Value)\r\n print(book_info)\r\n novel_list.append(data.get('_id'))\r\n for index, novel_id in enumerate(novel_list):\r\n shell_book([index, novel_id])\r\n\r\n else:\r\n ranking_dict = {'周榜': '1', '月榜': '2', '总榜': '3'}\r\n for key, Value in ranking_dict.items():\r\n print('{}:\\t\\t\\t{}'.format(key, Value))\r\n\r\n\r\ndef shell_list(inputs):\r\n start = time.time()\r\n list_file_name = inputs[1] + '.txt' if len(inputs) >= 2 else 'list.txt'\r\n try:\r\n list_file_input = open(list_file_name, 'r', encoding='utf-8')\r\n book_list = [line for line in list_file_input.readlines() if re.match(\"^\\\\s*([0-9]{1,7}).*$\", line)]\r\n for book_id in book_list:\r\n shell_book(['', re.sub(\"^\\\\s*([0-9]{1,7}).*$\\\\n?\", \"\\\\1\", book_id)])\r\n print(f'下载耗时:{round(time.time() - start, 2)} 秒')\r\n except OSError:\r\n print(f\"{list_file_name}文件不存在\")\r\n\r\n\r\ndef shell():\r\n if len(sys.argv) > 1:\r\n command_line, inputs = True, sys.argv[1:]\r\n else:\r\n print('\\n'.join(Msgs.msg_help))\r\n command_line, inputs = False, re.split('\\\\s+', inputs_('>').strip())\r\n while True:\r\n if inputs[0].startswith('q') or inputs[0] == '--quit':\r\n sys.exit(\"已退出程序\")\r\n if inputs[0] == 'h' or inputs[0] == '--help':\r\n print('\\n'.join(Msgs.msg_help))\r\n elif inputs[0] == 't' or inputs[0] == '--tag':\r\n shell_tag(inputs)\r\n elif inputs[0] == 'd' or inputs[0] == '--download':\r\n shell_book(inputs)\r\n elif inputs[0] == 'n' or inputs[0] == '--name':\r\n shell_search_book(inputs)\r\n elif inputs[0] == 'r' or inputs[0] == '--rank':\r\n shell_ranking(inputs)\r\n elif inputs[0] == 'u' or inputs[0] == '--update':\r\n shell_list(inputs)\r\n elif inputs[0] == 'p' or inputs[0] == '--pool':\r\n get_pool(inputs)\r\n else:\r\n print(inputs[0], '不是有效命令')\r\n 
if command_line is True:\r\n sys.exit(1)\r\n inputs = re.split('\\\\s+', inputs_('>').strip())\r\n\r\n\r\nif __name__ == '__main__':\r\n setup_config()\r\n agreed_read_readme()\r\n shell()\r\n", "id": "4083223", "language": "Python", "matching_score": 1.9465245008468628, "max_stars_count": 1, "path": "run.py" }, { "content": "import os\r\nimport re\r\nimport time\r\nfrom rich import print\r\nimport yaml\r\n\r\n\r\nclass Msg:\r\n msg_help = [\r\n \"输入首字母\",\r\n \"h | help\\t\\t\\t\\t\\t\\t--- 显示说明\",\r\n \"q | quit\\t\\t\\t\\t\\t\\t--- 退出正在运作的程序\",\r\n \"d | picture\\t\\t\\t\\t\\t\\t--- 输入id或url下载插画\",\r\n \"t | recommend\\t\\t\\t\\t\\t--- 下载pixiv推荐插画\",\r\n \"s | start\\t\\t\\t\\t\\t\\t--- 下载账号收藏插画\",\r\n \"r | rank\\t\\t\\t\\t\\t\\t--- 下载排行榜作品\",\r\n \"n | tag name\\t\\t\\t\\t\\t--- 输入插画名或者表情名\",\r\n \"u | read text pid\\t\\t\\t\\t\\t--- 读取本地文本里的pid批量下载\",\r\n \"f | follow\\t\\t\\t\\t\\t\\t--- 下载关注的画师作品\",\r\n ]\r\n\r\n\r\nclass YamlData:\r\n def __init__(self, file_path=None, file_dir=None):\r\n if file_dir is not None:\r\n self.file_dir = os.path.join(os.getcwd(), file_dir)\r\n if not os.path.exists(self.file_dir):\r\n try:\r\n os.mkdir(self.file_dir)\r\n except (FileExistsError, OSError) as err:\r\n print(err)\r\n self.file_path = os.path.join(os.getcwd(), file_path)\r\n self.data = {}\r\n\r\n def load(self):\r\n try:\r\n with open(file=self.file_path, mode=\"r\", encoding='utf-8') as f:\r\n self.data = yaml.load(f, Loader=yaml.FullLoader)\r\n if self.data is None:\r\n self.data = {}\r\n except FileNotFoundError:\r\n with open(self.file_path, 'w', encoding='utf-8'):\r\n self.data = {}\r\n\r\n def save(self):\r\n with open(file=self.file_path, mode=\"w\", encoding='utf-8') as f:\r\n yaml.safe_dump(self.data, f, default_flow_style=False, allow_unicode=True)\r\n\r\n\r\ndef write_file(file_dir: str, m: str, content: str = \"\"):\r\n if m == \"r\":\r\n return open(file_dir, \"r\", encoding='utf-8').read()\r\n with open(file_dir, m, encoding='utf-8', newline=\"\") as f:\r\n f.write(content)\r\n\r\n\r\nclass Vars:\r\n cfg = YamlData('pixiv-config.yaml')\r\n images_info = None\r\n complex_images_info = list()\r\n images_info_list = list()\r\n\r\n\r\ndef count_time(func: callable) -> callable:\r\n def wrapper(*arg, **kwargs):\r\n start_time = time.time()\r\n result = func(*arg, **kwargs)\r\n print(f\"下载耗时:{time.time() - start_time:.2f}s\")\r\n return result\r\n\r\n return wrapper\r\n\r\n\r\ndef remove_str(content: str):\r\n res_compile = re.compile(u'[\\U00010000-\\U0010ffff\\\\uD800-\\\\uDBFF\\\\uDC00-\\\\uDFFF]')\r\n return res_compile.sub(\"\", re.sub('[/:*?\"<>|x08]', '-', content))\r\n\r\n\r\ndef rec_id(book_id: str):\r\n book_id = book_id if 'http' not in book_id else re.findall(r'/(\\d+)/?', book_id)[0]\r\n return str(book_id) if book_id.isdigit() else f'输入信息 {book_id} 不是数字或链接!'\r\n\r\n\r\ndef index_title(division_index: int, image_name: str):\r\n return str(division_index).rjust(4, \"0\") + '-' + str(image_name)\r\n\r\n\r\ndef input_str(prompt, default=None):\r\n while True:\r\n ret = input(prompt)\r\n if ret != '':\r\n return ret\r\n elif default is not None:\r\n return default\r\n\r\n\r\ndef input_int(prompt: str, max_number: int = None):\r\n while True:\r\n ret = input(prompt)\r\n if ret.isdigit():\r\n if max_number is None:\r\n return int(ret)\r\n if max_number is not None and int(ret) < max_number:\r\n return int(ret)\r\n else:\r\n print(f\"输入数字 {ret} 需要小于索引 {max_number} \")\r\n continue\r\n else:\r\n if ret.strip() != '':\r\n print(f\"输入的内容 {ret} 不是数字,请重新输入\")\r\n\r\n\r\ndef 
set_config():\r\n Vars.cfg.load()\r\n config_change = False\r\n if type(Vars.cfg.data.get('max_thread')) is not int:\r\n Vars.cfg.data['max_thread'] = 5\r\n config_change = True\r\n\r\n if Vars.cfg.data.get('save_file') is not str:\r\n Vars.cfg.data['save_file'] = 'image_file'\r\n config_change = True\r\n\r\n if Vars.cfg.data.get('out_file') is not str:\r\n Vars.cfg.data['out_file'] = 'downloaded'\r\n config_change = True\r\n\r\n if type(Vars.cfg.data.get('save_type')) is not bool:\r\n Vars.cfg.data['save_type'] = False\r\n config_change = True\r\n\r\n if type(Vars.cfg.data.get('access_token')) is not str:\r\n Vars.cfg.data['access_token'] = \"\"\r\n config_change = True\r\n\r\n if type(Vars.cfg.data.get('refresh_token')) is not str:\r\n Vars.cfg.data['refresh_token'] = \"\"\r\n config_change = True\r\n\r\n if type(Vars.cfg.data.get('max_retry')) is not int:\r\n Vars.cfg.data['max_retry'] = 5 # retry times when download failed\r\n config_change = True\r\n\r\n if not isinstance(Vars.cfg.data.get('file_name_config'), dict):\r\n Vars.cfg.data['file_name_config'] = {'image_id': True, 'author': 'author'}\r\n config_change = True\r\n\r\n if not isinstance(Vars.cfg.data.get('user_info'), dict):\r\n Vars.cfg.data['user_info'] = {} # save user info to config file\r\n config_change = True\r\n\r\n if config_change: # if config change, save it to file and reload.\r\n Vars.cfg.save()\r\n\r\n if not os.path.exists(Vars.cfg.data.get('save_file')):\r\n os.mkdir(Vars.cfg.data.get('save_file'))\r\n", "id": "12785098", "language": "Python", "matching_score": 3.2957231998443604, "max_stars_count": 1, "path": "instance.py" }, { "content": "from PixivAPI import HttpUtil\nfrom instance import *\nimport PixivAPI\n\n\nclass ImageInfo:\n def __init__(self, result_info: dict):\n self.image_id = str(result_info[\"id\"])\n self.author_id = str(result_info['user'][\"id\"])\n self.author_name = remove_str(str(result_info['user'][\"name\"]))\n self.page_count = result_info['page_count']\n self.image_name = remove_str(result_info['title'])\n self.create_date = result_info['create_date']\n self.tag_name = ' '.join([data[\"name\"] for data in result_info['tags'] if data[\"name\"]])\n self.original_url = result_info.get('meta_single_page', {}).get('original_image_url')\n self.original_url_list = [url['image_urls'][\"original\"] for url in result_info.get('meta_pages')]\n\n def show_images_information(self, thread_status: bool = False):\n if not thread_status:\n print(\"插画名称: {}:\".format(self.image_name))\n print(\"插画序号: {}\".format(self.image_id))\n print(\"作者名称: {}\".format(self.author_name))\n print(\"作者序号: {}\".format(self.author_id))\n print(\"插画标签: {}\".format(self.tag_name))\n print(\"画集数量: {}\".format(self.page_count))\n if self.page_count == 1:\n print(\"插画地址:{}\".format(re.sub(r\"pximg.net\", \"pixiv.cat\", self.original_url)))\n else:\n for index, original_url in enumerate(self.original_url_list, start=1):\n print(\"画集{}:{}\".format(index, re.sub(r\"pximg.net\", \"pixiv.cat\", original_url)))\n print(\"发布时间: {}\\n\".format(self.create_date))\n\n def save_file(self, image_name: str, image_url: str):\n if Vars.cfg.data.get('save_type'):\n out_dir = os.path.join(Vars.cfg.data.get(\"save_file\"), self.author_name, self.image_name)\n else:\n out_dir = os.path.join(Vars.cfg.data.get(\"save_file\"), self.author_name)\n YamlData(\"\", out_dir)\n if not os.path.exists(os.path.join(out_dir, f'{image_name}.png')):\n with open(os.path.join(out_dir, f'{image_name}.png'), 'wb+') as file:\n 
file.write(PixivAPI.get(api_url=image_url, head=\"png\", types=\"content\"))\n\n # file.write(HttpUtil.get_api(api_url=image_url, return_type=\"content\"))\n\n def save_image(self, image_url_list):\n if isinstance(image_url_list, list):\n for index, url in enumerate(image_url_list, start=1):\n if Vars.cfg.data.get(\"file_name_config\").get(\"image_id\"):\n file_name = self.image_id + \"-\" + str(index).rjust(4, \"0\") + '-' + self.image_name\n else:\n file_name = self.author_id + \"-\" + index_title(index, self.image_name)\n self.save_file(file_name, url)\n else:\n if Vars.cfg.data.get(\"file_name_config\").get(\"image_id\"):\n file_name = self.image_id + \"-\" + self.image_name\n else:\n file_name = self.author_id + \"-\" + self.image_name\n self.save_file(file_name, image_url_list)\n", "id": "4622216", "language": "Python", "matching_score": 1.4858942031860352, "max_stars_count": 1, "path": "Image.py" }, { "content": "from API import HttpUtil, UrlConstants\nfrom API.AesDecrypt import decrypt, example\nfrom instance import *\nimport threading\n# from rich.progress import DownloadColumn, TextColumn, Progress, BarColumn, TimeRemainingColumn\n\n\nclass Download:\n def __init__(self):\n self.bookid = ''\n self.bookName = \"\"\n self.progress_num = 0\n self.save_dir = Vars.cfg.data.get('save_dir')\n self.output_dir = Vars.cfg.data.get('output_dir')\n self.max_worker = Vars.cfg.data.get('max_workers_number')\n\n def filedir(self):\n meragefiledir = os.path.join(self.save_dir, self.bookName)\n file_names_list = os.listdir(meragefiledir)\n file_names_list.sort(key=lambda x: int(x.split('.')[0]))\n write(os.path.join(Vars.cfg.data.get('output_dir'),f'{self.bookName}.txt'), 'w')\n for filename in file_names_list: \n config_file = open(os.path.join(meragefiledir, filename), 'r', encoding='utf-8').read()\n if Vars.cfg.data.get('shield') not in config_file:\n write(os.path.join(Vars.cfg.data.get('output_dir'),\n f'{self.bookName}.txt'), 'a', config_file)\n else:\n continue\n\n def ThreadPool(self, chapters_url_list, info_dict):\n self.bookid = info_dict.get('book_id')\n self.bookName = info_dict.get('book_title')\n self.novel_intro = info_dict.get('book_desc')\n self.authorName = info_dict.get('book_author')\n self.chapter_list = info_dict.get('chapter_list')\n self.lastUpdateTime = info_dict.get('update_time')\n self.book_type = info_dict.get('book_type')\n self.isFinish = info_dict.get('book_status')\n \"\"\"多线程并发实现\"\"\"\n\n # 创建 rich 进度条\n lock_tasks_list = threading.Lock()\n lock_progress = threading.Lock()\n tasks = []\n threads = []\n # progress = Progress(\n # # TextColumn(\"[bold blue]{task.fields[filename]}\", justify=\"right\"),\n # BarColumn(bar_width=None),\n # \"[progress.percentage]{task.percentage:>3.1f}%\",\n # \"•\",\n # DownloadColumn(),\n # # \"•\",\n # # TransferSpeedColumn(),\n # \"•\",\n # TimeRemainingColumn(),\n # )\n\n # 生成下载队列.\n for number, book_url in enumerate(chapters_url_list):\n tasks.append(\n (UrlConstants.WEB_SITE + book_url, number)\n )\n\n # prgtask = progress.add_task(\n # \"Download\", total=len(tasks)\n # )\n self.chapters_url_list = chapters_url_list\n # print(self.chapters_url_list)\n\n def downloader():\n \"\"\"多线程下载函数\"\"\"\n nonlocal lock_tasks_list, lock_progress, tasks # progress, prgtask\n \n while tasks:\n lock_tasks_list.acquire()\n url, number = tasks.pop()\n lock_tasks_list.release()\n self.progress_num += 1\n print(\n f'下载进度:{self.progress_num}/{len(self.chapters_url_list)}', end=\"\\r\")\n\n book_title = del_title(self.chapter_list[number-1])\n # 
print(book_title)\n fd = write(\n os.path.join(self.save_dir, self.bookName,f\"{number}.{book_title}.txt\"),\n 'w',\n )\n content = content_(HttpUtil.get(url).get('data'))\n # print(content)\n fd.write('\\n\\n\\n{}\\n{}'.format(book_title, content))\n\n lock_progress.acquire()\n # progress.update(prgtask, advance=1)\n lock_progress.release()\n\n for _ in range(self.max_worker):\n th = threading.Thread(target=downloader)\n threads.append(th)\n th.start()\n\n # wait downloader\n for th in threads:\n th.join()\n\n self.filedir()\n print(f'\\n小说 {self.bookName} 下载完成\\n\\n')\n", "id": "6763653", "language": "Python", "matching_score": 4.767911434173584, "max_stars_count": 2, "path": "API/LaoMaoxsAPI.py" }, { "content": "from instance import *\r\nfrom API.LaoMaoxsAPI import Download\r\nfrom API import UrlConstants\r\n\r\n\r\nclass BOOK:\r\n bookid = None\r\n bookName = None\r\n novel_intro = None\r\n authorName = None\r\n chapter_list = None\r\n book_type = None\r\n isFinish = None\r\n\r\n def __init__(self, BOOK_INFO):\r\n self.book_info = BOOK_INFO\r\n self.book_info_msg = BOOK_INFO.get('msg')\r\n self.book_info_code = BOOK_INFO.get('code')\r\n self.book_info_data = self.book_info.get('data')\r\n self.save_dir = Vars.cfg.data.get('save_dir')\r\n self.output_dir = Vars.cfg.data.get('output_dir')\r\n \r\n\r\n def get_book_info(self):\r\n if self.book_info_msg == 'ok':\r\n self.bookid = self.book_info_data.get('book_id')\r\n self.bookName = self.book_info_data.get('book_title')\r\n self.book_type = self.book_info_data.get('book_type')\r\n self.isFinish = self.book_info_data.get('book_status')\r\n self.novel_intro = self.book_info_data.get('book_desc')\r\n self.authorName = self.book_info_data.get('book_author')\r\n self.chapter_list = self.book_info_data.get('chapter_list')\r\n self.lastUpdateTime = time_(self.book_info_data.get('update_time'))\r\n return 200\r\n else:\r\n return 404\r\n\r\n def book_show(self):\r\n if self.get_book_info() == 200:\r\n \"\"\"创建配置 output_dir 和 创建config 文件夹 \"\"\" \r\n mkdir(self.output_dir); mkdir(self.save_dir)\r\n \"\"\"创建config/bookname/ 文件夹 \"\"\"\r\n makedirs(self.save_dir, self.bookName)\r\n \"\"\"打印书名信息\"\"\"\r\n show_intro = \"书名:{}\\n序号:{}\\n作者:{}\\n分类:{}\\n更新:{}\".format(\r\n self.bookName, self.bookid, self.authorName,\r\n self.book_type, self.lastUpdateTime)\r\n print(show_intro)\r\n \r\n show_intro += \"\\n简介信息:{}\\n\".format(content_(self.novel_intro))\r\n \"\"\"保存书籍详细到 config/bookname/0.intro.txt\"\"\"\r\n write(os.path.join(self.save_dir, self.bookName, '0.intro.txt'), 'w', show_intro)\r\n\r\n \"\"\"执行下载任务!\"\"\"\r\n chapter_list = self.chapters()\r\n if chapter_list == 'null':\r\n print(\"没有需要下载的章节\\n\\n\")\r\n else:\r\n print(f'开始下载 {self.bookName} ,剩余{len(chapter_list)}章')\r\n Download().ThreadPool(self.chapters(), self.book_info_data)\r\n elif self.get_book_info() == 404:\r\n print(self.book_info_msg)\r\n\r\n def chapters(self):\r\n chapter_list = list()\r\n config_bookname = os.listdir(os.path.join(self.save_dir, self.bookName))\r\n for chapter_id_num, chapter_id in enumerate(range(len(self.chapter_list))):\r\n \"\"\"跳过已经下载的章节\"\"\"\r\n chapter_title = self.chapter_list[chapter_id_num]\r\n if del_title(chapter_title) in ''.join(config_bookname):\r\n continue\r\n url_num = int(int(self.bookid)/1000) # 书本编号等于bookid÷1000\r\n \r\n chapter_url = UrlConstants.CONTENT.format(url_num, self.bookid, chapter_id)\r\n chapter_list.append(chapter_url)\r\n \r\n if len(chapter_list) == 0:\r\n return 'null'\r\n return chapter_list\r\n\r\n # 单线程\r\n # for 
chapter_id_num, chapter_id in enumerate(track(range(len(self.chapter_list)))):\r\n # url_num = int(int(self.bookid)/1000) # 书本编号等于bookid÷1000\r\n # book_title = self.chapter_list[chapter_id_num]\r\n # \"\"\"跳过已经下载的章节\"\"\"\r\n # if self.chapter_list[chapter_id_num] in ''.join(self.config_bookname()):\r\n # print(self.chapter_list[chapter_id_num], '已经下载过')\r\n # continue\r\n # url = self.chapterurl.format(url_num, self.bookid, chapter_id)\r\n # content = self.getUtil(url)['data']\r\n # \"\"\"跳过屏蔽章节\"\"\"\r\n # if \"\\\\n\\\\n 编辑正在手打中,稍后点击右上角刷新当前章节!\" not in content:\r\n # print(book_title)\r\n # content_title = \"\\n\\n{}\\n{}\".format(book_title, content_(content))\r\n # self.write_txt(content_title, book_title, chapter_id_num)\r\n # else:\r\n # print(f\"{self.chapter_list[chapter_id_num]}这是屏蔽章节,跳过下载\")\r\n\r\n # with open(os.path.join(\"Download\", self.bookName + '.txt'), 'w', encoding='utf-8') as f:\r\n # self.filedir()\r\n # print(f'\\n小说 {self.bookName} 下载完成')\r\n", "id": "5721192", "language": "Python", "matching_score": 2.9294862747192383, "max_stars_count": 2, "path": "book.py" }, { "content": "from API import HttpUtil, UrlConstants\r\nfrom book import BOOK\r\nfrom instance import *\r\n\r\n\r\nclass Categorys(object):\r\n def __init__(self, Categor_url):\r\n self.Categor_info = HttpUtil.get(Categor_url)\r\n self.Categor_code = self.Categor_info.get('code')\r\n self.Categor_msg = self.Categor_info.get('msg')\r\n self.Categor_data = self.Categor_info.get('data')\r\n\r\n def test(self):\r\n if self.Categor_info.get('code') == 1:\r\n if self.Categor_info != []:\r\n # print(self.Categor_data)\r\n return 200\r\n else:\r\n return 0\r\n else:\r\n print(self.Categor_msg, '获取失败')\r\n return 404\r\n\r\n def Category_download(self):\r\n for Categor_data_info in self.Categor_data:\r\n book_id = Categor_data_info.get('book_id')\r\n book_status = Categor_data_info.get('book_status')\r\n book_type = Categor_data_info.get('book_type')\r\n book_desc = Categor_data_info.get('book_desc')\r\n chapter_count = Categor_data_info.get('chapter_count')\r\n book_hits = Categor_data_info.get('book_hits')\r\n BOOK(HttpUtil.get(UrlConstants.BOOK_INDEX.format(book_id))).book_show()\r\n\r\n # def class_list(self):\r\n # class_list_bookid = []\r\n # for Categor_url in self.Categor_url_list:\r\n # if not HttpUtil.get(Categor_url).get('data'):\r\n # print('排行榜已经下载完毕')\r\n # break\r\n # for data in HttpUtil.get(Categor_url)['data']:\r\n # self.bookName = data['book_title']\r\n # bookid = str(data['book_id'])\r\n # print(self.bookName)\r\n # class_list_bookid.append(bookid)\r\n # print(class_list_bookid[-1])\r\n # return class_list_bookid\r\n", "id": "10031404", "language": "Python", "matching_score": 3.8144211769104004, "max_stars_count": 2, "path": "function/Category.py" }, { "content": "\nfrom instance import *\nfrom API import HttpUtil, UrlConstants\nimport book\n\n\nclass SearchBooks:\n\n def __init__(self, url):\n self.search_url = url\n self.search_info = HttpUtil.get(self.search_url)\n self.search_info_msg = self.search_info.get('msg')\n self.search_info_code = self.search_info.get('code')\n self.search_info_data = self.search_info.get('data')\n self.book_id_list = list()\n\n def test_data_list(self):\n if self.search_info_msg == 'OK':\n if self.search_info_data != []:\n print(self.search_info)\n return 200\n else:\n return 0\n else:\n return 404\n\n def get_seach_info(self):\n for info_data in self.search_info_data:\n book_id = info_data.get('book_id')\n book_status = info_data.get('book_status')\n book_type = 
info_data.get('book_type')\n book_desc = info_data.get('book_desc')\n chapter_count = info_data.get('chapter_count')\n book_hits = info_data.get('book_hits')\n self.book_id_list.append(book_id)\n book.BOOK(HttpUtil.get(UrlConstants.BOOK_INDEX.format(book_id))\n ).book_show()\n\n # if not self.search_info_data:\n # return\n\n # for url in book_info_url_list:\n # if not HttpUtil.get(url)['data']:\n # break\n # \"\"\"存储bookid进列表中\"\"\"\n # search_book = [data['book_id']\n # for data in HttpUtil.get(url)['data']]\n # return search_book\n", "id": "8586273", "language": "Python", "matching_score": 1.7857517004013062, "max_stars_count": 2, "path": "function/Search.py" }, { "content": "import fire\nimport book\nfrom instance import *\nfrom function import Search, userlogin, Category\nfrom API import LaoMaoxsAPI, Settings, HttpUtil, UrlConstants\n\nclass Shell(object):\n def bookid(self, bookid=None):\n if bookid is None:\n bookid = get('请输入Bookid:').strip()\n if str(bookid).isdigit():\n book_info_url = UrlConstants.BOOK_INDEX.format(bookid)\n book.BOOK(HttpUtil.get(book_info_url)).book_show()\n else:\n print('输入内容不是数字')\n\n\n def login(self, usernames=None, passwords=None):\n user_setting = False\n # if usernames != None and passwords != None:\n # if len(str(usernames)) <= 6:\n # print(\"账号不能小于6位!\")\n # usernames, user_setting = None, False\n # if len(str(passwords)) <= 6:\n # print(\"密码不能小于6位!\")\n # passwords. user_setting = None, False\n # else:\n # user_setting = True\n if usernames is None or len(str(usernames)) <= 6:\n usernames = get('请输入usernames:').strip()\n while len(str(usernames)) <= 6:\n print(\"账号不能小于6位!\")\n usernames = get('请输入usernames:').strip()\n if passwords is None or len(str(passwords)) <= 6:\n passwords = get('请输入passwords:').strip()\n while len(str(passwords)) <= 6:\n print(\"密码不能小于6位!\")\n passwords = get('请输入passwords:').strip()\n else:\n userlogin.Login(usernames, passwords).Login_account()\n userlogin.Login(usernames, passwords).Login_account()\n\n def max(self, max_num=None):\n if max_num is None:\n max_num = get('请输入线程数目:').strip()\n if str(max_num).isdigit():\n max_workers_number = 12 if int(max_num) > 12 else int(max_num)\n Vars.cfg.data['max_workers_number'] = max_workers_number\n print(\"线程已经设置为\", Vars.cfg.data.get('max_workers_number'))\n Vars.cfg.save()\n else:\n print(max_num, \"不是数字!\")\n\n \n\n def name(self, bookName=None):\n if bookName is None:\n bookName = get('请输入bookName:').strip()\n search_result_url_list = [\n UrlConstants.SERCH_BOOK.format(bookName, i) for i in range(20)]\n \n for list_num, url in enumerate(search_result_url_list):\n Search_ = Search.SearchBooks(url)\n if Search_.test_data_list() == 200:\n print(f'开始下载第{list_num}页')\n Search_.get_seach_info()\n \n elif Search_.test_data_list() == 0:\n print('已下载完所有搜索的书籍')\n break\n elif Search_.test_data_list() == 404:\n print('搜结果不存在这本书!')\n # search_bookid_list = Download.SearchBook(bookName)\n # else:\n # search_bookid_list = Download.SearchBook(bookName)\n # for bookid in search_bookid_list:\n # book_info_url = UrlConstants.BOOK_INDEX.format(bookid)\n # book.BOOK(HttpUtil.get(book_info_url)).book_show()\n\n\n def tag(self, Categor_num=None):\n if Categor_num is None:\n Categor_num = get('请输入tag:').strip()\n \n Categor_url_list = [UrlConstants.CATEGOR_URL.format(\n i, Categor_num) for i in range(10000)]\n for list_num, Categor_url in enumerate(Categor_url_list):\n Category_ = Category.Categorys(Categor_url)\n if Category_.test() == 200:\n print(f'开始下载第{list_num}页')\n 
Category.Categorys(Categor_url).Category_download()\n elif Category_.test() == 0:\n print('分类已经下载完毕')\n return\n elif Category_.test() == 404:\n print('获取失败')\n\n\n def rank(self):\n for bookid in Download.ranking():\n book_info_url = UrlConstants.BOOK_INDEX.format(bookid)\n book.BOOK(HttpUtil.get(book_info_url)).book_show()\n\n\n def help(self):\n print(Vars.cfg.data.get('help'))\n\n\nif __name__ == '__main__':\n Vars.cfg.load()\n Settings.setup_config()\n Download = LaoMaoxsAPI.Download()\n fire.Fire(Shell)\n", "id": "10914658", "language": "Python", "matching_score": 4.911128520965576, "max_stars_count": 2, "path": "run.py" }, { "content": "\nfrom instance import *\nfrom API import LaoMaoxsAPI, Settings, HttpUtil, userlogin, UrlConstants\n\n\nclass SearchBooks:\n\n\n def SearchBook(self, bookname):\n urls = [UrlConstants.SERCH_BOOK.format(\n bookname, i) for i in range(100)]\n \n # print(url)\n for url in urls:\n if not HttpUtil.get(url)['data']:\n break\n \"\"\"存储bookid进列表中\"\"\"\n search_book = [data['book_id']\n for data in HttpUtil.get(url)['data']]\n return search_book\n", "id": "7481611", "language": "Python", "matching_score": 0.4016379117965698, "max_stars_count": 2, "path": "API/Search.py" }, { "content": "from rich import print\r\nimport requests\r\nimport sys\r\n\r\nmax_retry = 10\r\nheaders = {\r\n 'User-Agent': 'boluobao/4.5.52(iOS;14.0)/appStore',\r\n 'Host': 'api.sfacg.com',\r\n 'Authorization': 'Basic <KEY>\r\n}\r\n\r\n\r\ndef get(api_url):\r\n api_url = 'https://api.sfacg.com/' + api_url.replace('https://api.sfacg.com/', '')\r\n for i in range(max_retry):\r\n try:\r\n return requests.get(api_url, headers=headers).json()\r\n except (OSError, TimeoutError, IOError) as error:\r\n print(\"\\nGet Error Retry: \" + api_url)\r\n\r\n\r\ndef get_dict_value(date, keys=None, default=None):\r\n if keys is None:\r\n print(date)\r\n else:\r\n keys_list = keys.split('.')\r\n if isinstance(date, dict):\r\n dictionary = dict(date)\r\n for i in keys_list:\r\n try:\r\n if dictionary.get(i) is not None:\r\n dict_values = dictionary.get(i)\r\n dictionary = dict_values\r\n elif dictionary.get(i) is None:\r\n dict_values = dictionary.get(int(i))\r\n dictionary = dict_values\r\n except:\r\n return default\r\n return dictionary\r\n else:\r\n try:\r\n dictionary = dict(eval(date))\r\n if isinstance(dictionary, dict):\r\n for i in keys_list:\r\n try:\r\n if dictionary.get(i) is not None:\r\n dict_values = dictionary.get(i)\r\n dictionary = dict_values\r\n # 如果键对应的值不为空,返回对应的值\r\n elif dictionary.get(i) is None:\r\n dict_values = dictionary.get(int(i))\r\n dictionary = dict_values\r\n # 如果键对应的值为空,将字符串型的键转换为整数型,返回对应的值\r\n except:\r\n return default\r\n # 如果字符串型的键转换整数型错误,返回None\r\n return dictionary\r\n except:\r\n return default\r\n\r\n\r\ninput_api = sys.argv[1:]\r\ntry:\r\n if len(sys.argv) >= 3:\r\n print(get_dict_value(get(input_api[0]), input_api[1]))\r\n else:\r\n print(get_dict_value(get(input_api[0])))\r\nexcept IndexError:\r\n print('python run.py url key')\r\n", "id": "8895710", "language": "Python", "matching_score": 2.45137619972229, "max_stars_count": 0, "path": "api.py" }, { "content": "import requests\nimport json\n\n\nclass Config:\n\n def __init__(self, file):\n self.file = file\n\n def save(self, data):\n with open(self.file, 'w') as outfile:\n json.dump(data, outfile)\n\n def read(self):\n with open(self.file) as json_file:\n return json.load(json_file)\n\n\njson_info = Config('api_url.json').read()\n\nheaders = {\n 'Host': 'api.sfacg.com',\n 'Accept-Encoding': 'gzip, deflate, 
br',\n 'Connection': 'keep-alive',\n 'Accept': 'application/vnd.sfacg.api+json;version=1',\n 'User-Agent': json_info.get(\"User-Agent\"),\n 'Authorization': json_info.get(\"Authorization\")\n}\n\n\ndef get(url: str):\n api_url = json_info.get(\"Host\") + url.replace(json_info.get(\"Host\"), '')\n for retry in range(json_info.get(\"max_retry\")):\n try:\n return requests.get(api_url, headers=headers).json()\n except (OSError, TimeoutError, IOError) as error:\n print(\"Get Error Retry: \" + retry)\n\n\ndef post(url: str):\n api_url = json_info.get(\"Host\") + url.replace(json_info.get(\"Host\"), '')\n for retry in range(json_info.get(\"max_retry\")):\n try:\n return requests.post(api_url, headers=headers).json()\n except (OSError, TimeoutError, IOError) as error:\n print(\"Get Error Retry: \" + retry)\n", "id": "11393621", "language": "Python", "matching_score": 2.0761938095092773, "max_stars_count": 0, "path": "search/headers.py" }, { "content": "import requests\r\nfrom instance import *\r\nimport functools\r\nfrom fake_useragent import UserAgent\r\n\r\nsession = requests.session()\r\n\r\n\r\ndef MaxRetry(func, max_retry=5):\r\n @functools.wraps(func)\r\n def wrapper(*args, **kwargs):\r\n for retry in range(max_retry):\r\n response = func(*args, **kwargs)\r\n if not isinstance(response, bool):\r\n return response\r\n else:\r\n print(\"尝试第:{}次\".format(retry + 1))\r\n\r\n return wrapper\r\n\r\n\r\ndef headers():\r\n return {\r\n \"Keep-Alive\": \"300\",\r\n \"Connection\": \"Keep-Alive\",\r\n \"Cache-Control\": \"no-cache\",\r\n \"Accept-Encoding\": \"gzip\",\r\n 'User-Agent': UserAgent(verify_ssl=False).random,\r\n }\r\n\r\n\r\ndef get(api_url: str, params=None, max_retry: int = 5, **kwargs):\r\n for retry in range(max_retry):\r\n try:\r\n response = requests.get(api_url, headers=headers(), params=params, **kwargs)\r\n if response.status_code == 200:\r\n return response\r\n except requests.exceptions.RequestException as error:\r\n if retry >= 2:\r\n print(\"\\nGet url:{} Error:{}\".format(api_url, error))\r\n\r\n\r\n@MaxRetry\r\ndef post(api_url: str, data=None, **kwargs):\r\n try:\r\n response = requests.post(api_url, headers=headers(), params=data, **kwargs)\r\n if response.status_code == 200:\r\n return response\r\n else:\r\n return False\r\n except requests.exceptions.RequestException as error:\r\n print(\"\\nGet url:{} Error:{}\".format(api_url, error))\r\n return False\r\n\r\n\r\n@MaxRetry\r\ndef put(api_url: str, data=None, **kwargs):\r\n try:\r\n response = requests.put(api_url, headers=headers(), params=data, **kwargs)\r\n if response.status_code == 200:\r\n return response\r\n else:\r\n return False\r\n except requests.exceptions.RequestException as error:\r\n print(\"\\nGet url:{} Error:{}\".format(api_url, error))\r\n return False\r\n", "id": "4562126", "language": "Python", "matching_score": 3.3319122791290283, "max_stars_count": 1, "path": "API/HttpUtil.py" }, { "content": "from typing import Union\nimport requests\nfrom instance import *\nimport functools\n\n\ndef max_retry(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n for retry in range(5):\n response = func(*args, **kwargs)\n if not isinstance(response, bool):\n return response\n else:\n time.sleep(retry * 0.5)\n\n return wrapper\n\n\ndef get_api(\n api_url: str,\n params: Union[dict, str] = None,\n headers: dict = \"app\",\n return_type: str = \"json\",\n **kwargs) -> Union[dict, bool, bytes, str]:\n try:\n if return_type == \"json\":\n return requests.get(api_url, headers=headers, params=params, 
**kwargs).json()\n if return_type == \"content\":\n return requests.get(api_url, headers=headers, params=params, **kwargs).content\n if return_type == \"text\":\n return requests.get(api_url, headers=headers, params=params, **kwargs).text\n except requests.exceptions.RequestException as error:\n print(\"HttpUtil.get error:\", error)\n\n\ndef post_api(\n api_url: str,\n data: Union[dict, str] = None,\n headers: dict = \"app\",\n return_type: str = \"json\",\n **kwargs) -> Union[dict, bool, bytes, str, None]:\n try:\n if return_type == \"json\":\n return requests.post(api_url, headers=headers, data=data, **kwargs).json()\n if return_type == \"content\":\n return requests.post(api_url, headers=headers, data=data, **kwargs).content\n if return_type == \"text\":\n return requests.post(api_url, headers=headers, data=data, **kwargs).text\n except requests.exceptions.RequestException as error:\n print(\"HttpUtil.get error:\", error)\n\n\ndef put_api(\n api_url: str,\n data: Union[dict, str] = None,\n headers: dict = \"app\",\n return_type: str = \"json\",\n **kwargs) -> Union[dict, bool, bytes, str, None]:\n try:\n if return_type == \"json\":\n return requests.put(api_url, headers=headers, data=data, **kwargs).json()\n if return_type == \"content\":\n return requests.put(api_url, headers=headers, data=data, **kwargs).content\n if return_type == \"text\":\n return requests.put(api_url, headers=headers, data=data, **kwargs).text\n except requests.exceptions.RequestException as error:\n print(\"HttpUtil.get error:\", error)\n", "id": "6169117", "language": "Python", "matching_score": 1.5500719547271729, "max_stars_count": 0, "path": "PixivAPI/HttpUtil.py" }, { "content": "import requests\r\nfrom fake_useragent import UserAgent\r\nfrom rich import print\r\n\r\nfrom instance import *\r\n\r\n\r\ndef headers():\r\n return {\r\n 'referer': 'https://www.bilibili.com',\r\n 'Accept': 'application/json',\r\n 'User-Agent': \"Mozilla/5.0 BiliDroid/6.37.1 (<EMAIL>)\",\r\n }\r\n\r\n\r\ndef progress(data: str, title: str, size: int, content_size: int):\r\n bar = '%s%.2f%%' % (\"■\" * int(size * 50 / content_size), float(size / content_size * 100))\r\n print('[下载进度]:', bar, end='\\r')\r\n with open(f\"{title}.flv\", 'ab+') as file: # 显示进度条\r\n file.write(data)\r\n\r\n\r\ndef download(url: str, title: str, max_retry=10, params=None):\r\n response = requests.get(url, headers=headers(), params=params, stream=True) # stream=True必须写上\r\n size = 0 # 初始化已下载大小\r\n content_size = int(response.headers['content-length']) # 下载文件总大小\r\n print('Start download,[File size]:{size:.2f} MB'.format(size=content_size / 1024 / 1024))\r\n if response.status_code != 200: # 判断是否响应成功\r\n print(response.status_code)\r\n return response.status_code\r\n for index, data in enumerate(response.iter_content(chunk_size=1024)):\r\n size += len(data)\r\n progress(data, title, size, content_size)\r\n\r\n\r\ndef get(url: str, params=None, max_retry=10, *args, **kwargs):\r\n for retry in range(max_retry):\r\n result = requests.get(url=url, headers=headers(), params=params, *args, **kwargs)\r\n if result.status_code == 200:\r\n return result\r\n print(\"{}请求失败,第{}次重新请求:\".format(url, retry))\r\n\r\n\r\ndef post(url, data=None, *args, **kwargs):\r\n for retry in range(int(Vars.cfg.data(\"headers\", \"retry\"))):\r\n result = requests.post(url=url, headers=headers(), data=data, *args, **kwargs)\r\n if result.status_code == 200:\r\n return result\r\n print(\"{}请求失败,第{}次重新请求:\".format(url, retry))\r\n\r\n\r\ndef put(url, data=None, *args, **kwargs):\r\n for 
retry in range(int(Vars.cfg.data(\"headers\", \"retry\"))):\r\n result = requests.put(url=url, headers=headers(), data=data, *args, **kwargs)\r\n if result.status_code == 200:\r\n return result\r\n print(\"{}请求失败,第{}次重新请求:\".format(url, retry))\r\n", "id": "8483711", "language": "Python", "matching_score": 4.364121913909912, "max_stars_count": 0, "path": "BilibiliAPP/HttpUtil.py" }, { "content": "import requests\r\nimport BilibiliAPP\r\nfrom fake_useragent import UserAgent\r\nfrom rich import print\r\nimport time\r\n\r\n\r\n\r\ndef download(url, params=None):\r\n start = time.time() # 下载开始时间\r\n headers = {\r\n 'referer': 'https://www.bilibili.com',\r\n 'Accept': 'application/json',\r\n 'User-Agent': \"Mozilla/5.0 BiliDroid/6.37.1 (<EMAIL>)\",\r\n }\r\n response = requests.get(url, headers=headers, params=params, stream=True) # stream=True必须写上\r\n size = 0 # 初始化已下载大小\r\n print(response.headers)\r\n content_size = int(response.headers['content-length']) # 下载文件总大小\r\n if response.status_code == 200: # 判断是否响应成功\r\n print('Start download,[File size]:{size:.2f} MB'.format(size=content_size / 1024 / 1024)) # 开始下载,显示下载文件大小\r\n with open(\"ssp.flv\", 'wb') as file: # 显示进度条\r\n for data in response.iter_content(chunk_size=1024):\r\n file.write(data)\r\n size += len(data)\r\n print('[下载进度]:%s%.2f%%' %\r\n ('>' * int(size * 50 / content_size), float(size / content_size * 100)), end='\\r')\r\n end = time.time() # 下载结束时间\r\n print('Download completed!,times: %.2f秒' % (end - start)) # 输出下载用时时间\r\n\r\n\r\nBilibiliAPP.get_video_url()\r\nif __name__ == '__main__':\r\n Transformation = BilibiliAPP.Transformation()\r\n print(Transformation.AV(\"BV11D4y1c7nP\"))\r\n # print(enc(722602127))\r\n", "id": "9897424", "language": "Python", "matching_score": 2.11230206489563, "max_stars_count": 0, "path": "run.py" }, { "content": "import BilibiliAPP\r\nfrom instance import *\r\n\r\ndef shell_downlaod_video(inputs):\r\n start = time.time() # 下载开始时间\r\n if len(inputs) >= 2:\r\n video_url, title = BilibiliAPP.video_download_id(str(inputs[1]))\r\n BilibiliAPP.HttpUtil.download(video_url, title)\r\n print('Download completed!,times: %.2f秒' % (time.time() - start)) # 输出下载用时时间\r\n else:\r\n print(\"没有输入bilibiliId\")\r\n\r\n\r\ndef shell():\r\n if len(sys.argv) > 1 and type(sys.argv) is list:\r\n command_line = True\r\n inputs = sys.argv[1:]\r\n else:\r\n command_line = False\r\n inputs = re.split('\\\\s+', input_('>').strip())\r\n while True:\r\n if inputs[0] == 'q' or inputs[0] == 'quit':\r\n sys.exit(\"已退出程序\")\r\n elif inputs[0] == 'h' or inputs[0] == 'help':\r\n print(\"help\")\r\n elif inputs[0] == 'd' or inputs[0] == 'download':\r\n shell_downlaod_video(inputs)\r\n else:\r\n print(inputs[0], \"为无效指令\")\r\n if command_line is True:\r\n sys.exit(1)\r\n inputs = re.split('\\\\s+', input_('>').strip())\r\n\r\n\r\nif __name__ == '__main__':\r\n shell()\r\n", "id": "4498662", "language": "Python", "matching_score": 1.159988522529602, "max_stars_count": 0, "path": "main.py" }, { "content": "from BilibiliAPP import HttpUtil, UrlConstant\r\nfrom instance import *\r\n\r\n\r\ndef input_bili_id(bili_id: str) -> dict:\r\n bili_id = re.findall(r'video/(.*?)/?', bili_id)[0] if 'http' in bili_id else bili_id\r\n if re.findall(bili_id, \"av\") != -1 or bili_id.isdigit() is True:\r\n return UrlConstant.AID_INFO_API.format(re.sub(r\"av\", \"\", bili_id))\r\n if re.findall(bili_id, \"BV\") != -1:\r\n return UrlConstant.AID_INFO_API.format(Transformation().AV(bili_id))\r\n\r\n\r\ndef video_download_id(bilibili_id: str, max_retry=10):\r\n 
for index, retry in enumerate(range(max_retry)):\r\n response = HttpUtil.get(input_bili_id(bilibili_id)).json()\r\n if response.get(\"code\") == 0:\r\n bv_id = response.get(\"data\")['bvid']\r\n aid = response.get(\"data\")['aid']\r\n title = response.get(\"data\")['title']\r\n cid = response.get(\"data\")['cid']\r\n return get_video_url(bv_id, cid, \"112\", title)\r\n print(f\"retry:{index}\\t\", response.get(\"message\"))\r\n\r\n\r\ndef get_video_url(bid, cid, qn, title) -> str:\r\n for index, retry in enumerate(range(10)):\r\n params = {\r\n 'bvid': bid, 'qn': qn, 'cid': cid,\r\n 'fnval': '0', 'fnver': '0', 'fourk': '1',\r\n }\r\n response = HttpUtil.get(UrlConstant.VIDEO_API, params=params).json()\r\n if response.get(\"code\") == 0:\r\n video_url = [durl['url'] for durl in response.get(\"data\")['durl']]\r\n return video_url[0], re_book_name(title)\r\n print(f\"retry:{index}\\t\", response.get(\"message\"))\r\n\r\n\r\nclass Transformation:\r\n @staticmethod\r\n def AV(bv_id: str):\r\n key = '<KEY>'\r\n result = sum([{key[i]: i for i in range(58)}[bv_id[[11, 10, 3, 8, 4, 6][i]]] * 58 ** i for i in range(6)])\r\n return (result - 8728348608) ^ 177451812\r\n\r\n @staticmethod\r\n def BV(av_id: int):\r\n key = '<KEY>'\r\n x = (av_id ^ 177451812) + 8728348608\r\n r = list('BV1 4 1 7 ')\r\n for i in range(6):\r\n r[[11, 10, 3, 8, 4, 6][i]] = key[x // 58 ** i % 58]\r\n return ''.join(r)\r\n\r\n # print(AV('BV17x411w7KC'))\r\n # print(BV(722602127))\r\n", "id": "12588350", "language": "Python", "matching_score": 2.7910804748535156, "max_stars_count": 0, "path": "BilibiliAPP/__init__.py" }, { "content": "AID_INFO_API = \"https://api.bilibili.com/x/web-interface/view?aid={}\"\r\nVIDEO_API = \"https://api.bilibili.com/x/player/playurl\"", "id": "9712595", "language": "Python", "matching_score": 0.3549037575721741, "max_stars_count": 0, "path": "BilibiliAPP/UrlConstant.py" }, { "content": "import os\nimport json\nimport requests\nimport math\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:56.0) Gecko/20100101 Firefox/56.0',\n 'Host': 'api.bilibili.com',\n 'Cookie': ''\n }\n\ndef getAid(BVid):\n url = f\"http://api.bilibili.com/x/web-interface/view?bvid={BVid}\"\n response = requests.get(url, headers=headers).json()\n AVID = response['data']['aid']\n return AVID\n\n\ndef getReplyPageNum(oid):\n url = f\"https://api.bilibili.com/x/v2/reply?&jsonp=jsonp&pn=1\" + \\\n \"&type=1&oid={str(oid)}&sort=2\"\n respond = requests.get(url, headers=headers).json()\n replyNum = int(respond['data']['page']['acount'])\n replyPageCount = int(respond['data']['page']['count'])\n replyPageSize = int(respond['data']['page']['size'])\n replyPageNum = math.ceil(replyPageCount/replyPageSize)\n # 返回评论的页数replyPageNum\n return replyPageNum\n\n\ndef pinglun(number, avid):\n url = f'https://api.bilibili.com/x/v2/reply?&jsonp=jsonp&pn={number}&type=1&oid={avid}&sort=2'\n respond = requests.get(url, headers=headers).json()\n for data in respond['data']['replies']:\n data = data['member']\n mid = data['mid'] # uid\n uname = data['uname'] # 名字\n sex = data['sex'] # 性别\n sign = data['sign'] # 个人简介\n avatar = data['uname'] # 头像\n for k, y in data.items():\n print(k, '--->', y)\n\n\ndeta = \"\"\"{'rpid': 5433549997,\n\t'oid': 250579623,\n\t'type': 1,\n\t'mid': 357221321,\n\t'root': 0,\n\t'parent': 0,\n\t'dialog': 0,\n\t'count': 0,\n\t'rcount': 0,\n\t'state': 0,\n\t'fansgrade': 1,\n\t'attr': 0,\n\t'ctime': 1632139797,\n\t'rpid_str': '5433549997',\n\t'root_str': '0',\n\t'parent_str': '0',\n\t'like': 
0,\n\t'action': 0,\n\t'member': {\n\t\t'mid': '357221321',\n\t\t'uname': 'Leon_X_',\n\t\t'sex': '男',\n\t\t'sign': 'GGGGGGGGGGGGGGGGGGGFUNK',\n\t\t'avatar': 'http://i0.hdslb.com/bfs/face/52d178af4428d382ccfc03f6bda59fc4b017b6a2.jpg',\n\t\t'rank': '10000',\n\t\t'DisplayRank': '0',\n\t\t'level_info': {\n\t\t\t'current_level': 4,\n\t\t\t'current_min': 0,\n\t\t\t'current_exp': 0,\n\t\t\t'next_exp': 0\n\t\t},\n\t\t'pendant': {\n\t\t\t'pid': 0,\n\t\t\t'name': '',\n\t\t\t'image': '',\n\t\t\t'expire': 0,\n\t\t\t'image_enhance': '',\n\t\t\t'image_enhance_frame': ''\n\t\t},\n\t\t'nameplate': {\n\t\t\t'nid': 0,\n\t\t\t'name': '',\n\t\t\t'image': '',\n\t\t\t'image_small': '',\n\t\t\t'level': '',\n\t\t\t'condition': ''\n\t\t},\n\t\t'official_verify': {\n\t\t\t'type': -1,\n\t\t\t'desc': ''\n\t\t},\n\t\t'vip': {\n\t\t\t'vipType': 0,\n\t\t\t'vipDueDate': 0,\n\t\t\t'dueRemark': '',\n\t\t\t'accessStatus': 0,\n\t\t\t'vipStatus': 0,\n\t\t\t'vipStatusWarn': '',\n\t\t\t'themeType': 0,\n\t\t\t'label': {\n\t\t\t\t'path': '',\n\t\t\t\t'text': '',\n\t\t\t\t'label_theme': '',\n\t\t\t\t'text_color': '',\n\t\t\t\t'bg_style': 0,\n\t\t\t\t'bg_color': '',\n\t\t\t\t'border_color': ''\n\t\t\t},\n\t\t\t'avatar_subscript': 0,\n\t\t\t'nickname_color': ''\n\t\t},\n\t\t'fans_detail': {\n\t\t\t'uid': 357221321,\n\t\t\t'medal_id': 229056,\n\t\t\t'medal_name': 'LOOSE',\n\t\t\t'score': 0,\n\t\t\t'level': 3,\n\t\t\t'intimacy': 0,\n\t\t\t'master_status': 1,\n\t\t\t'is_receive': 1,\n\t\t\t'medal_color': 643602062,\n\t\t\t'medal_color_end': 643602062,\n\t\t\t'medal_color_border': 4284257934,\n\t\t\t'medal_color_name': 4284257934,\n\t\t\t'medal_color_level': 4284257934,\n\t\t\t'guard_level': 0\n\t\t},\n\t\t'following': 0,\n\t\t'is_followed': 0,\n\t\t'user_sailing': {\n\t\t\t'pendant': None,\n\t\t\t'cardbg': None,\n\t\t\t'cardbg_with_focus': None\n\t\t},\n\t\t'is_contractor': False\n\t},\n\t'content': {\n\t\t'message': '乌鸦哥发福了?',\n\t\t'plat': 0,\n\t\t'device': '',\n\t\t'members': [],\n\t\t'jump_url': {},\n\t\t'max_line': 6\n\t},\n\t'replies': [],\n\t'assist': 0,\n\t'folder': {\n\t\t'has_folded': False,\n\t\t'is_folded': False,\n\t\t'rule': 'https://www.bilibili.com/blackboard/foldingreply.html'\n\t},\n\t'up_action': {\n\t\t'like': False,\n\t\t'reply': False\n\t},\n\t'show_follow': False,\n\t'invisible': False,\n\t'reply_control': {}\n}\"\"\"\n\n\n\n\nif __name__ == '__main__':\n BV_number = input('输入BV号,注意不用加上BV\\n')\n avid = getAid(BV_number)\n number = getReplyPageNum(avid)\n pinglun(str(number), str(avid))\n\n\n", "id": "6335249", "language": "Python", "matching_score": 2.339327335357666, "max_stars_count": 0, "path": "bilibiliTesting.py" }, { "content": "# coding:utf-8\r\nimport requests\r\nimport re\r\nimport datetime\r\n\r\npreorder_url_list = []\r\ndays_number = 0\r\n\r\nfor i in range(100):\r\n days_number += 7\r\n result = datetime.datetime(2022, 1, 1) + datetime.timedelta(days=-days_number)\r\n time_ = result.strftime(\"%Y-%m-%d\").replace('-', '')\r\n preorder_url_list.append(\r\n [result.strftime(\"%Y-%m-%d\"),\r\n f'https://pages.sfacg.com/ajax/act/PreOrder.ashx?op=getPreOrderNovels&date={time_}']\r\n )\r\n\r\nheaders = {\r\n 'User-Agent': 'User-Agent:Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, '\r\n 'like Gecko) Version/5.1 Safari/534.50 '\r\n}\r\n\r\n\r\ndef dealData(time_data, url):\r\n result_ = requests.get(url, headers=headers).json()\r\n if result_.get('status') == 200:\r\n result_data = result_.get('data')\r\n for data in result_data:\r\n novel_name = data.get('novelName')\r\n 
book_id = data.get('novelId')\r\n cover = data.get('cover')\r\n is_main_push = data.get('isMainPush')\r\n tags = ','.join(data.get('tags'))\r\n intro = re.sub('<br/>', '\\n', data.get('intro'))\r\n\r\n show_info = '书名:{}\\n序号:{}\\n封面:{}\\n是否上推:{}\\n标签:{}\\n简介:{}\\n'\r\n show_info = show_info.format(\r\n novel_name, book_id, cover, is_main_push, tags, intro\r\n )\r\n print(show_info)\r\n open(str(time_data) + '.txt', 'a', encoding='utf-8').write(show_info)\r\n\r\n\r\nfor url in preorder_url_list:\r\n dealData(url[0], url[1])\r\n", "id": "6022684", "language": "Python", "matching_score": 2.296086072921753, "max_stars_count": 1, "path": "main.py" }, { "content": "import click\r\nimport os\r\nimport re\r\nimport sys\r\nimport json\r\nimport headers\r\n\r\n\r\ndef tag_(tag):\r\n return ', '.join([tags['tagName'] for tags in tag])\r\n\r\n\r\ndef re_novel_id(book_id: str):\r\n book_id = book_id if 'http' not in book_id else \\\r\n re.findall(r'/([0-9]+)/?', book_id)[0]\r\n if book_id.isdigit():\r\n return book_id, 200\r\n else:\r\n return f'输入信息 {book_id} 不是数字!', 403\r\n\r\n\r\ndef search_book_id(novel_id: str):\r\n response = headers.get(headers.json_info[\"NovelInfo\"].format(novel_id))\r\n if response['status']['httpCode'] == 200:\r\n print('书籍名称:', response['data']['novelName'])\r\n print('书籍序号:', response['data']['novelId'])\r\n print('书籍作者:', response['data']['authorName'])\r\n print('书籍字数:', response['data']['charCount'])\r\n print('签约状态:', response['data']['signStatus'])\r\n print('书籍标签:', tag_(response['data']['expand']['sysTags']))\r\n print('最新章节:', response['data']['expand']['latestChapter']['title'],\r\n '\\t章节序号:', response['data']['expand']['latestChapter']['chapId'])\r\n print('更新时间:', response['data']['lastUpdateTime'])\r\n print('全订价格:', response['data']['expand']['originTotalNeedFireMoney'])\r\n\r\n else:\r\n print(response['status']['msg'])\r\n\r\n\r\ndef shell_book_name(inputs):\r\n response = headers.get(headers.json_info.get(\"Search\").format(inputs[1]))\r\n for key, Value in response[0].items():\r\n if key == 'weight':\r\n continue\r\n if key == 'expand':\r\n if Value.get('tags'):\r\n print(\"{0:<{2}}{1}\".format('tags', ','.join(Value.get('tags')), 50))\r\n print(\"{0:<{2}}{1}\".format('typeName', Value.get('typeName'), 50))\r\n print(\"{0:<{2}}{1}\".format('sysTags', tag_(Value.get('sysTags')), 50))\r\n print(\"{0:<{2}}{1}\".format('intro', Value.get('intro'), 50))\r\n continue\r\n print(\"{0:<{2}}{1}\".format(key, Value, 50))\r\n\r\n\r\ndef bookshelf():\r\n response = headers.get(headers.json_info.get(\"Pockets\"))\r\n # print(response)\r\n if response['status']['httpCode'] != 200:\r\n return \"[Ⅹ]cookie information is invalidated!\"\r\n for data in bookshelf['data']:\r\n for novels in data['expand']['novels']:\r\n authorName = novels['authorName']\r\n novelName = novels['novelName']\r\n novelId = novels['novelId']\r\n bookshelfs = \"\\n书名:{}\\n作者:{}\\n序号:{}\".format(novelName, authorName, novelId)\r\n print(bookshelfs)\r\n\r\n\r\ndef search_json(novel_id, code):\r\n http_mode = True if not os.path.exists(\"bookInfo.json\") else False\r\n if http_mode:\r\n search_book_id(novel_id)\r\n return\r\n if code != 200:\r\n print(novel_id)\r\n else:\r\n read_json = open('bookInfo.json', 'r', encoding='utf-8').read()\r\n book_info = json.loads(read_json)['BOOKID'][str(novel_id)]\r\n print('书籍名称:', book_info['novelName'])\r\n print('书籍序号:', book_info['novelId'])\r\n print('书籍作者:', book_info['authorName'])\r\n print('书籍字数:', book_info['charCount'])\r\n print('签约状态:', 
book_info['signStatus'])\r\n print('书籍标签:', tag_(book_info['expand']['sysTags']))\r\n print('最新章节:', book_info['expand']['latestChapter']['title'],\r\n '\\t章节序号:', book_info['expand']['latestChapter']['chapId'])\r\n print('更新时间:', book_info['lastUpdateTime'])\r\n print('全订价格:', book_info['expand']['originTotalNeedFireMoney'])\r\n\r\n\r\ndef main():\r\n inputs = sys.argv[1:]\r\n if inputs[0] == \"s\" or inputs[0] == \"search\":\r\n search_json(*re_novel_id(inputs[1]))\r\n # elif inputs[0].startswith('fx'):\r\n # direction.WebRecommendation().wind_show_info()\r\n elif inputs[0].startswith('n'):\r\n shell_book_name(inputs)\r\n elif inputs[0] == \"sf\" or inputs[0] == \"bookshelf\":\r\n bookshelf()\r\n # thumbs_up.Support().run_script() if mode else thumbs_up.Support()\r\n # elif inputs[0].startswith('up'):\r\n # show_book_up_data(inputs)\r\n else:\r\n print(inputs[0], \"不是有效命令\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "id": "9226318", "language": "Python", "matching_score": 1.6355607509613037, "max_stars_count": 0, "path": "search/search_book.py" }, { "content": "import requests\nimport json, os, time\nfrom datetime import datetime, timedelta, timezone\nutc_dt = datetime.utcnow().replace(tzinfo=timezone.utc)\nbj_dt = utc_dt.astimezone(timezone(timedelta(hours=8)))\ntimestamp = bj_dt.timestamp()\nreadingDate = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')\n\n\nheaders = {\n 'Host': 'api.sfacg.com',\n 'accept-charset': 'UTF-8',\n 'authorization': 'Basic <KEY>\n 'accept': 'application/vnd.sfacg.api+json;version=1',\n 'user-agent': 'boluobao/4.6.36(android;22)/OPPO',\n 'sfsecurity': 'nonce=EE94F4D4-CC0B-43B6-BFF2-6CB72CE8698B&timestamp=' + str(int(timestamp)) + '&devicetoken=<PASSWORD>&sign=7778A67648C9D95483E466D9D341FEA1',\n 'accept-encoding': 'gzip',}\n\n\n# 创建cookie.txt文本\nif not os.path.exists(\"cookie.txt\"):\n open(\"cookie.txt\", \"a\")\n\ndef read_cookie():\n with open('cookie.txt') as fb:\n cookie = fb.read() \n headers['Cookie'] = cookie\n return headers\n \n \ndef postrequests(api, headers, data):\n read_cookie()\n post_responed = requests.post(api, headers=headers, data=data).json()\n return post_responed\n \ndef getrequests(api):\n read_cookie()\n return requests.get(url=api, headers=headers).json()\n \ndef putrequests(api, put_headers, data):\n read_cookie()\n return requests.put(api, headers=put_headers, data=data).json()\n \n \n\ndef task():\n read_cookie()\n print(\"运行时间:\",readingDate)\n ReadTime = {\n \"seconds\":3605,\n \"readingDate\":readingDate,\n \"entityType\":2\n }\n ListenTime = {\n \"seconds\": 3605,\n \"readingDate\": readingDate,\n \"entityType\": 3\n }\n ReadData = json.dumps(ReadTime)\n ListenData = json.dumps(ListenTime)\n \n result = getrequests(\"https://api.sfacg.com/user/signInfo\")\n print(result['status']['msg'])\n if '需要登录才能访问该资源' == result['status']['msg']:\n return result['status']['msg']\n if '签到系统在每日凌晨0~1点之间进行维护,请您选在其他时间签到哦' == result['status']['msg']:\n return result['status']['msg']\n if result['status']['msg'] == '您今天已经签过到了,请明天再来':\n return result['status']['msg']\n put_headers = headers\n put_headers['accept-encoding'] = 'gzip'\n put_headers['content-length'] = '57'\n put_headers['content-type'] = 'application/json; charset=UTF-8'\n print(\"开始执行任务\")\n responed = putrequests('https://api.sfacg.com/user/readingtime', put_headers, data=ListenData)\n postrequests('https://api.sfacg.com/user/tasks/4', headers,data=ListenData)\n postrequests('https://api.sfacg.com/user/tasks/5', headers,data=ListenData)\n 
postrequests('https://api.sfacg.com/user/tasks/17', headers, data=ListenData)\n for i in range(3):\n r = putrequests('https://api.sfacg.com/user/readingtime', put_headers, ReadData)\n print(r)\n time.sleep(0.5)\n putrequests('https://api.sfacg.com/user/tasks/5', put_headers, data=ListenData)\n putrequests('https://api.sfacg.com/user/tasks/4', put_headers, data=ListenData)\n putrequests('https://api.sfacg.com/user/tasks/17', put_headers, data=ListenData)\n\n\ndef check_cookie(): # 验证cookie信息是否失效\n read_cookie()\n result = requests.get('https://api.sfacg.com/user?', headers=headers).json()\n money = requests.get('https://api.sfacg.com/user/money',headers=headers).json()\n # if '需要登录才能访问该资源' not in result['status']['msg']:\n try:\n nick_Name = result['data']['nickName']\n fireMoneyRemain = money['data']['fireMoneyRemain']\n user_vipLevel = money['data']['vipLevel']\n print(\"账号名称:\", nick_Name, \"\\t火卷余额:\", fireMoneyRemain, \"\\tVIP:\", user_vipLevel)\n print(\"Cookie 凭证有效!\")\n except:\n print('Cookie凭证失效 httpCode:',str(result['status']['httpCode']))\n session_APP = input(\"Please input you session_APP:\")\n SFCommunity = input(\"Please input you SFCommunity:\")\n cookie = \"session_APP={};.SFCommunity={}\".format(session_APP, SFCommunity)\n with open(\"cookie.txt\", 'w', encoding='utf-8') as file:\n file.write(cookie)\n print(\"退出程序\");quit()\n\n\n\n# def checkin():\n # print(\"运行时间:\",readingDate)\n # date_warn = \"签到日期: {}年{}月{}日\"\n # for data in getrequests('https://api.sfacg.com/user/signInfo', headers)['data']:\n # print(date_warn.format(data['year'], data['month'], data['day']))\n # put_response = requests.put('https://api.sfacg.com/user/signInfo', headers=headers).json()\n # if \"您今天已经签过到了,请明天再来\" in put_response['status']:\n # print(\"签到提醒: {}\".format(put_response['status']['msg']))\n # print(\"退出程序\");quit()\n # else:\n # print(\"检测到今天还未签到,已自动签到和完成任务\")\n # print(put_response['status']['msg'])\n # task()\n \ncheck_cookie()\n# checkin()\ntask()\n\n", "id": "10653581", "language": "Python", "matching_score": 1.7067275047302246, "max_stars_count": 6, "path": "CheckIn.py" }, { "content": "from requests_html import HTML, HTMLSession\nfrom aiohttp import ClientResponse\nfrom functools import partial\nfrom cchardet import detect\nimport json, aiohttp, asyncio, ctypes\nfrom rich import print\nfrom rich.progress import track\n\n__all__ = (\n 'map', 'Session',\n 'get', 'options', 'head', 'post', 'put', 'patch', 'delete', 'session', 'request'\n)\n\n\nclass Session():\n def __init__(self, *args, **kwargs):\n self.session = self\n self.headers = HTMLSession().headers\n self.cookies = {}\n self.request_pool = []\n\n def __getattr__(self, name):\n if name in ['get', 'options', 'head', 'post', 'put', 'patch', 'delete']:\n new_req = AsyncRequestTask(headers=self.headers, session=self.session)\n new_req.__getattr__(name)\n self.request_pool.append(new_req)\n return new_req.get_params\n\n def __repr__(self):\n return f\"<Ahttp Session [id:{id(self.session)} client]>\"\n\n\nclass AsyncRequestTask():\n def __init__(self, *args, session=None, headers=None, **kwargs):\n self.session = session\n self.headers = headers\n self.cookies = None\n self.kw = kwargs\n self.method = None\n\n def __getattr__(self, name):\n if name in ['get', 'options', 'head', 'post', 'put', 'patch', 'delete']:\n self.method = name\n return self.get_params\n\n def __repr__(self):\n return f\"<AsyncRequestTask session:[{id(self.session)}] req:[{self.method.upper()}:{self.url}]>\"\n\n def get_params(self, *args, **kw):\n self.url 
= args[0]\n self.args = args[1:]\n if \"callback\" in kw:\n self.callback = kw['callback']\n kw.pop(\"callback\")\n else:\n self.callback = None\n if \"headers\" in kw:\n self.headers = kw['headers']\n kw.pop(\"headers\")\n self.kw = kw\n return self\n\n def run(self):\n future = asyncio.ensure_future(single_req(self))\n loop = asyncio.get_event_loop()\n loop.run_until_complete(future)\n new_res = AhttpResponse(self.result, self.content, self)\n return [new_res, self.callback and self.callback(new_res)][0]\n\n\nclass AhttpResponse():\n def __init__(self, res, content, req, *args, **kwargs):\n self.content = content\n self.req = req\n self.raw = self.clientResponse = res\n\n @property\n def text(self):\n code_type = detect(self.content)\n return self.content.decode(code_type['encoding'])\n\n @property\n def url(self):\n return self.clientResponse.url\n\n @property\n def cookies(self):\n return self.clientResponse.cookies\n\n @property\n def headers(self):\n return self.clientResponse.headers\n\n def json(self):\n return json.loads(self.text)\n\n @property\n def status(self):\n return self.clientResponse.status\n\n @property\n def method(self):\n return self.clientResponse.method\n\n @property\n def html(self):\n return self.dom\n\n @property\n def dom(self):\n \"\"\"\n 返回一个requests_html对象,\n 支持所有requests_html的html对象的操作。例如find, xpath, render(先安装chromium浏览器)\n \"\"\"\n html = HTML(html=self.text)\n html.url = self.raw.url\n return html\n\n def __repr__(self):\n return f\"<AhttpResponse status[{self.status}] url=[{self.url}]>\"\n\n\ndef run(tasks, pool=2, max_try=3, callback=None, order=False):\n if not isinstance(tasks, list):\n raise \"the tasks of run must be a list object\"\n conn = aiohttp.TCPConnector(use_dns_cache=True, loop=asyncio.get_event_loop(), ssl=False)\n sem = asyncio.Semaphore(pool)\n result = []\n loop = asyncio.get_event_loop()\n loop.run_until_complete(multi_req(tasks, conn, sem, max_try, callback, result))\n if not order:\n return result\n rid = [*map(lambda x: id(x), tasks)]\n new_res = [*rid]\n for i in result:\n index = rid.index(id(i.req))\n rid[index] = 0\n new_res[index] = i\n return new_res\n\n\ndef wrap_headers(headers):\n new_headers = {}\n for k, v in headers.items():\n new_headers[k] = str(v)\n return new_headers\n\n\nasync def single_req(self):\n async with aiohttp.ClientSession(cookies=self.cookies) as session:\n async with session.request(self.method, self.url, *self.args, ssl=False,\n headers=wrap_headers(self.headers or self.session.headers), **self.kw) as resp:\n res = await resp.read()\n self.result, self.content = resp, res\n\n\nasync def multi_req(tasks, conn, sem, max_try, callback, result):\n sessions_list = {}\n new_tasks = []\n print(\"开始加载请求...\")\n for i in tasks:\n if id(i.session) not in sessions_list:\n sessions_list[id(i.session)] = aiohttp.ClientSession(connector_owner=False, connector=conn,\n cookies=i.session.cookies)\n new_tasks.append(asyncio.ensure_future(control_sem(sem, i, sessions_list[id(i.session)], result)))\n\n await asyncio.wait(new_tasks)\n await asyncio.wait([asyncio.ensure_future(v.close()) for k, v in sessions_list.items()])\n await conn.close() # 关闭tcp连接器\n\n\nasync def control_sem(sem, i, session, result):\n # 限制信号量\n async with sem:\n await fetch(i, session, result)\n\n\nasync def fetch(i, session, result):\n headers = wrap_headers(i.headers or ctypes.cast(i.session, ctypes.py_object).value.headers)\n async with session.request(i.method, i.url, *i.args, headers=headers, **i.kw) as resp:\n res = await resp.read()\n r = 
AhttpResponse(resp, res, i)\n result.append(r)\n if i.callback:\n i.callback(r)\n\n\ndef create_session(method, *args, **kw):\n sess = Session()\n return {\"get\": sess.get, \"post\": sess.post, \"options\": sess.options, \"head\": sess.head, \"put\": sess.put,\n \"patch\": sess.patch, \"delete\": sess.delete}[method](*args, **kw)\n\n\nget = partial(create_session, \"get\")\npost = partial(create_session, \"post\")\noptions = partial(create_session, \"options\")\nhead = partial(create_session, \"head\")\nput = partial(create_session, \"put\")\npatch = partial(create_session, \"patch\")\ndelete = partial(create_session, \"delete\")", "id": "12197164", "language": "Python", "matching_score": 0.9121111631393433, "max_stars_count": 1, "path": "API/ahttp.py" }, { "content": "from base64 import urlsafe_b64encode\r\nfrom hashlib import sha256\r\nfrom instance import *\r\nfrom secrets import token_urlsafe\r\nfrom urllib.parse import urlencode\r\nfrom webbrowser import open as open_url\r\nimport requests\r\n\r\n\r\ndef s256(data):\r\n \"\"\"S256 transformation method.\"\"\"\r\n return urlsafe_b64encode(sha256(data).digest()).rstrip(b\"=\").decode(\"ascii\")\r\n\r\n\r\ndef oauth_pkce(transform):\r\n \"\"\"Proof Key for Code Exchange by OAuth Public Clients (RFC7636).\"\"\"\r\n code_verifier = token_urlsafe(32)\r\n code_challenge = transform(code_verifier.encode(\"ascii\"))\r\n return code_verifier, code_challenge\r\n\r\n\r\ndef open_browser():\r\n code_verifier, code_challenge = oauth_pkce(s256)\r\n login_params = {\r\n \"code_challenge\": code_challenge,\r\n \"code_challenge_method\": \"S256\",\r\n \"client\": \"pixiv-android\",\r\n }\r\n open_url(f\"https://app-api.pixiv.net/web/v1/login?{urlencode(login_params)}\")\r\n return code_verifier\r\n\r\n\r\ndef login(code_verifier, code_information: str):\r\n response = requests.post(\r\n \"https://oauth.secure.pixiv.net/auth/token\",\r\n data={\r\n \"client_id\": \"MOBrBDS8blbauoSck0ZfDbtuzpyT\",\r\n \"client_secret\": \"<KEY>\",\r\n \"code\": code_information,\r\n \"code_verifier\": code_verifier,\r\n \"grant_type\": \"authorization_code\",\r\n \"include_policy\": \"true\",\r\n \"redirect_uri\": \"https://app-api.pixiv.net/web/v1/users/auth/pixiv/callback\",\r\n },\r\n headers={\"User-Agent\": \"PixivAndroidApp/5.0.234 (Android 11; Pixel 5)\"},\r\n ).json()\r\n\r\n if response.get(\"errors\") is not None:\r\n print(\"errors:\", response['errors']['system']['message'])\r\n else:\r\n save_token(response)\r\n return True\r\n\r\n\r\ndef refresh(refresh_token):\r\n response = requests.post(\r\n \"https://oauth.secure.pixiv.net/auth/token\",\r\n data={\r\n \"client_id\": \"MOBrBDS8blbauoSck0ZfDbtuzpyT\",\r\n \"client_secret\": \"<KEY>\",\r\n \"grant_type\": \"refresh_token\",\r\n \"include_policy\": \"true\",\r\n \"refresh_token\": refresh_token,\r\n },\r\n headers={\"User-Agent\": \"PixivAndroidApp/5.0.234 (Android 11; Pixel 5)\"},\r\n ).json()\r\n\r\n if response.get(\"errors\") is not None:\r\n print(\"errors:\", response['errors']['system']['message'])\r\n else:\r\n save_token(response)\r\n return True\r\n\r\n\r\ndef save_token(response):\r\n Vars.cfg.data[\"user_info\"] = {\r\n 'id': response[\"user\"][\"id\"],\r\n 'name': response[\"user\"][\"name\"],\r\n 'account': response[\"user\"][\"account\"],\r\n 'mail_address': response[\"user\"][\"mail_address\"],\r\n }\r\n Vars.cfg.data[\"access_token\"] = response[\"access_token\"]\r\n Vars.cfg.data[\"refresh_token\"] = response[\"refresh_token\"]\r\n Vars.cfg.save()\r\n", "id": "5216514", "language": 
"Python", "matching_score": 0.8602505922317505, "max_stars_count": 0, "path": "PixivAPI/login_pixiv.py" }, { "content": "IMAGE_INFORMATION = \"https://api.obfs.dev/api/pixiv/illust\"\n\n# PIXIV_APP_API\nFOLLOWING_INFORMATION = \"user/following\"\nBOOKMARK_INFORMATION = \"user/bookmarks/illust\"\nRECOMMENDED_INFORMATION = \"illust/recommended\"\nAUTHOR_INFORMATION = \"user/illusts\"\nACCOUNT_INFORMATION = \"user/detail\"\nRANKING_INFORMATION = \"illust/ranking\"\nSEARCH_INFORMATION = \"search/illust\"\n\nPIXIV_HOST = \"https://app-api.pixiv.net/v1/\"\n\n", "id": "2669162", "language": "Python", "matching_score": 0.7015122771263123, "max_stars_count": 1, "path": "PixivAPI/UrlConstant.py" }, { "content": "\r\n\r\nclass ranking:\r\n \r\n \r\n def ranking(self):\r\n ranking_list_bookid = []\r\n for i in range(10000):\r\n url = f'https://api.laomaoxs.com/novel/ranking?sex=2&page={i}&order=0'\r\n if not HttpUtil.get(url)['data']:\r\n print('分类已经下载完毕')\r\n break\r\n for data in HttpUtil.get(url)['data']:\r\n self.bookName = data['book_title']\r\n print(self.bookName)\r\n ranking_list_bookid.append(data['book_id'])\r\n return ranking_list_bookid", "id": "9575252", "language": "Python", "matching_score": 1.6108171939849854, "max_stars_count": 2, "path": "function/rank.py" }, { "content": "from instance import *\n\ndef SAVE_FILE(bookName, number, book_title)\n return os.path.join(Vars.cfg.data.get('save_dir'), bookName, f\"{}.{}.txt\"),\n\ndef OUT_FILE(bookName)\n return os.path.join(Vars.cfg.data.get('output_dir'), f'{bookName}.txt'), 'a', line)\n", "id": "12765366", "language": "Python", "matching_score": 1.3194580078125, "max_stars_count": 2, "path": "API/path.py" }, { "content": "from instance import *\n\n\ndef setup_config():\n Vars.cfg.load()\n config_change = False\n if type(Vars.cfg.data.get('help')) is not str or Vars.cfg.data.get('help') == \"\":\n Vars.cfg.data['help'] = \"输入 - 加上首字母\\nh | help\\t\\t\\t\\t\\t\\t--- 显示说明\\nq | quit\\t\\t\\t\\t\\t\\t--- 退出正在运作的程序\\nc | cookie\\t\\t\\t\\t\\t\\t--- 检测本地的cookie凭证\\nb | b + bookid\\t\\t\\t\\t\\t\\t--- 下载指定小说章节文本\\nu | u + url\\t\\t\\t\\t\\t\\t--- 下载指定小说章节文本\\nn | n + bookname\\t\\t\\t\\t\\t--- 输入书名下载小说文本\\nt | t + tagname\\t\\t\\t\\t\\t\\t--- 下载全站标签书籍信息\"\n config_change = True\n\n if type(Vars.cfg.data.get('key')) is not str or Vars.cfg.data.get('key') == \"\":\n Vars.cfg.data['key'] = \"<KEY>\"\n config_change = True\n\n # if type(Vars.cfg.data.get('iv')) is not str or Vars.cfg.data.get('iv') == \"\":\n # Vars.cfg.data['iv'] = b'8yeywyJ45esysW8M'\n # config_change = True\n\n if type(Vars.cfg.data.get('output_dir')) is not str or Vars.cfg.data.get('output_dir') == \"\":\n Vars.cfg.data['output_dir'] = \"Download\"\n config_change = True\n if type(Vars.cfg.data.get('save_dir')) is not str or Vars.cfg.data.get('save_dir') == \"\":\n Vars.cfg.data['save_dir'] = \"Config\"\n config_change = True\n\n if type(Vars.cfg.data.get('Open_ThreadPool')) is not bool:\n Vars.cfg.data['Open_ThreadPool'] = True\n config_change = True\n\n if type(Vars.cfg.data.get('tocken')) is not str or Vars.cfg.data.get('tocken') == \"\":\n Vars.cfg.data['tocken'] = \"\"\n config_change = True\n if type(Vars.cfg.data.get('shield')) is not str or Vars.cfg.data.get('shield') == \"\":\n Vars.cfg.data['shield'] = \"编辑正在手打中\"\n config_change = True\n if type(Vars.cfg.data.get('max_workers_number')) is not int:\n Vars.cfg.data['max_workers_number'] = 16\n config_change = True\n\n if config_change:\n Vars.cfg.save()\n", "id": "6389362", "language": "Python", "matching_score": 
0.6627069115638733, "max_stars_count": 2, "path": "API/Settings.py" }, { "content": "from setuptools import setup\r\n\r\nsetup(\r\n name='seach_book',\r\n version='1.0',\r\n packages=['search'],\r\n url='',\r\n license='',\r\n author='Elaina',\r\n author_email='<EMAIL>',\r\n include_package_data=True,\r\n zip_safe=True,\r\n install_requires=[],\r\n entry_points={\r\n 'console_scripts': [\r\n 'search = search.search_book:main'\r\n ]\r\n },\r\n description=''\r\n)\r\n", "id": "10486045", "language": "Python", "matching_score": 0.005738681647926569, "max_stars_count": 0, "path": "setup.py" }, { "content": "import threading\nimport Image\nfrom instance import *\n\n\nclass Multithreading:\n def __init__(self):\n self.threading_list = []\n self.threading_page = 0\n self.images_info_obj_list = []\n self.pool_length = 0\n self.max_thread = Vars.cfg.data.get(\"max_thread\")\n self.semaphore = threading.Semaphore(self.max_thread)\n\n def add_image_info_obj(self, image_info_obj):\n self.images_info_obj_list.append(image_info_obj) # add image_info_obj to threading pool\n self.pool_length += 1 # pool length + 1 if add image_info_obj to threading pool\n\n def handling_threads(self):\n if len(self.images_info_obj_list) != 0:\n print(\"download length:{}\".format(self.pool_length))\n self.threading_list = [\n threading.Thread(target=self.download_images, args=(images_info,))\n for images_info in self.images_info_obj_list\n ]\n for thread_ing in self.threading_list: # start threading pool for download images\n thread_ing.start() # start threading pool for download images\n\n for thread_ing in self.threading_list: # wait for all threading pool finish download\n thread_ing.join() # wait for all threading pool finish download\n self.threading_list.clear()\n else:\n print(\"threading pool is empty, no need to start download threading pool.\")\n self.images_info_obj_list.clear() # clear threading pool and semaphore for next download\n\n def download_images(self, images_info):\n self.semaphore.acquire() # acquire semaphore to limit threading pool\n self.threading_page += 1 # threading page count + 1\n images_info.show_images_information(thread_status=True)\n if images_info.page_count == 1:\n images_info.save_image(images_info.original_url)\n else:\n images_info.save_image(images_info.original_url_list)\n # print(images_info.image_name, \"的作品下载完毕\")\n print(\"下载进度:{}/{}\".format(self.threading_page, len(self.images_info_obj_list)), end=\"\\r\")\n self.semaphore.release() # release semaphore when threading pool is empty\n\n def executing_multithreading(self, image_info_list: list):\n if isinstance(image_info_list, list) and len(image_info_list) != 0: # if image_info_list is not empty list\n for illusts in image_info_list: # add illusts to threading pool for download\n self.add_image_info_obj(Image.ImageInfo(illusts))\n self.handling_threads() # start download threading pool for download images\n else:\n return print(\"get works list error:\", image_info_list)\n", "id": "3622841", "language": "Python", "matching_score": 4.238495349884033, "max_stars_count": 1, "path": "complex_image.py" }, { "content": "import threading\nfrom instance import *\n\n\nclass Complex:\n def __init__(self):\n self.images_info_obj_list = []\n self.threading_list = []\n self.threading_page = 0\n self.max_thread = Vars.cfg.data.get(\"max_thread\")\n self.semaphore = threading.Semaphore(self.max_thread)\n\n def add_image_info_obj(self, image_info_obj):\n self.images_info_obj_list.append(image_info_obj)\n\n def 
start_download_threading(self):\n print(\"插画列表加载完毕...\")\n if len(self.images_info_obj_list) != 0:\n print(\"开始下载, 一共:\", len(self.images_info_obj_list), \"幅插画\\n\\n\")\n self.threading_list = [threading.Thread(target=self.thread_download_images, args=(images_info,))\n for images_info in self.images_info_obj_list]\n for thread_ing in self.threading_list:\n thread_ing.start()\n\n for thread_ing in self.threading_list:\n thread_ing.join()\n self.threading_list.clear()\n else:\n print(\"线程队列为空,没有可下载的插画!\")\n self.images_info_obj_list.clear()\n\n def thread_download_images(self, images_info):\n self.semaphore.acquire()\n self.threading_page += 1\n images_info.show_images_information(thread_status=True)\n if images_info.page_count == 1:\n images_info.save_image(images_info.original_url)\n else:\n images_info.save_image(images_info.original_url_list)\n # print(images_info.image_name, \"的作品下载完毕\")\n print(\"下载进度:{}/{}\".format(self.threading_page, len(self.images_info_obj_list)), end=\"\\r\")\n self.semaphore.release()\n", "id": "5274744", "language": "Python", "matching_score": 2.556016445159912, "max_stars_count": 0, "path": "complex_image.py" }, { "content": "import argparse\r\nimport json\r\nimport sys\r\nimport Image\r\nfrom instance import *\r\nfrom rich.progress import track\r\nimport PixivAPI\r\nimport complex_image\r\n\r\n\r\ndef update():\r\n download_test = False\r\n response = PixivAPI.get(\"https://raw.githubusercontent.com/Elaina-Alex/pixiv_crawler/main/update.json\")\r\n if not os.path.exists('update.json'):\r\n json.dump(response, open('update.json', 'w'))\r\n download_test = True\r\n data = json.loads(open('update.json', 'r').read())\r\n if data['version'] < response['version']:\r\n print(\"检测到有新版本\", response['version'], \"是否进行更新?[yes/no]\")\r\n choice = PixivAPI.input_str('>').strip()\r\n if choice == \"yes\":\r\n download_test = True\r\n print(\"开始更新\", response['version'], \"版本\")\r\n else:\r\n download_test = False\r\n\r\n if download_test:\r\n with open(data['name'] + \".exe\", 'wb') as file:\r\n print(response['download_url'].format(response['version']))\r\n file.write(PixivAPI.get(response['download_url'].format(response['version']), types=\"content\"))\r\n print(data['name'] + \".exe\", \"下载完毕\")\r\n json.dump(response, open('update.json', 'w'))\r\n print(\"三秒后自动退出脚本...\")\r\n sys.exit()\r\n\r\n\r\ndef shell_author_works(author_id: str, next_url: str = \"\"): # download author images save to local\r\n while True:\r\n if next_url is None: # if next_url is None, it means that it is download complete\r\n return print(\"the end of author_works list\")\r\n if next_url == \"\": # if next_url is empty, it means it is the first time to download author works list\r\n response_list, next_url = PixivAPI.PixivApp.author_information(author_id=author_id)\r\n else: # if next_url is not empty, it means it is the next time to download author works list\r\n response_list, next_url = PixivAPI.PixivApp.author_information(api_url=next_url)\r\n # if response_list is not list, it means that it is download complete\r\n multi_threading_image_pool: complex_image.Complex = complex_image.Complex() # new threading pool\r\n if isinstance(response_list, list) and len(response_list) != 0:\r\n for illusts in response_list: # add illusts to threading pool for download\r\n multi_threading_image_pool.add_image_info_obj(Image.ImageInfo(illusts))\r\n multi_threading_image_pool.start_download_threading() # start download threading pool for download\r\n else:\r\n return print(\"get author works list error:\", 
response_list)\r\n\r\n\r\n@count_time\r\ndef shell_illustration(inputs):\r\n if len(inputs) >= 2:\r\n Vars.images_info = PixivAPI.PixivApp.images_information(PixivAPI.rec_id(inputs[1]))\r\n if isinstance(Vars.images_info, dict):\r\n Vars.images_info = Image.ImageInfo(Vars.images_info)\r\n Vars.images_info.show_images_information()\r\n if Vars.images_info.page_count == 1:\r\n Vars.images_info.save_image(Vars.images_info.original_url)\r\n else:\r\n Vars.images_info.save_image(Vars.images_info.original_url_list)\r\n else:\r\n print(\"没有找到相应的作品!\")\r\n else:\r\n print(\"你没有输入id或者链接\")\r\n\r\n\r\n@count_time\r\ndef shell_search(inputs: list):\r\n if len(inputs) < 2:\r\n print(\"没有输入搜索信息\")\r\n return False\r\n response_list = PixivAPI.Tag.search_information(png_name=inputs[1])\r\n if isinstance(response_list, list) and len(response_list) != 0:\r\n threading_image_pool = complex_image.Complex()\r\n for image_info in response_list:\r\n threading_image_pool.add_image_info_obj(Image.ImageInfo(image_info))\r\n threading_image_pool.start_download_threading()\r\n else:\r\n print(\"没有找到相应的作品!\")\r\n\r\n\r\n@count_time\r\ndef shell_download_follow_author():\r\n follow_information_list = PixivAPI.PixivApp.follow_information()\r\n if isinstance(follow_information_list, list):\r\n print(\"共有\", len(follow_information_list), \"个关注\")\r\n for follow_information in follow_information_list:\r\n print(\"开始下载\", follow_information['user']['name'], \"的作品\")\r\n threading_image_pool = complex_image.Complex()\r\n for illusts in follow_information['illusts']:\r\n threading_image_pool.add_image_info_obj(Image.ImageInfo(illusts))\r\n threading_image_pool.start_download_threading()\r\n print(follow_information['user']['name'], \"的作品下载完毕\")\r\n\r\n\r\n@count_time\r\ndef shell_download_rank():\r\n response_list = PixivAPI.PixivApp.rank_information()\r\n if not isinstance(response_list, list):\r\n print(\"排行榜下载失败\")\r\n elif len(response_list) == 0:\r\n print(\"排行榜获取完毕!\")\r\n else:\r\n threading_image_pool = complex_image.Complex()\r\n for illusts in response_list:\r\n threading_image_pool.add_image_info_obj(Image.ImageInfo(illusts))\r\n threading_image_pool.start_download_threading()\r\n\r\n\r\n@count_time\r\ndef shell_read_text_id():\r\n default_file_name = \"pixiv_id_list.txt\"\r\n if not os.path.exists(default_file_name):\r\n open(default_file_name, 'w').close()\r\n image_id_list = []\r\n for line in open(default_file_name, 'r', encoding='utf-8', newline=\"\").readlines():\r\n if line.startswith(\"#\") or line.strip() == \"\":\r\n continue\r\n image_id = re.findall(r'^(\\d{1,8})', line)\r\n if image_id and len(image_id) >= 5:\r\n image_id_list.append(image_id[0])\r\n if isinstance(image_id_list, list) and len(image_id_list) != 0:\r\n threading_image_pool = complex_image.Complex()\r\n for image_id in track(image_id_list, description=\"本地插画集加载中...\"):\r\n Vars.images_info = PixivAPI.PixivApp.images_information(image_id)\r\n if isinstance(Vars.images_info, dict):\r\n threading_image_pool.add_image_info_obj(Image.ImageInfo(Vars.images_info))\r\n else:\r\n return print(\"无法进行下载,ERROR:\", Vars.images_info)\r\n threading_image_pool.start_download_threading()\r\n\r\n\r\ndef shell_test_pixiv_token():\r\n if Vars.cfg.data.get(\"refresh_token\") == \"\":\r\n print(\"检测到本地档案没有令牌,请登入网站获取code来请求token,也可以将token自行写入本地档案\")\r\n code_verifier = PixivAPI.login_pixiv.open_browser()\r\n if PixivAPI.login_pixiv.login(code_verifier, PixivAPI.input_str('code:').strip()):\r\n print(f\"code信息验证成功!,token信息已经保存在本地档案,请继续使用\")\r\n else:\r\n 
print(f\"输入code无效,请重新尝试获取code!\")\r\n shell_test_pixiv_token()\r\n if not PixivAPI.PixivApp.get_user_info(show_start=True):\r\n PixivAPI.refresh_pixiv_token()\r\n\r\n\r\ndef shell_download_recommend(next_url: str = \"\"): # download recommend images from pixiv api and save to local\r\n while True:\r\n if next_url is None: # if next_url is None, it means that it is download complete\r\n return print(\"the end of recommend list\")\r\n if next_url == \"\": # if next_url is empty, it means it is the first time to download recommend list\r\n response_list, next_url = PixivAPI.PixivApp.recommend_images()\r\n else: # if next_url is not empty, it means it is the next time to download recommend list\r\n response_list, next_url = PixivAPI.PixivApp.recommend_images(api_url=next_url)\r\n\r\n # if response_list is not list, it means that it is download complete\r\n multi_threading_image_pool: complex_image.Complex = complex_image.Complex() # new threading pool\r\n if isinstance(response_list, list) and len(response_list) != 0:\r\n for illusts in response_list: # add illusts to threading pool for download\r\n multi_threading_image_pool.add_image_info_obj(Image.ImageInfo(illusts))\r\n multi_threading_image_pool.start_download_threading() # start download threading pool for download\r\n else:\r\n return print(\"get recommend list error:\", response_list)\r\n\r\n\r\ndef shell_download_stars(next_url: str = \"\"): # get stars list and download all the images in the list\r\n while True:\r\n if next_url is None:\r\n return print(\"the end of stars list\") # if next_url is None, it means that it is download complete\r\n if next_url == \"\": # if next_url is empty, it means it is the first time to download stars list\r\n response_list, next_url = PixivAPI.PixivApp.start_images()\r\n else: # if next_url is not empty, it means it is the next time to download stars list\r\n response_list, next_url = PixivAPI.PixivApp.start_images(api_url=next_url)\r\n multi_threading_image_pool: complex_image.Complex = complex_image.Complex() # new threading pool for download\r\n if isinstance(response_list, list) and len(response_list) != 0:\r\n for illusts in response_list: # add illusts to threading pool for download\r\n multi_threading_image_pool.add_image_info_obj(Image.ImageInfo(illusts))\r\n multi_threading_image_pool.start_download_threading() # start download threading pool for download\r\n else:\r\n return print(\"get star list error:\", response_list)\r\n\r\n\r\ndef start_parser() -> argparse.Namespace: # start parser for command line arguments and start download process\r\n parser = argparse.ArgumentParser() # create parser object for command line arguments\r\n parser.add_argument(\r\n \"-l\", \"--login\",\r\n dest=\"login\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"登录账号\"\r\n ) # add login argument to parser object for command line arguments\r\n parser.add_argument(\r\n \"-d\",\r\n \"--download\",\r\n dest=\"downloadbook\",\r\n nargs=1,\r\n default=None,\r\n help=\"输入image-id\"\r\n ) # add download argument to parser object for command line arguments for download image\r\n parser.add_argument(\r\n \"-m\", \"--max\",\r\n dest=\"threading_max\",\r\n default=None,\r\n help=\"更改线程\"\r\n ) # add max argument to parser object for command line arguments for change threading max\r\n parser.add_argument(\r\n \"-n\", \"--name\",\r\n dest=\"name\",\r\n nargs=1,\r\n default=None,\r\n help=\"输入搜搜信息\"\r\n ) # add name argument to parser object for command line arguments for search\r\n parser.add_argument(\r\n 
\"-u\",\r\n \"--update\",\r\n dest=\"update\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"下载本地档案\"\r\n ) # add update argument to parser object for command line arguments for download local file\r\n parser.add_argument(\r\n \"-s\", \"--stars\",\r\n dest=\"stars\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"下载收藏插画\"\r\n ) # add stars argument to parser object for command line arguments for download stars\r\n parser.add_argument(\r\n \"-r\", \"--recommend\",\r\n dest=\"recommend\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"下载推荐插画\"\r\n ) # add recommend argument to parser object for command line arguments for download recommend\r\n parser.add_argument(\r\n \"-k\", \"--ranking\",\r\n dest=\"ranking\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"下载排行榜插画\"\r\n ) # add ranking argument to parser object for command line arguments for download ranking\r\n parser.add_argument(\r\n \"-c\",\r\n \"--clear_cache\",\r\n dest=\"clear_cache\",\r\n default=False,\r\n action=\"store_true\"\r\n ) # add clear_cache argument to parser object for command line arguments for clear cache\r\n parser.add_argument(\r\n \"-a\",\r\n \"--author\",\r\n dest=\"author\",\r\n nargs=1,\r\n default=None,\r\n help=\"输入作者-id\"\r\n ) # add author argument to parser object for command line arguments for download author\r\n return parser.parse_args() # return parser object for command line arguments and return it as a tuple\r\n\r\n\r\ndef shell_parser():\r\n args, shell_console = start_parser(), False\r\n if args.recommend:\r\n shell_download_recommend()\r\n shell_console = True\r\n\r\n if args.ranking:\r\n shell_download_rank()\r\n shell_console = True\r\n\r\n if args.stars:\r\n shell_download_stars()\r\n shell_console = True\r\n\r\n if args.update:\r\n shell_read_text_id()\r\n shell_console = True\r\n\r\n if args.clear_cache:\r\n Vars.cfg.data.clear(), set_config()\r\n Vars.cfg.save()\r\n sys.exit(3)\r\n\r\n if args.threading_max:\r\n Vars.cfg.data['max_thread'] = int(args.max)\r\n\r\n if args.name:\r\n shell_search(['n'] + args.name)\r\n shell_console = True\r\n\r\n if args.downloadbook:\r\n shell_illustration(['d'] + args.downloadbook)\r\n shell_console = True\r\n\r\n if args.author:\r\n shell_author_works(args.author[0])\r\n shell_console = True\r\n\r\n if args.login:\r\n shell_test_pixiv_token()\r\n shell_console = True\r\n\r\n if not shell_console:\r\n for info in Msg.msg_help:\r\n print('[帮助]', info)\r\n while True:\r\n shell(re.split('\\\\s+', PixivAPI.input_str('>').strip()))\r\n\r\n\r\ndef shell(inputs: list):\r\n if inputs[0] == 'q' or inputs[0] == 'quit':\r\n sys.exit(\"已退出程序\")\r\n elif inputs[0] == 'h' or inputs[0] == 'help':\r\n for msg_help in Msg.msg_help:\r\n print('[帮助]', msg_help)\r\n elif inputs[0] == 'l' or inputs[0] == 'login':\r\n shell_test_pixiv_token()\r\n elif inputs[0] == 'd' or inputs[0] == 'download':\r\n shell_illustration(inputs)\r\n elif inputs[0] == 's' or inputs[0] == 'stars':\r\n shell_download_stars()\r\n elif inputs[0] == 'n' or inputs[0] == 'name':\r\n shell_search(inputs)\r\n elif inputs[0] == 't' or inputs[0] == 'recommend':\r\n shell_download_recommend()\r\n elif inputs[0] == 'u' or inputs[0] == 'update':\r\n shell_read_text_id(inputs)\r\n elif inputs[0] == 'r' or inputs[0] == 'rank':\r\n shell_download_rank()\r\n elif inputs[0] == 'f' or inputs[0] == 'follow':\r\n shell_download_follow_author()\r\n else:\r\n print(inputs[0], \"为无效指令\")\r\n\r\n\r\nif __name__ == '__main__':\r\n set_config()\r\n # update()\r\n try:\r\n 
shell_test_pixiv_token()\r\n shell_parser()\r\n except KeyboardInterrupt:\r\n print(\"已手动退出程序\")\r\n sys.exit(1)\r\n except Exception as error:\r\n print(\"程序意外退出,ERROR:\", error)\r\n", "id": "4844662", "language": "Python", "matching_score": 7.366781711578369, "max_stars_count": 0, "path": "main.py" }, { "content": "import argparse\r\nimport json\r\nimport sys\r\nimport Image\r\nfrom instance import *\r\nfrom rich.progress import track\r\nimport PixivAPI\r\nimport complex_image\r\n\r\n\r\ndef update():\r\n download_test = False\r\n response = PixivAPI.get(\"https://raw.githubusercontent.com/Elaina-Alex/pixiv_crawler/main/update.json\")\r\n if not os.path.exists('update.json'):\r\n json.dump(response, open('update.json', 'w'))\r\n download_test = True\r\n data = json.loads(open('update.json', 'r').read())\r\n if data['version'] < response['version']:\r\n print(\"检测到有新版本\", response['version'], \"是否进行更新?[yes/no]\")\r\n choice = PixivAPI.input_str('>').strip()\r\n if choice == \"yes\":\r\n download_test = True\r\n print(\"开始更新\", response['version'], \"版本\")\r\n else:\r\n download_test = False\r\n\r\n if download_test:\r\n with open(data['name'] + \".exe\", 'wb') as file:\r\n print(response['download_url'].format(response['version']))\r\n file.write(PixivAPI.get(response['download_url'].format(response['version']), types=\"content\"))\r\n print(data['name'] + \".exe\", \"下载完毕\")\r\n json.dump(response, open('update.json', 'w'))\r\n print(\"三秒后自动退出脚本...\")\r\n sys.exit()\r\n\r\n\r\ndef shell_author_works(author_id: str, next_url: str = \"\"): # download author images save to local\r\n while True:\r\n if next_url is None: # if next_url is None, it means that it is download complete\r\n return print(\"the end of author_works list\")\r\n if next_url == \"\": # if next_url is empty, it means it is the first time to download author works list\r\n image_info_list, next_url = PixivAPI.PixivApp.author_information(author_id=author_id)\r\n else: # if next_url is not empty, it means it is the next time to download author works list\r\n image_info_list, next_url = PixivAPI.PixivApp.author_information(api_url=next_url)\r\n # # start download threading pool for download images from author works list\r\n complex_image.Multithreading().executing_multithreading(image_info_list)\r\n\r\n\r\n@count_time\r\ndef shell_illustration(inputs):\r\n if len(inputs) >= 2:\r\n Vars.images_info = PixivAPI.PixivApp.images_information(PixivAPI.rec_id(inputs[1]))\r\n if isinstance(Vars.images_info, dict):\r\n Vars.images_info = Image.ImageInfo(Vars.images_info)\r\n Vars.images_info.show_images_information()\r\n if Vars.images_info.page_count == 1:\r\n Vars.images_info.save_image(Vars.images_info.original_url)\r\n else:\r\n Vars.images_info.save_image(Vars.images_info.original_url_list)\r\n else:\r\n print(\"没有找到相应的作品!\")\r\n else:\r\n print(\"你没有输入id或者链接\")\r\n\r\n\r\n@count_time\r\ndef shell_search(inputs: list):\r\n if len(inputs) < 2: # if there is no search keyword input\r\n return print(\"没有输入搜索信息\") # print error message\r\n # start download threading pool for download images from search list and save to local\r\n complex_image.Multithreading().executing_multithreading(PixivAPI.Tag.search_information(png_name=inputs[1]))\r\n\r\n\r\n@count_time\r\ndef shell_download_follow_author(next_url: str = \"\"):\r\n while True:\r\n if next_url is None: # if next_url is None, it means that it is download complete\r\n return print(\"the end of follow list\")\r\n if next_url == \"\": # if next_url is empty, it means it is the first 
time to download author works list\r\n follow_list, next_url = PixivAPI.PixivApp.follow_information()\r\n else: # if next_url is not empty, it means it is the next time to download author works list\r\n follow_list, next_url = PixivAPI.PixivApp.follow_information(api_url=next_url) # get next follow list\r\n for follow_info in follow_list: # start download threading pool for download images from author works list\r\n print(\"start download author {} works\".format(follow_info['user_name'])) # print author name\r\n shell_author_works(follow_info.get(\"user\").get(\"id\")) # download author works list and save to local\r\n\r\n\r\n@count_time\r\ndef shell_download_rank(next_url: str = \"\"):\r\n while True:\r\n if next_url is None: # if next_url is None, it means that it is download complete\r\n return print(\"the end of follow list\")\r\n if next_url == \"\": # if next_url is empty, it means it is the first time to download author works list\r\n image_info_list, next_url = PixivAPI.PixivApp.get_ranking_info()\r\n else: # if next_url is not empty, it means it is the next time to download author works list\r\n image_info_list, next_url = PixivAPI.PixivApp.get_ranking_info(api_url=next_url) # get next follow list\r\n # start download threading pool for download images from author works list\r\n complex_image.Multithreading().executing_multithreading(image_info_list)\r\n\r\n\r\n@count_time\r\ndef shell_read_text_id():\r\n default_file_name = \"pixiv_id_list.txt\"\r\n if not os.path.exists(default_file_name):\r\n open(default_file_name, 'w').close()\r\n image_id_list = []\r\n for line in open(default_file_name, 'r', encoding='utf-8', newline=\"\").readlines():\r\n if line.startswith(\"#\") or line.strip() == \"\":\r\n continue\r\n image_id = re.findall(r'^(\\d{1,8})', line)\r\n if image_id and len(image_id) >= 5:\r\n image_id_list.append(image_id[0])\r\n if isinstance(image_id_list, list) and len(image_id_list) != 0:\r\n threading_image_pool = complex_image.Multithreading()\r\n for image_id in track(image_id_list, description=\"本地插画集加载中...\"):\r\n Vars.images_info = PixivAPI.PixivApp.images_information(image_id)\r\n if isinstance(Vars.images_info, dict):\r\n threading_image_pool.add_image_info_obj(Image.ImageInfo(Vars.images_info))\r\n else:\r\n return print(\"无法进行下载,ERROR:\", Vars.images_info)\r\n threading_image_pool.handling_threads()\r\n\r\n\r\ndef shell_test_pixiv_token():\r\n if Vars.cfg.data.get(\"refresh_token\") == \"\":\r\n print(\"检测到本地档案没有令牌,请登入网站获取code来请求token,也可以将token自行写入本地档案\")\r\n code_verifier, browser = PixivAPI.PixivLogin.open_browser()\r\n if PixivAPI.PixivLogin.login(code_verifier, PixivAPI.input_str('code:').strip()):\r\n print(f\"code信息验证成功!,token信息已经保存在本地档案,请继续使用\")\r\n else:\r\n print(f\"输入code无效,请重新尝试获取code!\")\r\n shell_test_pixiv_token()\r\n if not PixivAPI.PixivApp.get_user_info(show_start=True):\r\n PixivAPI.refresh_pixiv_token()\r\n\r\n\r\ndef shell_download_recommend(next_url: str = \"\"): # download recommend images from pixiv api and save to local\r\n while True:\r\n if next_url is None: # if next_url is None, it means that it is download complete\r\n return print(\"the end of recommend list\")\r\n if next_url == \"\": # if next_url is empty, it means it is the first time to download recommend list\r\n image_info_list, next_url = PixivAPI.PixivApp.recommend_images()\r\n else: # if next_url is not empty, it means it is the next time to download recommend list\r\n image_info_list, next_url = PixivAPI.PixivApp.recommend_images(api_url=next_url)\r\n # start download 
threading pool for download images from recommend list and save to local\r\n complex_image.Multithreading().executing_multithreading(image_info_list)\r\n\r\n\r\ndef shell_download_stars(next_url: str = \"\"): # get stars list and download all the images in the list\r\n while True:\r\n if next_url is None:\r\n return print(\"the end of stars list\") # if next_url is None, it means that it is download complete\r\n if next_url == \"\": # if next_url is empty, it means it is the first time to download stars list\r\n image_info_list, next_url = PixivAPI.PixivApp.start_images()\r\n else: # if next_url is not empty, it means it is the next time to download stars list\r\n image_info_list, next_url = PixivAPI.PixivApp.start_images(api_url=next_url)\r\n # start download threading pool for download images from stars list and save to local\r\n complex_image.Multithreading().executing_multithreading(image_info_list)\r\n\r\n\r\ndef start_parser() -> argparse.Namespace: # start parser for command line arguments and start download process\r\n parser = argparse.ArgumentParser() # create parser object for command line arguments\r\n parser.add_argument(\r\n \"-l\",\r\n \"--login\",\r\n dest=\"login\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"登录账号\"\r\n ) # add login argument to parser object for command line arguments\r\n parser.add_argument(\r\n \"-d\",\r\n \"--download\",\r\n dest=\"downloadbook\",\r\n nargs=1,\r\n default=None,\r\n help=\"输入image-id\"\r\n ) # add download argument to parser object for command line arguments for download image\r\n parser.add_argument(\r\n \"-m\", \"--max\",\r\n dest=\"threading_max\",\r\n default=None,\r\n help=\"更改线程\"\r\n ) # add max argument to parser object for command line arguments for change threading max\r\n parser.add_argument(\r\n \"-n\", \"--name\",\r\n dest=\"name\",\r\n nargs=1,\r\n default=None,\r\n help=\"输入搜搜信息\"\r\n ) # add name argument to parser object for command line arguments for search\r\n parser.add_argument(\r\n \"-u\",\r\n \"--update\",\r\n dest=\"update\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"下载本地档案\"\r\n ) # add update argument to parser object for command line arguments for download local file\r\n parser.add_argument(\r\n \"-s\", \"--stars\",\r\n dest=\"stars\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"download stars list and download all the images in the list\"\r\n ) # add stars argument to parser object for command line arguments for download stars\r\n parser.add_argument(\r\n \"-r\", \"--recommend\",\r\n dest=\"recommend\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"download pixiv recommend images\"\r\n ) # add recommend argument to parser object for command line arguments for download recommend\r\n parser.add_argument(\r\n \"-k\", \"--ranking\",\r\n dest=\"ranking\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"download ranking images\"\r\n ) # add ranking argument to parser object for command line arguments for download ranking\r\n parser.add_argument(\r\n \"-f\",\r\n \"--follow\",\r\n dest=\"follow\",\r\n default=False,\r\n action=\"store_true\",\r\n help=\"download follow author images\"\r\n )\r\n parser.add_argument(\r\n \"-c\",\r\n \"--clear_cache\",\r\n dest=\"clear_cache\",\r\n default=False,\r\n action=\"store_true\"\r\n ) # add clear_cache argument to parser object for command line arguments for clear cache\r\n parser.add_argument(\r\n \"-a\",\r\n \"--author\",\r\n dest=\"author\",\r\n nargs=1,\r\n default=None,\r\n help=\"enter author id\"\r\n ) # add 
author argument to parser object for command line arguments for download author\r\n return parser.parse_args() # return parser object for command line arguments and return it as a tuple\r\n\r\n\r\ndef shell_parser():\r\n args, shell_console = start_parser(), False\r\n if args.recommend:\r\n shell_download_recommend()\r\n shell_console = True\r\n\r\n if args.ranking:\r\n shell_download_rank()\r\n shell_console = True\r\n\r\n if args.stars:\r\n shell_download_stars()\r\n shell_console = True\r\n\r\n if args.follow:\r\n shell_download_follow_author()\r\n shell_console = True\r\n\r\n if args.update:\r\n shell_read_text_id()\r\n shell_console = True\r\n\r\n if args.clear_cache:\r\n Vars.cfg.data.clear(), set_config()\r\n Vars.cfg.save()\r\n sys.exit(3) # exit with code 3 to clear cache\r\n\r\n if args.threading_max:\r\n Vars.cfg.data['max_thread'] = int(args.max)\r\n\r\n if args.name:\r\n shell_search(['n'] + args.name)\r\n shell_console = True\r\n\r\n if args.downloadbook:\r\n shell_illustration(['d'] + args.downloadbook)\r\n shell_console = True\r\n\r\n if args.author:\r\n shell_author_works(args.author[0])\r\n shell_console = True\r\n\r\n if args.login:\r\n shell_test_pixiv_token()\r\n shell_console = True\r\n\r\n if not shell_console:\r\n for info in Msg.msg_help:\r\n print_lang('[帮助]', info)\r\n while True:\r\n shell(re.split('\\\\s+', PixivAPI.input_str('>').strip()))\r\n\r\n\r\ndef shell(inputs: list):\r\n if inputs[0] == 'q' or inputs[0] == 'quit':\r\n sys.exit(\"已退出程序\")\r\n elif inputs[0] == 'l' or inputs[0] == 'login':\r\n shell_test_pixiv_token()\r\n elif inputs[0] == 'd' or inputs[0] == 'download':\r\n shell_illustration(inputs)\r\n elif inputs[0] == 's' or inputs[0] == 'stars':\r\n shell_download_stars()\r\n elif inputs[0] == 'n' or inputs[0] == 'name':\r\n shell_search(inputs)\r\n elif inputs[0] == 'r' or inputs[0] == 'recommend':\r\n shell_download_recommend()\r\n elif inputs[0] == 'u' or inputs[0] == 'update':\r\n shell_read_text_id(inputs)\r\n elif inputs[0] == 'k' or inputs[0] == 'rank':\r\n shell_download_rank()\r\n elif inputs[0] == 'f' or inputs[0] == 'follow':\r\n shell_download_follow_author()\r\n else:\r\n print(inputs[0], \"为无效指令\")\r\n\r\n\r\ndef print_lang(*args) -> None: # print message in language set in config file\r\n from zhconv import convert # import zhconv module for chinese conversion\r\n msg = \"\" # create empty string for message to be printed\r\n if len(args) >= 1: # if there is message to be printed\r\n for arg in args: # for each message in args\r\n msg += str(arg) # add message to string for printing\r\n else: # if there is no message to be printed\r\n msg += args[0] if len(args) == 1 else msg # if there is only one message to be printed, print it directly\r\n if Vars.cfg.data.get(\"lang\") is None: # if language is not set in config file\r\n print(convert(str(msg), 'zh-hant')) # print message in chinese\r\n else: # if language is set in config file\r\n print(msg)\r\n\r\n\r\nif __name__ == '__main__':\r\n # update()\r\n try:\r\n set_config()\r\n shell_test_pixiv_token()\r\n shell_parser()\r\n except KeyboardInterrupt:\r\n quit(\"已手动退出程序\")\r\n except Exception as error:\r\n print(\"程序意外退出,ERROR:\", error)\r\n", "id": "83630", "language": "Python", "matching_score": 3.727379322052002, "max_stars_count": 1, "path": "main.py" }, { "content": "from fake_useragent import UserAgent\r\nfrom instance import *\r\nfrom PixivAPI import HttpUtil, UrlConstant\r\n\r\ncommon_params = {\"filter\": \"for_android\"}\r\n\r\n\r\ndef return_headers(headers: str = 
\"app\"):\r\n if headers == \"app\":\r\n return {\r\n 'Host': 'app-api.pixiv.net ',\r\n 'user-agent': 'PixivAndroidApp/6.46.0',\r\n 'authorization': \"Bearer \" + Vars.cfg.data.get(\"access_token\"),\r\n 'app-version': '6.46.0 ',\r\n }\r\n if headers == \"login\":\r\n return {\"User-Agent\": \"PixivAndroidApp/5.0.234 (Android 11; Pixel 5)\"}\r\n if headers == \"png\":\r\n return {'Referer': 'https://www.pixiv.net/', 'User-Agent': UserAgent(verify_ssl=False).random}\r\n else:\r\n return {'User-Agent': UserAgent(verify_ssl=False).random}\r\n\r\n\r\ndef get(\r\n api_url: str,\r\n params: [dict, str] = None,\r\n head: str = \"app\",\r\n types: str = \"json\",\r\n params_clear: bool = False,\r\n request_mode: str = \"GET\"\r\n) -> [dict, bytes, str, None]: # return json or bytes or str or None (if error)\r\n if params_clear:\r\n params = params.clear()\r\n if head == \"app\":\r\n if params is not None:\r\n params.update(common_params)\r\n api_url = UrlConstant.PIXIV_HOST + api_url.replace(UrlConstant.PIXIV_HOST, '')\r\n try:\r\n if request_mode == \"GET\":\r\n return HttpUtil.get_api(api_url, params=params, return_type=types, headers=return_headers(head))\r\n elif request_mode == \"POST\":\r\n return HttpUtil.post_api(api_url, data=params, return_type=types, headers=return_headers(head))\r\n elif request_mode == \"PUT\":\r\n return HttpUtil.put_api(api_url, params=params, return_type=types, headers=return_headers(head))\r\n except Exception as error:\r\n print(\"post error:\", error)\r\n\r\n\r\ndef refresh_pixiv_token(error_info: str = \"\") -> None:\r\n if error_info != \"\" and error_info is not None:\r\n print(\"[error]:\", error_info)\r\n if PixivLogin.refresh(Vars.cfg.data.get(\"refresh_token\")):\r\n print(\"refresh token success, new token:\", Vars.cfg.data.get(\"access_token\"))\r\n else:\r\n print(\"refresh token failed, please login again\")\r\n\r\n\r\nclass PixivApp:\r\n\r\n @staticmethod\r\n def get_user_info(show_start: bool = False) -> bool:\r\n params = {\"user_id\": Vars.cfg.data['user_info']['id']}\r\n response = get(api_url=UrlConstant.ACCOUNT_INFORMATION, params=params).get('user')\r\n if response is not None:\r\n if show_start is True:\r\n print(f\"用户名:{response.get('name')}\\t\\t用户id:{response.get('id')}\")\r\n return True\r\n\r\n @staticmethod\r\n def images_information(works_id: str) -> dict:\r\n response = get(UrlConstant.IMAGE_INFORMATION, params={'id': works_id}, head=\"web\")\r\n if isinstance(response, dict) and response.get('illust') is not None:\r\n return response[\"illust\"]\r\n else:\r\n print(response)\r\n\r\n @staticmethod\r\n def start_images(\r\n api_url: str = UrlConstant.BOOKMARK_INFORMATION,\r\n user_id: [int, str] = None,\r\n restrict: str = \"public\",\r\n params_clear: bool = False,\r\n max_retry: int = 3\r\n ) -> [list, str, None]: # get account start information and return a list of p_id\r\n if user_id is None: # if user_id is None, get the user_id from config file\r\n user_id = Vars.cfg.data['user_info']['id']\r\n\r\n if api_url != UrlConstant.BOOKMARK_INFORMATION: # if api_url is not bookmark, clear to params dict\r\n params_clear = True\r\n response = get(api_url=api_url, params={\"user_id\": user_id, \"restrict\": restrict}, params_clear=params_clear)\r\n if response.get('illusts') is not None:\r\n return response.get('illusts'), response.get('next_url')\r\n if max_retry <= 3:\r\n refresh_pixiv_token(response.get(\"error\").get(\"message\")) # refresh token\r\n PixivApp.start_images(api_url, user_id, restrict) # if get error, try to refresh token 
and retry\r\n max_retry += 1\r\n\r\n @staticmethod\r\n def recommend_images(\r\n api_url: str = UrlConstant.RECOMMENDED_INFORMATION,\r\n params_clear: bool = False,\r\n include_ranking_illusts: str = \"true\",\r\n include_privacy_policy: str = \"true\",\r\n max_retry: int = 3\r\n ) -> [list, str, None]: # get account recommend images and return a list of p_id\r\n\r\n if api_url != UrlConstant.RECOMMENDED_INFORMATION: # if api_url is not recommended, clear to params dict\r\n params_clear = True\r\n\r\n params = {\"include_ranking_illusts\": include_ranking_illusts, \"include_privacy_policy\": include_privacy_policy}\r\n response: dict = get(api_url=api_url, params=params, params_clear=params_clear)\r\n if response.get('illusts') is not None:\r\n return response.get(\"illusts\"), response.get('next_url')\r\n if max_retry <= 3: # if max_retry is less than 3, try to refresh token and retry\r\n refresh_pixiv_token(response.get(\"error\").get(\"message\")) # refresh token\r\n PixivApp.recommend_images(api_url=api_url) # if get error, try to refresh token and retry\r\n max_retry += 1 # add retry count\r\n\r\n @staticmethod\r\n def follow_information(\r\n api_url: str = UrlConstant.FOLLOWING_INFORMATION,\r\n user_id: [int, str] = None,\r\n restrict: str = \"public\",\r\n params_clear: bool = False,\r\n max_retry: int = 3\r\n ) -> [list, str]: # get account follow information and return a list of AUTHOR_ID\r\n \"\"\"获取指定 user_id 关注的所有画师信息\"\"\"\r\n if user_id is None: # if user_id is None, get the user_id from config file\r\n user_id = Vars.cfg.data['user_info']['id'] # get user_id from config file and set to user_id\r\n if api_url != UrlConstant.FOLLOWING_INFORMATION: # if api_url is not recommended, clear to params dict\r\n params_clear = True\r\n response = get(api_url=api_url, params={\"user_id\": user_id, \"restrict\": restrict}, params_clear=params_clear)\r\n if response.get('user_previews') is not None:\r\n return response[\"user_previews\"], response.get('next_url')\r\n if max_retry <= 3:\r\n refresh_pixiv_token(response.get(\"error\").get(\"message\")) # refresh token\r\n PixivApp.follow_information(user_id, restrict) # if get error, try to refresh token and retry\r\n max_retry += 1\r\n\r\n @staticmethod\r\n def author_information(\r\n api_url: str = UrlConstant.AUTHOR_INFORMATION,\r\n author_id: str = \"\",\r\n params_clear: bool = False,\r\n max_retry: int = 3\r\n ) -> [list, str, None]: # get author information and return a list of p_id\r\n\r\n if api_url != UrlConstant.AUTHOR_INFORMATION: # if api_url is not author, clear to params dict\r\n params_clear = True\r\n response = get(api_url=api_url, params={\"user_id\": author_id, \"type\": \"illust\"}, params_clear=params_clear)\r\n if response.get('illusts') is not None: # get success, return a list of p_id and next_url (if not None)\r\n return response.get('illusts'), response.get('next_url')\r\n if max_retry <= 3:\r\n refresh_pixiv_token(response.get(\"error\").get(\"message\")) # refresh token\r\n PixivApp.author_information(api_url=api_url, author_id=author_id) # try to refresh token and retry\r\n max_retry += 1\r\n\r\n @staticmethod\r\n def get_ranking_info(\r\n api_url: str = UrlConstant.RANKING_INFORMATION,\r\n params_clear: bool = False,\r\n max_retry: int = 5\r\n ) -> [list, str]: # 作品排行信息\r\n mode_list = [\r\n \"day\", \"week\", \"month\", \"day_male\",\r\n \"day_female\", \"week_original\", \"week_rookie\",\r\n \"day_manga\", \"day_r18\", \"day_male_r18\",\r\n \"day_female_r18\", \"week_r18\", \"week_r18g\"\r\n ]\r\n if 
api_url == UrlConstant.RANKING_INFORMATION: # if api_url is not author, clear to params dict\r\n for index, mode in enumerate(mode_list): # for each mode, get the ranking information\r\n print(\"index:\", index, \"\\t\\tmode_name:\", mode) # print mode_name\r\n mode_type = mode_list[input_int(\">\", len(mode_list))] # input mode_type from user\r\n else:\r\n params_clear, mode_type = True, None # clear to params dict and set mode_type to None\r\n response = get(api_url=api_url, params={\"mode\": mode_type}, params_clear=params_clear)\r\n if response.get('illusts') is not None:\r\n return response.get('illusts'), response.get('next_url')\r\n if max_retry <= 3: # if max_retry is less than 3, try to refresh token and retry\r\n refresh_pixiv_token(response.get(\"error\").get(\"message\")) # refresh token\r\n PixivApp.get_ranking_info(api_url=api_url) # if get error, try to refresh token and retry\r\n max_retry += 1 # add retry count to max_retry\r\n\r\n\r\nclass Tag:\r\n \"\"\"\r\n search_target\r\n partial_match_for_tags\texact_match_for_tags title_and_caption\r\n 标签部分一致 标签完全一致 标题说明文\r\n\r\n sort\r\n date_desc\t date_asc popular_desc\r\n 按日期倒序 按日期正序 受欢迎降序(Premium功能)\r\n\r\n search_duration\r\n \"within_last_day\" \"within_last_week\" \"within_last_month\"\r\n \"\"\"\r\n\r\n @staticmethod\r\n def search_tag_information(png_name: str, sort: str = \"popular_desc\", max_retry: int = 5) -> list:\r\n params = {\r\n \"include_translated_tag_results\": \"true\",\r\n \"merge_plain_keyword_results\": \"true\",\r\n \"word\": png_name,\r\n \"sort\": sort,\r\n \"search_target\": \"exact_match_for_tags\",\r\n }\r\n response = get(api_url=UrlConstant.SEARCH_INFORMATION, params=params)\r\n if response.get('illusts') is not None:\r\n return response[\"illusts\"]\r\n if max_retry <= 3:\r\n refresh_pixiv_token(response.get(\"error\").get(\"message\"))\r\n Tag.search_tag_information(png_name, sort)\r\n max_retry += 1\r\n\r\n @staticmethod\r\n def search_information(png_name: str, sort: str = \"date_desc\", max_retry: int = 5) -> list:\r\n params = {\r\n \"include_translated_tag_results\": \"true\",\r\n \"merge_plain_keyword_results\": \"true\",\r\n \"word\": png_name,\r\n \"sort\": sort,\r\n \"search_target\": \"partial_match_for_tags\",\r\n }\r\n response = get(api_url=UrlConstant.SEARCH_INFORMATION, params=params)\r\n if response.get('illusts') is not None:\r\n return response[\"illusts\"]\r\n if max_retry <= 3:\r\n refresh_pixiv_token(response.get(\"error\").get(\"message\"))\r\n Tag.search_information(png_name, sort)\r\n max_retry += 1\r\n\r\n\r\nclass PixivLogin:\r\n\r\n @staticmethod\r\n def oauth_pkce() -> [str, str]:\r\n from secrets import token_urlsafe\r\n from base64 import urlsafe_b64encode\r\n from hashlib import sha256\r\n \"\"\"S256 transformation method. Proof Key for Code Exchange by OAuth Public Clients (RFC7636).\"\"\"\r\n code_verifier = token_urlsafe(32) # generate code_verifier from secrets.token_urlsafe\r\n code_challenge = urlsafe_b64encode(sha256(code_verifier.encode(\"ascii\")).digest()). 
\\\r\n rstrip(b\"=\").decode(\"ascii\") # remove padding characters from base64 encoding and decode to ascii\r\n return code_verifier, code_challenge\r\n\r\n @staticmethod\r\n def open_browser(client: str = \"pixiv-android\") -> [str, None]:\r\n from webbrowser import open as open_url\r\n from urllib.parse import urlencode\r\n code_verifier, code_challenge = PixivLogin.oauth_pkce()\r\n login_params = {\"code_challenge\": code_challenge, \"code_challenge_method\": \"S256\", \"client\": client}\r\n return code_verifier, open_url(f\"https://app-api.pixiv.net/web/v1/login?{urlencode(login_params)}\")\r\n\r\n @staticmethod\r\n def login(code_verifier: str, code_information: str) -> bool: # login with code_information\r\n response = get(\r\n api_url=\"https://oauth.secure.pixiv.net/auth/token\",\r\n head=\"login\",\r\n request_mode=\"POST\",\r\n params={\r\n \"client_id\": \"MOBrBDS8blbauoSck0ZfDbtuzpyT\",\r\n \"client_secret\": \"<KEY>\",\r\n \"code\": code_information,\r\n \"code_verifier\": code_verifier,\r\n \"grant_type\": \"authorization_code\",\r\n \"include_policy\": \"true\",\r\n \"redirect_uri\": \"https://app-api.pixiv.net/web/v1/users/auth/pixiv/callback\",\r\n },\r\n )\r\n if response.get(\"errors\") is not None: # if get error, return False and print error message\r\n print(\"errors:\", response['errors'])\r\n else:\r\n PixivLogin.save_token(response)\r\n return True\r\n\r\n @staticmethod\r\n def refresh(refresh_token: str) -> bool: # refresh token and save to file\r\n response = get(\r\n api_url=\"https://oauth.secure.pixiv.net/auth/token\",\r\n head=\"login\",\r\n request_mode=\"POST\",\r\n params={\r\n \"client_id\": \"<KEY>\",\r\n \"client_secret\": \"<KEY>\",\r\n \"grant_type\": \"refresh_token\",\r\n \"include_policy\": \"true\",\r\n \"refresh_token\": refresh_token,\r\n }\r\n )\r\n\r\n if response.get(\"errors\") is not None:\r\n print(\"errors:\", response['errors'])\r\n else:\r\n PixivLogin.save_token(response)\r\n return True\r\n\r\n @staticmethod\r\n def save_token(response: dict) -> None: # save token to file for later use\r\n if isinstance(response, dict): # if response is a dict\r\n Vars.cfg.data[\"user_info\"] = response[\"user\"] # save user_id to config\r\n Vars.cfg.data[\"access_token\"] = response[\"access_token\"] # save access_token to config\r\n Vars.cfg.data[\"refresh_token\"] = response[\"refresh_token\"] # save refresh_token to config\r\n Vars.cfg.save() # save config to file\r\n print(\"login success, user_id:\", response[\"user\"][\"id\"], \"access_token:\", response[\"access_token\"])\r\n else:\r\n print(\"response is not dict type\")\r\n", "id": "10338036", "language": "Python", "matching_score": 9.075886726379395, "max_stars_count": 1, "path": "PixivAPI/__init__.py" }, { "content": "from fake_useragent import UserAgent\r\nfrom instance import *\r\nfrom PixivAPI import login_pixiv, HttpUtil, UrlConstant\r\n\r\ncommon_params = {\"filter\": \"for_android\"}\r\n\r\n\r\ndef return_headers(headers: str = \"app\"):\r\n if headers == \"app\":\r\n return {\r\n 'Host': 'app-api.pixiv.net ',\r\n 'user-agent': 'PixivAndroidApp/6.46.0',\r\n 'authorization': \"Bearer \" + Vars.cfg.data.get(\"access_token\"),\r\n 'app-version': '6.46.0 ',\r\n }\r\n if headers == \"png\":\r\n return {'Referer': 'https://www.pixiv.net/', 'User-Agent': UserAgent(verify_ssl=False).random}\r\n else:\r\n return {'User-Agent': UserAgent(verify_ssl=False).random}\r\n\r\n\r\ndef get(\r\n api_url: str,\r\n params: [dict, str] = None,\r\n head: str = \"app\",\r\n types: str = \"json\",\r\n 
params_clear: bool = False,\r\n request_mode: str = \"GET\") -> [dict, bytes, str, None]:\r\n if params_clear:\r\n params = params.clear()\r\n if head == \"app\":\r\n params.update(common_params)\r\n api_url = UrlConstant.PIXIV_HOST + api_url.replace(UrlConstant.PIXIV_HOST, '')\r\n try:\r\n if request_mode == \"GET\":\r\n return HttpUtil.get_api(api_url, params=params, return_type=types, headers=return_headers(head))\r\n elif request_mode == \"POST\":\r\n return HttpUtil.post_api(api_url, params=params, return_type=types, headers=return_headers(head))\r\n elif request_mode == \"PUT\":\r\n return HttpUtil.put_api(api_url, params=params, return_type=types, headers=return_headers(head))\r\n except Exception as error:\r\n print(\"post error:\", error)\r\n\r\n\r\ndef refresh_pixiv_token(error_info: str = \"\") -> None:\r\n if error_info != \"\" and error_info is not None:\r\n print(\"[error]:\", error_info)\r\n if login_pixiv.refresh(Vars.cfg.data.get(\"refresh_token\")):\r\n print(\"refresh token success, new token:\", Vars.cfg.data.get(\"access_token\"))\r\n else:\r\n print(\"refresh token failed, please login again\")\r\n\r\n\r\nclass PixivApp:\r\n\r\n @staticmethod\r\n def get_user_info(show_start: bool = False) -> bool:\r\n params = {\"user_id\": Vars.cfg.data['user_info']['id']}\r\n response = get(api_url=UrlConstant.ACCOUNT_INFORMATION, params=params).get('user')\r\n if response is not None:\r\n if show_start is True:\r\n print(f\"用户名:{response.get('name')}\\t\\t用户id:{response.get('id')}\")\r\n return True\r\n\r\n @staticmethod\r\n def images_information(works_id: str) -> dict:\r\n response = get(UrlConstant.IMAGE_INFORMATION, params={'id': works_id}, head=\"web\")\r\n if isinstance(response, dict) and response.get('illust') is not None:\r\n return response[\"illust\"]\r\n else:\r\n print(response)\r\n\r\n @staticmethod\r\n def start_images(\r\n api_url: str = UrlConstant.BOOKMARK_INFORMATION,\r\n user_id: [int, str] = None,\r\n restrict: str = \"public\",\r\n params_clear: bool = False\r\n ) -> [list, str, None]: # get account start information and return a list of p_id\r\n if user_id is None: # if user_id is None, get the user_id from config file\r\n user_id = Vars.cfg.data['user_info']['id']\r\n\r\n if api_url != UrlConstant.BOOKMARK_INFORMATION: # if api_url is not bookmark, clear to params dict\r\n params_clear = True\r\n response = get(api_url=api_url, params={\"user_id\": user_id, \"restrict\": restrict}, params_clear=params_clear)\r\n if response.get('illusts') is not None:\r\n return response.get('illusts'), response.get('next_url')\r\n else:\r\n refresh_pixiv_token(response.get(\"error\").get(\"message\")) # refresh token\r\n PixivApp.start_images(api_url, user_id, restrict) # if get error, try to refresh token and retry\r\n\r\n @staticmethod\r\n def recommend_images(\r\n api_url: str = UrlConstant.RECOMMENDED_INFORMATION,\r\n params_clear: bool = False\r\n ) -> [list, str, None]: # get account recommend images and return a list of p_id\r\n if api_url != UrlConstant.RECOMMENDED_INFORMATION: # if api_url is not recommended, clear to params dict\r\n params_clear = True\r\n params = {\"include_ranking_illusts\": \"true\", \"include_privacy_policy\": \"true\"}\r\n response = get(api_url=api_url, params=params, params_clear=params_clear)\r\n if response.get('illusts') is not None:\r\n return response[\"illusts\"], response.get('next_url')\r\n else:\r\n refresh_pixiv_token(response.get(\"error\").get(\"message\")) # refresh token\r\n PixivApp.recommend_images(api_url=api_url) # if 
get error, try to refresh token and retry\r\n\r\n @staticmethod\r\n def follow_information(user_id: [int, str] = None, restrict: str = \"public\", max_retry: int = 5) -> list:\r\n \"\"\"获取指定 user_id 关注的所有画师信息\"\"\"\r\n if user_id is None:\r\n user_id = Vars.cfg.data['user_info']['id']\r\n for retry in range(1, max_retry):\r\n params = {\"user_id\": user_id, \"restrict\": restrict}\r\n response = get(api_url=UrlConstant.FOLLOWING_INFORMATION, params=params)\r\n if response.get('user_previews') is not None:\r\n return response[\"user_previews\"]\r\n else:\r\n print(\"Retry:{} follow_infor error:{}\".format(retry, response.get(\"error\").get(\"message\")))\r\n refresh_pixiv_token()\r\n\r\n @staticmethod\r\n def author_information(\r\n api_url: str = UrlConstant.AUTHOR_INFORMATION,\r\n author_id: str = \"\",\r\n params_clear: bool = False\r\n ) -> [list, str, None]: # get author information and return a list of p_id\r\n\r\n if api_url != UrlConstant.AUTHOR_INFORMATION: # if api_url is not author, clear to params dict\r\n params_clear = True\r\n response = get(api_url=api_url, params={\"user_id\": author_id, \"type\": \"illust\"}, params_clear=params_clear)\r\n if response.get('illusts') is not None: # get success, return a list of p_id and next_url (if not None)\r\n return response.get('illusts'), response.get('next_url')\r\n else:\r\n refresh_pixiv_token(response.get(\"error\").get(\"message\")) # refresh token\r\n PixivApp.author_information(api_url=api_url, author_id=author_id) # try to refresh token and retry\r\n\r\n @staticmethod\r\n def rank_information(max_page: int = 100, max_retry: int = 5) -> list: # 作品排行信息\r\n mode_list = [\"day\", \"week\", \"month\", \"day_male\", \"day_female\", \"week_original\", \"week_rookie\", \"day_manga\",\r\n \"day_r18\", \"day_male_r18\", \"day_female_r18\", \"week_r18\", \"week_r18g\"]\r\n for index, mode in enumerate(mode_list):\r\n print(\"index:\", index, \"\\t\\tmode_name:\", mode)\r\n mode_type = mode_list[input_int(\">\", len(mode_list))]\r\n for index, page in enumerate(range(max_page), start=1):\r\n params = {\"offset\": index * 30, \"mode\": mode_type, \"data\": time.strftime(\"%Y-%m-%d\", time.localtime())}\r\n for retry in range(1, max_retry):\r\n response = get(api_url=UrlConstant.RANKING_INFORMATION, params=params)\r\n if response.get('illusts') is not None:\r\n return response[\"illusts\"]\r\n else:\r\n print(\"rank_information error:{}\".format(retry, response.get(\"error\").get(\"message\")))\r\n refresh_pixiv_token()\r\n\r\n\r\nclass Tag:\r\n \"\"\"\r\n search_target\r\n partial_match_for_tags\texact_match_for_tags title_and_caption\r\n 标签部分一致 标签完全一致 标题说明文\r\n\r\n sort\r\n date_desc\t date_asc popular_desc\r\n 按日期倒序 按日期正序 受欢迎降序(Premium功能)\r\n\r\n search_duration\r\n \"within_last_day\" \"within_last_week\" \"within_last_month\"\r\n \"\"\"\r\n\r\n @staticmethod\r\n def search_tag_information(png_name: str, sort: str = \"popular_desc\", max_retry: int = 5) -> list:\r\n params = {\r\n \"include_translated_tag_results\": \"true\",\r\n \"merge_plain_keyword_results\": \"true\",\r\n \"word\": png_name,\r\n \"sort\": sort,\r\n \"search_target\": \"exact_match_for_tags\",\r\n }\r\n for retry in range(1, max_retry):\r\n response = get(api_url=UrlConstant.SEARCH_INFORMATION, params=params)\r\n if response.get('illusts') is not None:\r\n return response[\"illusts\"]\r\n else:\r\n print(\"Retry:{} search error:{}\".format(retry, response.get(\"error\").get(\"message\")))\r\n refresh_pixiv_token()\r\n\r\n @staticmethod\r\n def 
search_information(png_name: str, sort: str = \"date_desc\", max_retry: int = 5) -> list:\r\n params = {\r\n \"include_translated_tag_results\": \"true\",\r\n \"merge_plain_keyword_results\": \"true\",\r\n \"word\": png_name,\r\n \"sort\": sort,\r\n \"search_target\": \"partial_match_for_tags\",\r\n }\r\n for retry in range(1, max_retry):\r\n response = get(api_url=UrlConstant.SEARCH_INFORMATION, params=params)\r\n if response.get('illusts') is not None:\r\n return response[\"illusts\"]\r\n else:\r\n print(\"Retry:{} search error:{}\".format(retry, response.get(\"error\").get(\"message\")))\r\n refresh_pixiv_token()\r\n", "id": "804245", "language": "Python", "matching_score": 3.155552864074707, "max_stars_count": 0, "path": "PixivAPI/__init__.py" }, { "content": "from API import HttpUtil, UrlConstants, ahttp\n\n\ndef get(api_url: str, params: dict = None, max_retry: int = 3, **kwargs):\n for retry in range(max_retry):\n try:\n api_url = UrlConstants.WEB_SITE + api_url.replace(UrlConstants.WEB_SITE, '')\n return HttpUtil.get(api_url=api_url, params=params, **kwargs).json()\n except Exception as error:\n print(error)\n\n\nclass Book:\n\n @staticmethod\n def novel_info(novel_id: int):\n response = get(UrlConstants.BOOK_INFO_API.format(novel_id))\n if response.get('_id') is not None:\n return response\n\n @staticmethod\n def catalogue(novel_id: int, max_retry=5):\n for retry in range(max_retry):\n response = get(UrlConstants.BOOK_CATALOGUE.format(novel_id))\n if response.get('mixToc').get('chapters') is not None:\n return response.get('mixToc').get('chapters')\n\n @staticmethod\n def search_book(novel_name: str):\n return get(UrlConstants.SEARCH_API.format(novel_name)).get('books')\n\n\nclass Chapter:\n @staticmethod\n def download_chapter(chapter_id: str):\n api_url = UrlConstants.WEB_SITE + UrlConstants.CHAPTER_API.format(chapter_id)\n response = get(api_url)['chapter']\n return response['title'], response['body']\n\n\nclass Cover:\n @staticmethod\n def download_cover(max_retry=10) -> str:\n for retry in range(max_retry):\n params = {'type': 'moe', 'size': '1920x1080'}\n response = HttpUtil.get('https://api.yimian.xyz/img', params=params)\n if response.status_code == 200:\n return HttpUtil.get(response.url).content\n else:\n print(\"msg:\", response.text)\n\n\nclass Tag:\n @staticmethod\n def get_type():\n type_dict = {}\n response = get(UrlConstants.GET_TYPE_INFO)\n for number, sort in enumerate(response['male']):\n number += 1\n major = sort.get('major')\n type_dict[number] = major\n return type_dict\n\n @staticmethod\n def tag_info(tag_id, tag_name, page):\n book_list = get(UrlConstants.TAG_API.format(tag_id, tag_name, page))\n if book_list['books']:\n return book_list['books']\n\n @staticmethod\n def ranking(ranking_num):\n return get(UrlConstants.RANKING_API.format(ranking_num))\n", "id": "3726016", "language": "Python", "matching_score": 5.286143779754639, "max_stars_count": 1, "path": "API/__init__.py" }, { "content": "WEB_SITE = \"http://api.aixdzs.com/\"\nTAG_API = \"book-sort?gender={}&type=hot&major={}&minor=&start={}&limit=20\"\nBOOK_INFO_API = 'book/{}'\nBOOK_CATALOGUE = 'content/{}?view=chapter'\nGET_TYPE_INFO = 'sort/lv2'\nCHAPTER_API = 'chapter/{}'\nSEARCH_API = 'book/search?query={}'\nRANKING_API = 'ranking/{}'\n\n", "id": "7537425", "language": "Python", "matching_score": 0.3714710474014282, "max_stars_count": 1, "path": "API/UrlConstants.py" }, { "content": "from ebooklib.plugins.base import BasePlugin\nfrom ebooklib.utils import parse_html_string\n\n\nclass 
BooktypeLinks(BasePlugin):\n NAME = 'Booktype Links'\n\n def __init__(self, booktype_book):\n self.booktype_book = booktype_book\n\n def html_before_write(self, book, chapter):\n from lxml import etree\n\n try:\n from urlparse import urlparse, urljoin\n except ImportError:\n from urllib.parse import urlparse, urljoin\n\n try:\n tree = parse_html_string(chapter.content)\n except Exception as error:\n print(error)\n return\n\n root = tree.getroottree()\n\n if len(root.find('body')) != 0:\n body = tree.find('body')\n\n # should also be aware to handle\n # ../chapter/\n # ../chapter/#reference\n # ../chapter#reference\n\n for _link in body.xpath('//a'):\n # This is just temporary for the footnotes\n if _link.get('href', '').find('InsertNoteID') != -1:\n _ln = _link.get('href', '')\n i = _ln.find('#')\n _link.set('href', _ln[i:])\n\n continue\n\n _u = urlparse(_link.get('href', ''))\n\n # Let us care only for internal links at the moment\n if _u.scheme == '':\n if _u.path != '':\n _link.set('href', '%s.xhtml' % _u.path)\n\n if _u.fragment != '':\n _link.set('href', urljoin(_link.get('href'), '#%s' % _u.fragment))\n\n if _link.get('name') is not None:\n _link.set('id', _link.get('name'))\n etree.strip_attributes(_link, 'name')\n\n chapter.content = etree.tostring(tree, pretty_print=True, encoding='utf-8')\n\n\nclass BooktypeFootnotes(BasePlugin):\n NAME = 'Booktype Footnotes'\n\n def __init__(self, booktype_book):\n self.booktype_book = booktype_book\n\n def html_before_write(self, book, chapter):\n from lxml import etree\n\n from ebooklib import epub\n\n try:\n tree = parse_html_string(chapter.content)\n except Exception as error:\n print(error)\n return\n\n root = tree.getroottree()\n\n if len(root.find('body')) != 0:\n body = tree.find('body')\n\n # <span id=\"InsertNoteID_1_marker1\" class=\"InsertNoteMarker\"><sup><a\n # href=\"#InsertNoteID_1\">1</a></sup><span> <ol id=\"InsertNote_NoteList\"><li id=\"InsertNoteID_1\">prvi\n # footnote <span id=\"InsertNoteID_1_LinkBacks\"><sup><a\n # href=\"#InsertNoteID_1_marker1\">^</a></sup></span></li>\n\n # <a epub:type=\"noteref\" href=\"#n1\">1</a></p>\n # <aside epub:type=\"footnote\" id=\"n1\"><p>These have been corrected in this EPUB3 edition.</p></aside>\n for footnote in body.xpath('//span[@class=\"InsertNoteMarker\"]'):\n footnote_id = footnote.get('id')[:-8]\n a = footnote.getchildren()[0].getchildren()[0]\n\n footnote_text = body.xpath('//li[@id=\"%s\"]' % footnote_id)[0]\n\n a.attrib['{%s}type' % epub.NAMESPACES['EPUB']] = 'noteref'\n ftn = etree.SubElement(body, 'aside', {'id': footnote_id})\n ftn.attrib['{%s}type' % epub.NAMESPACES['EPUB']] = 'footnote'\n ftn_p = etree.SubElement(ftn, 'p')\n ftn_p.text = footnote_text.text\n\n old_footnote = body.xpath('//ol[@id=\"InsertNote_NoteList\"]')\n if len(old_footnote) > 0:\n body.remove(old_footnote[0])\n\n chapter.content = etree.tostring(tree, pretty_print=True, encoding='utf-8')\n", "id": "4912234", "language": "Python", "matching_score": 2.2728357315063477, "max_stars_count": 1, "path": "epub_novel/plugins/booktype.py" }, { "content": "import six\nimport subprocess\nfrom ebooklib.plugins.base import BasePlugin\n\n# Recommend usage of\n# - https://github.com/w3c/tidy-html5\n\ndef tidy_cleanup(content, **extra):\n cmd = []\n\n for k, v in six.iteritems(extra):\n\n if v:\n cmd.append('--%s' % k)\n cmd.append(v)\n else:\n cmd.append('-%s' % k)\n\n # must parse all other extra arguments\n try:\n p = subprocess.Popen(['tidy'] + cmd, shell=False,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n 
stderr=subprocess.PIPE, close_fds=True)\n except OSError:\n return 3, None\n\n p.stdin.write(content)\n\n (cont, p_err) = p.communicate()\n\n # 0 - all ok\n # 1 - there were warnings\n # 2 - there were errors\n # 3 - exception\n\n return p.returncode, cont\n\n\nclass TidyPlugin(BasePlugin):\n NAME = 'Tidy HTML'\n OPTIONS = {'char-encoding': 'utf8',\n 'tidy-mark': 'no'\n }\n\n def __init__(self, extra=None):\n if extra is None:\n extra = {}\n self.options = dict(self.OPTIONS)\n self.options.update(extra)\n\n def html_before_write(self, book, chapter):\n if not chapter.content:\n return None\n\n (_, chapter.content) = tidy_cleanup(chapter.content, **self.options)\n\n return chapter.content\n\n def html_after_read(self, book, chapter):\n if not chapter.content:\n return None\n\n (_, chapter.content) = tidy_cleanup(chapter.content, **self.options)\n\n return chapter.content\n", "id": "10121516", "language": "Python", "matching_score": 2.822089672088623, "max_stars_count": 1, "path": "epub_novel/plugins/tidyhtml.py" }, { "content": "class BasePlugin(object):\n def before_write(self, book):\n \"Processing before save\"\n return True\n\n def after_write(self, book):\n \"Processing after save\"\n return True\n\n def before_read(self, book):\n \"\"\"Processing before save\"\"\"\n return True\n\n def after_read(self, book):\n \"\"\"Processing after save\"\"\"\n return True\n\n def item_after_read(self, book, item):\n \"\"\"Process general item after read.\"\"\"\n return True\n\n def item_before_write(self, book, item):\n \"\"\"Process general item before write.\"\"\"\n return True\n\n def html_after_read(self, book, chapter):\n \"\"\"Processing HTML before read.\"\"\"\n return True\n\n def html_before_write(self, book, chapter):\n \"\"\"Processing HTML before save.\"\"\"\n return True\n", "id": "8769994", "language": "Python", "matching_score": 2.4342422485351562, "max_stars_count": 1, "path": "epub_novel/plugins/base.py" } ]
2.272836
debernal
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the Windows XML EventLog (EVTX) parser.\"\"\"\n\nimport unittest\n\nfrom plaso.lib import definitions\nfrom plaso.parsers import winevtx\n\nfrom tests.parsers import test_lib\n\n\nclass WinEvtxParserTest(test_lib.ParserTestCase):\n \"\"\"Tests for the Windows XML EventLog (EVTX) parser.\"\"\"\n\n def testParse(self):\n \"\"\"Tests the Parse function.\"\"\"\n parser = winevtx.WinEvtxParser()\n storage_writer = self._ParseFile(['System.evtx'], parser)\n\n self.assertEqual(storage_writer.number_of_warnings, 0)\n\n # Windows Event Viewer Log (EVTX) information:\n # Version : 3.1\n # Number of records : 1601\n # Number of recovered records : 0\n # Log type : System\n\n self.assertEqual(storage_writer.number_of_events, 3202)\n\n events = list(storage_writer.GetEvents())\n\n # Event number : 12049\n # Written time : Mar 14, 2012 04:17:43.354562700 UTC\n # Event level : Information (4)\n # Computer name : WKS-WIN764BITB.shieldbase.local\n # Provider identifier : {fc65ddd8-d6ef-4962-83d5-6e5cfe9ce148}\n # Source name : Microsoft-Windows-Eventlog\n # Event identifier : 0x00000069 (105)\n # Number of strings : 2\n # String: 1 : System\n # String: 2 : C:\\Windows\\System32\\Winevt\\Logs\\\n # : Archive-System-2012-03-14-04-17-39-932.evtx\n\n expected_string2 = (\n 'C:\\\\Windows\\\\System32\\\\Winevt\\\\Logs\\\\'\n 'Archive-System-2012-03-14-04-17-39-932.evtx')\n\n expected_event_values = {\n 'computer_name': 'WKS-WIN764BITB.shieldbase.local',\n 'data_type': 'windows:evtx:record',\n 'event_identifier': 105,\n 'event_level': 4,\n 'message_identifier': 105,\n 'record_number': 12049,\n 'source_name': 'Microsoft-Windows-Eventlog',\n 'strings': ['System', expected_string2]}\n\n self.CheckEventValues(storage_writer, events[0], expected_event_values)\n\n expected_xml_string = (\n '<Event xmlns=\"http://schemas.microsoft.com/win/2004/08/events/'\n 'event\">\\n'\n ' <System>\\n'\n ' <Provider Name=\"Service Control Manager\" '\n 'Guid=\"{555908d1-a6d7-4695-8e1e-26931d2012f4}\" '\n 'EventSourceName=\"Service Control Manager\"/>\\n'\n ' <EventID Qualifiers=\"16384\">7036</EventID>\\n'\n ' <Version>0</Version>\\n'\n ' <Level>4</Level>\\n'\n ' <Task>0</Task>\\n'\n ' <Opcode>0</Opcode>\\n'\n ' <Keywords>0x8080000000000000</Keywords>\\n'\n ' <TimeCreated SystemTime=\"2012-03-14T04:17:38.276340200Z\"/>\\n'\n ' <EventRecordID>12050</EventRecordID>\\n'\n ' <Correlation/>\\n'\n ' <Execution ProcessID=\"548\" ThreadID=\"1340\"/>\\n'\n ' <Channel>System</Channel>\\n'\n ' <Computer>WKS-WIN764BITB.shieldbase.local</Computer>\\n'\n ' <Security/>\\n'\n ' </System>\\n'\n ' <EventData>\\n'\n ' <Data Name=\"param1\">Windows Modules Installer</Data>\\n'\n ' <Data Name=\"param2\">stopped</Data>\\n'\n ' <Binary>540072007500730074006500640049006E007300740061006C006C00'\n '650072002F0031000000</Binary>\\n'\n ' </EventData>\\n'\n '</Event>\\n')\n\n expected_event_values = {\n 'computer_name': 'WKS-WIN764BITB.shieldbase.local',\n 'data_type': 'windows:evtx:record',\n 'event_level': 4,\n 'record_number': 12050,\n 'source_name': 'Service Control Manager',\n 'strings': ['Windows Modules Installer', 'stopped', (\n '540072007500730074006500640049006E007300740061006C006C00650072002F'\n '0031000000')],\n 'timestamp': '2012-03-14 04:17:38.276340',\n 'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN,\n 'xml_string': expected_xml_string}\n\n self.CheckEventValues(storage_writer, events[2], expected_event_values)\n\n def testParseTruncated(self):\n \"\"\"Tests 
the Parse function on a truncated file.\"\"\"\n parser = winevtx.WinEvtxParser()\n # Be aware of System2.evtx file, it was manually shortened so it probably\n # contains invalid log at the end.\n storage_writer = self._ParseFile(['System2.evtx'], parser)\n\n self.assertEqual(storage_writer.number_of_warnings, 0)\n self.assertEqual(storage_writer.number_of_events, 388)\n\n events = list(storage_writer.GetEvents())\n\n expected_event_values = {\n 'data_type': 'windows:evtx:record',\n 'event_identifier': 4624,\n 'message_identifier': 4624}\n\n self.CheckEventValues(storage_writer, events[356], expected_event_values)\n\n expected_event_values = {\n 'data_type': 'windows:evtx:record',\n 'event_identifier': 4648,\n 'message_identifier': 4648}\n\n self.CheckEventValues(storage_writer, events[360], expected_event_values)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "10807627", "language": "Python", "matching_score": 3.2820870876312256, "max_stars_count": 2, "path": "tests/parsers/winevtx.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the Windows IIS log parser.\"\"\"\n\nimport unittest\n\nfrom plaso.parsers import iis\n\nfrom tests.parsers import test_lib\n\n\nclass WinIISUnitTest(test_lib.ParserTestCase):\n \"\"\"Tests for the Windows IIS parser.\"\"\"\n\n def testParse(self):\n \"\"\"Tests the Parse function with an IIS 6 log file.\"\"\"\n parser = iis.WinIISParser()\n storage_writer = self._ParseFile(['iis6.log'], parser)\n\n self.assertEqual(storage_writer.number_of_warnings, 0)\n self.assertEqual(storage_writer.number_of_events, 12)\n\n events = list(storage_writer.GetEvents())\n\n expected_event_values = {\n 'data_type': 'iis:log:line',\n 'dest_ip': '10.10.10.100',\n 'dest_port': 80,\n 'http_method': 'GET',\n 'http_status': 200,\n 'requested_uri_stem': '/some/image/path/something.jpg',\n 'source_ip': '10.10.10.100',\n 'timestamp': '2013-07-30 00:00:00.000000',\n 'user_agent': (\n 'Mozilla/4.0+(compatible;+Win32;+WinHttp.WinHttpRequest.5)')}\n\n self.CheckEventValues(storage_writer, events[0], expected_event_values)\n\n expected_event_values = {\n 'data_type': 'iis:log:line',\n 'http_method': 'GET',\n 'http_status': 200,\n 'requested_uri_stem': '/some/image/path/something.jpg',\n 'timestamp': '2013-07-30 00:00:05.000000'}\n\n self.CheckEventValues(storage_writer, events[5], expected_event_values)\n\n expected_event_values = {\n 'data_type': 'iis:log:line',\n 'dest_ip': '10.10.10.100',\n 'dest_port': 80,\n 'http_method': 'GET',\n 'http_status': 404,\n 'requested_uri_stem': '/some/image/path/something.htm',\n 'source_ip': '22.22.22.200',\n 'timestamp': '2013-07-30 00:00:03.000000',\n 'user_agent': (\n 'Mozilla/5.0+(Macintosh;+Intel+Mac+OS+X+10_6_8)+AppleWebKit/'\n '534.57.2+(KHTML,+like+Gecko)+Version/5.1.7+Safari/534.57.2')}\n\n self.CheckEventValues(storage_writer, events[1], expected_event_values)\n\n expected_event_values = {\n 'cs_uri_query': 'ID=ERROR[`cat%20passwd|echo`]',\n 'data_type': 'iis:log:line'}\n\n self.CheckEventValues(storage_writer, events[11], expected_event_values)\n\n def testParseWithIIS7SQLIFile(self):\n \"\"\"Tests the Parse function with an IIS 7 log file with SQLI.\"\"\"\n parser = iis.WinIISParser()\n storage_writer = self._ParseFile(['iis7_sqli.log'], parser)\n\n self.assertEqual(storage_writer.number_of_warnings, 0)\n self.assertEqual(storage_writer.number_of_events, 2)\n\n events = list(storage_writer.GetEvents())\n\n expected_event_values = {\n 'data_type': 'iis:log:line',\n 'dest_ip': '172.16.17.32',\n 
'dest_port': 443,\n 'http_method': 'GET',\n 'http_status': 500,\n 'requested_uri_stem': '/foo/bar/baz.asp',\n 'source_ip': '192.168.127.12',\n 'timestamp': '2015-10-16 13:01:02.000000',\n 'user_agent': (\n 'Mozilla/5.0+(Macintosh;+Intel+Mac+OS+X+10_9_2)+AppleWebKit/'\n '537.36+(KHTML,+like+Gecko)+Chrome/34.0.1847.131+Safari/537.36')}\n\n self.CheckEventValues(storage_writer, events[0], expected_event_values)\n\n def testParseWithIIS7OWAFile(self):\n \"\"\"Tests the Parse function with an IIS 7 OWA log file.\"\"\"\n parser = iis.WinIISParser()\n storage_writer = self._ParseFile(['iis7_owa.log'], parser)\n\n self.assertEqual(storage_writer.number_of_warnings, 0)\n self.assertEqual(storage_writer.number_of_events, 3)\n\n events = list(storage_writer.GetEvents())\n\n expected_event_values = {\n 'data_type': 'iis:log:line',\n 'dest_ip': '10.11.2.3',\n 'dest_port': 443,\n 'http_method': 'GET',\n 'http_status': 200,\n 'requested_uri_stem': '/owa/',\n 'source_ip': '192.168.127.12',\n 'timestamp': '2015-12-31 00:19:48.000000',\n 'user_agent': (\n 'Mozilla/5.0+(Windows+NT+6.1;+WOW64)+AppleWebKit/537.36+'\n '(KHTML,+like+Gecko)+Chrome/39.0.2171.95+Safari/537.36')}\n\n self.CheckEventValues(storage_writer, events[0], expected_event_values)\n\n def testParseWithoutDate(self):\n \"\"\"Tests the Parse function with logs without a date column.\"\"\"\n parser = iis.WinIISParser()\n storage_writer = self._ParseFile(['iis_without_date.log'], parser)\n\n self.assertEqual(storage_writer.number_of_warnings, 0)\n self.assertEqual(storage_writer.number_of_events, 11)\n\n events = list(storage_writer.GetEvents())\n\n expected_event_values = {\n 'data_type': 'iis:log:line',\n 'protocol_version': 'HTTP/1.1',\n 'timestamp': '2013-07-30 00:00:03.000000'}\n\n self.CheckEventValues(storage_writer, events[1], expected_event_values)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "12610405", "language": "Python", "matching_score": 0.876983106136322, "max_stars_count": 2, "path": "tests/parsers/iis.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the tag_linux.txt tagging file.\"\"\"\n\nimport unittest\n\nfrom plaso.containers import events\nfrom plaso.lib import definitions\nfrom plaso.parsers import bash_history\nfrom plaso.parsers import docker\nfrom plaso.parsers import dpkg\nfrom plaso.parsers import selinux\nfrom plaso.parsers import syslog\nfrom plaso.parsers import utmp\nfrom plaso.parsers import zsh_extended_history\nfrom plaso.parsers.syslog_plugins import cron\n\nfrom tests.data import test_lib\n\n\nclass LinuxTaggingFileTest(test_lib.TaggingFileTestCase):\n \"\"\"Tests the tag_linux.txt tagging file.\n\n In the tests below the EventData classes are used to catch failing tagging\n rules in case event data types are renamed.\n \"\"\"\n\n _TAG_FILE = 'tag_linux.txt'\n\n def testRuleApplicationExecution(self):\n \"\"\"Tests the application_execution tagging rule.\"\"\"\n # Test: data_type is 'bash:history:command'\n attribute_values_per_name = {}\n self._CheckTaggingRule(\n bash_history.BashHistoryEventData, attribute_values_per_name,\n ['application_execution'])\n\n # Test: data_type is 'docker:json:layer'\n attribute_values_per_name = {}\n self._CheckTaggingRule(\n docker.DockerJSONLayerEventData, attribute_values_per_name,\n ['application_execution'])\n\n # Test: data_type is 'selinux:line' AND (audit_type is 'EXECVE' OR\n # audit_type is 'USER_CMD')\n attribute_values_per_name = {\n 'audit_type': ['EXECVE', 'USER_CMD']}\n self._CheckTaggingRule(\n 
selinux.SELinuxLogEventData, attribute_values_per_name,\n ['application_execution'])\n\n # Test: data_type is 'shell:zsh:history'\n attribute_values_per_name = {}\n self._CheckTaggingRule(\n zsh_extended_history.ZshHistoryEventData, attribute_values_per_name,\n ['application_execution'])\n\n # Test: data_type is 'syslog:cron:task_run'\n attribute_values_per_name = {}\n self._CheckTaggingRule(\n cron.CronTaskRunEventData, attribute_values_per_name,\n ['application_execution'])\n\n # Test: reporter is 'sudo' AND body contains 'COMMAND='\n attribute_values_per_name = {\n 'body': ['test if my COMMAND=bogus'],\n 'reporter': ['sudo']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['application_execution'])\n\n # Test: reporter is 'CROND' AND body contains 'CMD'\n attribute_values_per_name = {\n 'body': ['test if my CMD bogus'],\n 'reporter': ['CROND']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['application_execution'])\n\n def testRuleLogin(self):\n \"\"\"Tests the login tagging rule.\"\"\"\n # Test: data_type is 'linux:utmp:event' AND type == 7\n attribute_values_per_name = {\n 'type': [7]}\n self._CheckTaggingRule(\n utmp.UtmpEventData, attribute_values_per_name,\n ['login'])\n\n # Test: data_type is 'selinux:line' AND audit_type is 'LOGIN'\n attribute_values_per_name = {\n 'audit_type': ['LOGIN']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['login'])\n\n # Test: reporter is 'login' AND (body contains 'logged in' OR\n # body contains 'ROOT LOGIN' OR body contains 'session opened')\n attribute_values_per_name = {\n 'body': ['logged in', 'ROOT LOGIN', 'session opened'],\n 'reporter': ['login']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['login'])\n\n # Test: reporter is 'sshd' AND (body contains 'session opened' OR\n # body contains 'Starting session')\n attribute_values_per_name = {\n 'body': ['session opened', 'Starting session'],\n 'reporter': ['sshd']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['login'])\n\n # Test: reporter is 'dovecot' AND body contains 'imap-login: Login:'\n attribute_values_per_name = {\n 'body': ['imap-login: Login:'],\n 'reporter': ['dovecot']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['login'])\n\n # Test: reporter is 'postfix/submission/smtpd' AND body contains 'sasl_'\n attribute_values_per_name = {\n 'body': ['sasl_method=PLAIN, sasl_username='],\n 'reporter': ['postfix/submission/smtpd']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['login'])\n\n def testRuleLoginFailed(self):\n \"\"\"Tests the login_failed tagging rule.\"\"\"\n # Test: data_type is 'selinux:line' AND audit_type is 'ANOM_LOGIN_FAILURES'\n attribute_values_per_name = {\n 'audit_type': ['ANOM_LOGIN_FAILURES']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['login_failed'])\n\n # Test: data_type is 'selinux:line' AND audit_type is 'USER_LOGIN' AND\n # body contains 'res=failed'\n attribute_values_per_name = {\n 'audit_type': ['USER_LOGIN'],\n 'body': ['res=failed']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['login_failed'])\n\n # Test: data_type is 'syslog:line' AND body contains 'pam_tally2'\n attribute_values_per_name = {\n 'body': ['pam_tally2']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, 
attribute_values_per_name,\n ['login_failed'])\n\n # Test: (reporter is 'sshd' OR\n # reporter is 'login' OR\n # reporter is 'postfix/submission/smtpd' OR\n # reporter is 'sudo') AND\n # body contains 'uthentication fail'\n attribute_values_per_name = {\n 'body': ['authentication failed', 'authentication failure',\n 'Authentication failure'],\n 'reporter': ['login', 'postfix/submission/smtpd', 'sshd', 'sudo']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['login_failed'])\n\n # Test: (reporter is 'xscreensaver' or\n # reporter is 'login') AND\n # body contains 'FAILED LOGIN'\n attribute_values_per_name = {\n 'body': ['FAILED LOGIN'],\n 'reporter': ['login', 'xscreensaver']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['login_failed'])\n\n # Test: reporter is 'su' AND body contains 'DENIED'\n attribute_values_per_name = {\n 'body': ['DENIED su from'],\n 'reporter': ['su']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['login_failed'])\n\n # Test: reporter is 'nologin'\n attribute_values_per_name = {\n 'reporter': ['nologin']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['login_failed'])\n\n def testRuleUserAdd(self):\n \"\"\"Tests the useradd tagging rule.\"\"\"\n # Test: reporter is 'useradd' AND body contains 'new user'\n attribute_values_per_name = {\n 'reporter': ['useradd'],\n 'body': ['new user']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['useradd'])\n\n # Test: data_type is 'selinux:line' AND audit_type is 'ADD_USER'\n attribute_values_per_name = {\n 'audit_type': ['ADD_USER']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['useradd'])\n\n def testRuleGroupAdd(self):\n \"\"\"Tests the groupadd tagging rule.\"\"\"\n # Test: reporter is 'useradd' AND body contains 'new group'\n attribute_values_per_name = {\n 'reporter': ['useradd'],\n 'body': ['new group']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['groupadd'])\n\n # Test: data_type is 'selinux:line' AND audit_type is 'ADD_GROUP'\n attribute_values_per_name = {\n 'audit_type': ['ADD_GROUP']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['groupadd'])\n\n # Test: reporter is 'groupadd'\n attribute_values_per_name = {\n 'reporter': ['groupadd']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['groupadd'])\n\n def testRuleUserDel(self):\n \"\"\"Tests the userdel tagging rule.\"\"\"\n # Test: reporter is 'userdel' AND body contains 'delete user'\n attribute_values_per_name = {\n 'reporter': ['userdel'],\n 'body': ['delete user']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['userdel'])\n\n # Test: data_type is 'selinux:line' AND audit_type is 'DEL_USER'\n attribute_values_per_name = {\n 'audit_type': ['DEL_USER']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['userdel'])\n\n def testRuleGroupDel(self):\n \"\"\"Tests the groupdel tagging rule.\"\"\"\n # Test: reporter is 'userdel' AND body contains 'removed group'\n attribute_values_per_name = {\n 'reporter': ['userdel'],\n 'body': ['removed group']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['groupdel'])\n\n # Test: data_type is 'selinux:line' AND audit_type is 'DEL_GROUP'\n attribute_values_per_name = 
{\n 'audit_type': ['DEL_GROUP']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['groupdel'])\n\n # Test: reporter is 'groupdel'\n attribute_values_per_name = {\n 'reporter': ['groupdel']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['groupdel'])\n\n def testRuleFirewallChange(self):\n \"\"\"Tests the firewall_change tagging rule.\"\"\"\n # Test: data_type is 'selinux:line' AND audit_type is 'NETFILTER_CFG'\n attribute_values_per_name = {\n 'audit_type': ['NETFILTER_CFG']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['firewall_change'])\n\n def testRuleLogout(self):\n \"\"\"Tests the logout tagging rule.\"\"\"\n # Test: data_type is 'linux:utmp:event' AND type == 8 AND terminal != '' AND\n # pid != 0\n\n # Cannot use _CheckTaggingRule here because of terminal != ''\n event = events.EventObject()\n event.timestamp = self._TEST_TIMESTAMP\n event.timestamp_desc = definitions.TIME_DESCRIPTION_UNKNOWN\n\n event_data = utmp.UtmpEventData()\n event_data.type = 0\n event_data.terminal = 'tty1'\n event_data.pid = 1\n\n storage_writer = self._TagEvent(event, event_data, None)\n\n self.assertEqual(storage_writer.number_of_event_tags, 0)\n self._CheckLabels(storage_writer, [])\n\n event_data.type = 8\n event_data.terminal = ''\n\n storage_writer = self._TagEvent(event, event_data, None)\n\n self.assertEqual(storage_writer.number_of_event_tags, 0)\n self._CheckLabels(storage_writer, [])\n\n event_data.terminal = 'tty1'\n event_data.pid = 0\n\n storage_writer = self._TagEvent(event, event_data, None)\n\n self.assertEqual(storage_writer.number_of_event_tags, 0)\n self._CheckLabels(storage_writer, [])\n\n event_data.pid = 1\n\n storage_writer = self._TagEvent(event, event_data, None)\n\n self.assertEqual(storage_writer.number_of_event_tags, 1)\n self._CheckLabels(storage_writer, ['logout'])\n\n # Test: reporter is 'login' AND body contains 'session closed'\n attribute_values_per_name = {\n 'body': ['session closed'],\n 'reporter': ['login']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])\n\n # Test: reporter is 'sshd' AND (body contains 'session closed' OR\n # body contains 'Close session')\n attribute_values_per_name = {\n 'body': ['Close session', 'session closed'],\n 'reporter': ['sshd']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])\n\n # Test: reporter is 'systemd-logind' AND body contains 'logged out'\n attribute_values_per_name = {\n 'body': ['logged out'],\n 'reporter': ['systemd-logind']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])\n\n # Test: reporter is 'dovecot' AND body contains 'Logged out'\n attribute_values_per_name = {\n 'body': ['Logged out'],\n 'reporter': ['dovecot']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])\n\n # Test: data_type is 'selinux:line' AND audit_type is 'USER_LOGOUT'\n attribute_values_per_name = {\n 'audit_type': ['USER_LOGOUT']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['logout'])\n\n def testRuleSessionStart(self):\n \"\"\"Tests the session_start tagging rule.\"\"\"\n # Test: reporter is 'systemd-logind' and body contains 'New session'\n attribute_values_per_name = {\n 'body': ['New session'],\n 'reporter': ['systemd-logind']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, 
attribute_values_per_name,\n ['session_start'])\n\n def testRuleSessionStop(self):\n \"\"\"Tests the session_stop tagging rule.\"\"\"\n # Test: reporter is 'systemd-logind' and body contains 'Removed session'\n attribute_values_per_name = {\n 'body': ['Removed session'],\n 'reporter': ['systemd-logind']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['session_stop'])\n\n def testRuleBoot(self):\n \"\"\"Tests the boot tagging rule.\"\"\"\n # Test: data_type is 'linux:utmp:event' AND type == 2 AND\n # terminal is 'system boot' AND username is 'reboot'\n attribute_values_per_name = {\n 'terminal': ['system boot'],\n 'type': [2],\n 'username': ['reboot']}\n self._CheckTaggingRule(\n utmp.UtmpEventData, attribute_values_per_name, ['boot'])\n\n # Test: data_type is 'selinux:line' AND audit_type is 'SYSTEM_BOOT'\n attribute_values_per_name = {\n 'audit_type': ['SYSTEM_BOOT']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['boot'])\n\n def testRuleShutdown(self):\n \"\"\"Tests the shutdonw tagging rule.\"\"\"\n # Test: data_type is 'linux:utmp:event' AND type == 1 AND\n # (terminal is '~~' OR terminal is 'system boot') AND\n # username is 'shutdown'\n attribute_values_per_name = {\n 'terminal': ['~~', 'system boot'],\n 'type': [1],\n 'username': ['shutdown']}\n self._CheckTaggingRule(\n utmp.UtmpEventData, attribute_values_per_name, ['shutdown'])\n\n # Test: data_type is 'selinux:line' AND audit_type is 'SYSTEM_SHUTDOWN'\n attribute_values_per_name = {\n 'audit_type': ['SYSTEM_SHUTDOWN']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['shutdown'])\n\n def testRuleRunlevel(self):\n \"\"\"Tests the runlevel tagging rule.\"\"\"\n # Test: data_type is 'linux:utmp:event' AND type == 1 AND\n # username is 'runlevel'\n attribute_values_per_name = {\n 'type': [1],\n 'username': ['runlevel']}\n self._CheckTaggingRule(\n utmp.UtmpEventData, attribute_values_per_name, ['runlevel'])\n\n # Test: data_type is 'selinux:line' AND audit_type is 'SYSTEM_RUNLEVEL'\n attribute_values_per_name = {\n 'audit_type': ['SYSTEM_RUNLEVEL']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['runlevel'])\n\n def testRuleDeviceConnection(self):\n \"\"\"Tests the device_connection tagging rule.\"\"\"\n # Test: reporter is 'kernel' AND body contains 'New USB device found'\n attribute_values_per_name = {\n 'body': ['New USB device found'],\n 'reporter': ['kernel']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['device_connection'])\n\n def testRuleDeviceDisconnection(self):\n \"\"\"Tests the device_disconnection tagging rule.\"\"\"\n # Test: reporter is 'kernel' AND body contains 'USB disconnect'\n attribute_values_per_name = {\n 'body': ['USB disconnect'],\n 'reporter': ['kernel']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['device_disconnection'])\n\n def testRuleApplicationInstall(self):\n \"\"\"Tests the application_install tagging rule.\"\"\"\n # Test: data_type is 'dpkg:line' AND body contains 'status installed'\n attribute_values_per_name = {\n 'body': ['status installed']}\n self._CheckTaggingRule(\n dpkg.DpkgEventData, attribute_values_per_name,\n ['application_install'])\n\n def testRuleServiceStart(self):\n \"\"\"Tests the service_start tagging rule.\"\"\"\n # Test: data_type is 'selinux:line' AND audit_type is 'SERVICE_START'\n attribute_values_per_name = {\n 'audit_type': 
['SERVICE_START']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['service_start'])\n\n def testRuleServiceStop(self):\n \"\"\"Tests the service_stop tagging rule.\"\"\"\n # Test: data_type is 'selinux:line' AND audit_type is 'SERVICE_STOP'\n attribute_values_per_name = {\n 'audit_type': ['SERVICE_STOP']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['service_stop'])\n\n def testRulePromiscuous(self):\n \"\"\"Tests the promiscuous tagging rule.\"\"\"\n # Test: data_type is 'selinux:line' AND audit_type is 'ANOM_PROMISCUOUS'\n attribute_values_per_name = {\n 'audit_type': ['ANOM_PROMISCUOUS']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name,\n ['promiscuous'])\n\n # Test: reporter is 'kernel' AND body contains 'promiscuous mode'\n attribute_values_per_name = {\n 'body': ['promiscuous mode'],\n 'reporter': ['kernel']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name,\n ['promiscuous'])\n\n def testRuleCrach(self):\n \"\"\"Tests the crash tagging rule.\"\"\"\n # Test: data_type is 'selinux:line' AND audit_type is 'ANOM_ABEND'\n attribute_values_per_name = {\n 'audit_type': ['ANOM_ABEND']}\n self._CheckTaggingRule(\n selinux.SELinuxLogEventData, attribute_values_per_name, ['crash'])\n\n # Test: reporter is 'kernel' AND body contains 'segfault'\n attribute_values_per_name = {\n 'body': ['segfault'],\n 'reporter': ['kernel']}\n self._CheckTaggingRule(\n syslog.SyslogLineEventData, attribute_values_per_name, ['crash'])\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "6761075", "language": "Python", "matching_score": 1.3204020261764526, "max_stars_count": 2, "path": "tests/data/tag_linux.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the psort CLI tool.\"\"\"\n\nimport argparse\nimport io\nimport os\nimport unittest\n\ntry:\n import resource\nexcept ImportError:\n resource = None\n\nfrom plaso.cli import psort_tool\nfrom plaso.cli.helpers import interface as helpers_interface\nfrom plaso.cli.helpers import manager as helpers_manager\nfrom plaso.lib import errors\nfrom plaso.output import dynamic\nfrom plaso.output import manager as output_manager\n\nfrom tests import test_lib as shared_test_lib\nfrom tests.cli import test_lib\n\n\nclass TestInputReader(object):\n \"\"\"Test input reader.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the reader.\"\"\"\n super(TestInputReader, self).__init__()\n self.read_called = False\n\n def Read(self):\n \"\"\"Mock a read operation by user.\"\"\"\n self.read_called = True\n return 'foobar'\n\n\nclass TestOutputModuleArgumentHelper(helpers_interface.ArgumentsHelper):\n \"\"\"Test argument helper for the test output module.\"\"\"\n\n NAME = 'test_missing'\n\n @classmethod\n def AddArguments(cls, argument_group):\n \"\"\"Mock the add argument section.\"\"\"\n return\n\n # pylint: disable=arguments-differ\n @classmethod\n def ParseOptions(cls, options, output_module):\n \"\"\"Provide a test parse options section.\"\"\"\n if not isinstance(output_module, TestOutputModuleMissingParameters):\n raise errors.BadConfigObject((\n 'Output module is not an instance of '\n 'TestOutputModuleMissingParameters'))\n\n missing = getattr(options, 'missing', None)\n if missing:\n output_module.SetMissingValue('missing', missing)\n\n parameters = getattr(options, 'parameters', None)\n if parameters:\n output_module.SetMissingValue('parameters', parameters)\n\n\nclass 
TestOutputModuleMissingParameters(dynamic.DynamicOutputModule):\n \"\"\"Test output module that is missing some parameters.\"\"\"\n\n NAME = 'test_missing'\n\n # For test purpose assign these as class attributes.\n missing = None\n parameters = None\n\n def GetMissingArguments(self):\n \"\"\"Return a list of missing parameters.\"\"\"\n missing_parameters = []\n if self.missing is None:\n missing_parameters.append('missing')\n\n if self.parameters is None:\n missing_parameters.append('parameters')\n\n return missing_parameters\n\n @classmethod\n def SetMissingValue(cls, attribute, value):\n \"\"\"Set missing value.\"\"\"\n setattr(cls, attribute, value)\n\n\nclass PsortToolTest(test_lib.CLIToolTestCase):\n \"\"\"Tests for the psort tool.\"\"\"\n\n # pylint: disable=protected-access\n\n _EXPECTED_OUTPUT_TIME_ZONE_OPTION = \"\"\"\\\nusage: psort_test.py [--output_time_zone TIME_ZONE]\n\nTest argument parser.\n\noptional arguments:\n --output_time_zone TIME_ZONE, --output-time-zone TIME_ZONE\n time zone of date and time values written to the\n output, if supported by the output format. Output\n formats that support this are: dynamic and l2t_csv.\n Use \"list\" to see a list of available time zones.\n\"\"\"\n\n if resource is None:\n _EXPECTED_PROCESSING_OPTIONS = \"\"\"\\\nusage: psort_test.py [--temporary_directory DIRECTORY]\n [--worker_memory_limit SIZE] [--worker_timeout MINUTES]\n\nTest argument parser.\n\noptional arguments:\n --temporary_directory DIRECTORY, --temporary-directory DIRECTORY\n Path to the directory that should be used to store\n temporary files created during processing.\n --worker_memory_limit SIZE, --worker-memory-limit SIZE\n Maximum amount of memory (data segment and shared\n memory) a worker process is allowed to consume in\n bytes, where 0 represents no limit. The default limit\n is 2147483648 (2 GiB). If a worker process exceeds\n this limit it is killed by the main (foreman) process.\n --worker_timeout MINUTES, --worker-timeout MINUTES\n Number of minutes before a worker process that is not\n providing status updates is considered inactive. The\n default timeout is 15.0 minutes. If a worker process\n exceeds this timeout it is killed by the main\n (foreman) process.\n\"\"\"\n else:\n _EXPECTED_PROCESSING_OPTIONS = \"\"\"\\\nusage: psort_test.py [--process_memory_limit SIZE]\n [--temporary_directory DIRECTORY]\n [--worker_memory_limit SIZE] [--worker_timeout MINUTES]\n\nTest argument parser.\n\noptional arguments:\n --process_memory_limit SIZE, --process-memory-limit SIZE\n Maximum amount of memory (data segment) a process is\n allowed to allocate in bytes, where 0 represents no\n limit. The default limit is 4294967296 (4 GiB). This\n applies to both the main (foreman) process and the\n worker processes. This limit is enforced by the\n operating system and will supersede the worker memory\n limit (--worker_memory_limit).\n --temporary_directory DIRECTORY, --temporary-directory DIRECTORY\n Path to the directory that should be used to store\n temporary files created during processing.\n --worker_memory_limit SIZE, --worker-memory-limit SIZE\n Maximum amount of memory (data segment and shared\n memory) a worker process is allowed to consume in\n bytes, where 0 represents no limit. The default limit\n is 2147483648 (2 GiB). If a worker process exceeds\n this limit it is killed by the main (foreman) process.\n --worker_timeout MINUTES, --worker-timeout MINUTES\n Number of minutes before a worker process that is not\n providing status updates is considered inactive. 
The\n default timeout is 15.0 minutes. If a worker process\n exceeds this timeout it is killed by the main\n (foreman) process.\n\"\"\"\n\n # TODO: add test for _CreateOutputModule.\n # TODO: add test for _FormatStatusTableRow.\n # TODO: add test for _GetAnalysisPlugins.\n # TODO: add test for _ParseAnalysisPluginOptions.\n # TODO: add test for _ParseInformationalOptions.\n\n def testParseOutputTimeZoneOption(self):\n \"\"\"Tests the _ParseOutputTimeZoneOption function.\"\"\"\n test_tool = psort_tool.PsortTool()\n\n options = test_lib.TestOptions()\n\n test_tool._ParseOutputTimeZoneOption(options)\n self.assertIsNone(test_tool._output_time_zone)\n\n options.output_time_zone = 'list'\n test_tool._ParseOutputTimeZoneOption(options)\n self.assertIsNone(test_tool._output_time_zone)\n\n options.output_time_zone = 'CET'\n test_tool._ParseOutputTimeZoneOption(options)\n self.assertEqual(test_tool._output_time_zone, 'CET')\n\n # TODO: add test for _ParseProcessingOptions.\n # TODO: add test for _PrintStatusHeader.\n # TODO: add test for _PrintStatusUpdate.\n # TODO: add test for _PrintStatusUpdateStream.\n\n def testAddOutputTimeZoneOption(self):\n \"\"\"Tests the AddOutputTimeZoneOption function.\"\"\"\n argument_parser = argparse.ArgumentParser(\n prog='psort_test.py', description='Test argument parser.',\n add_help=False, formatter_class=test_lib.SortedArgumentsHelpFormatter)\n\n test_tool = psort_tool.PsortTool()\n test_tool.AddOutputTimeZoneOption(argument_parser)\n\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_OUTPUT_TIME_ZONE_OPTION)\n\n def testAddProcessingOptions(self):\n \"\"\"Tests the AddProcessingOptions function.\"\"\"\n argument_parser = argparse.ArgumentParser(\n prog='psort_test.py',\n description='Test argument parser.', add_help=False,\n formatter_class=test_lib.SortedArgumentsHelpFormatter)\n\n test_tool = psort_tool.PsortTool()\n test_tool.AddProcessingOptions(argument_parser)\n\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_PROCESSING_OPTIONS)\n\n def testListLanguageIdentifiers(self):\n \"\"\"Tests the ListLanguageIdentifiers function.\"\"\"\n output_writer = test_lib.TestOutputWriter(encoding='utf-8')\n test_tool = psort_tool.PsortTool(output_writer=output_writer)\n\n test_tool.ListLanguageIdentifiers()\n\n output = output_writer.ReadOutput()\n\n number_of_tables = 0\n lines = []\n for line in output.split('\\n'):\n line = line.strip()\n lines.append(line)\n\n if line.startswith('*****') and line.endswith('*****'):\n number_of_tables += 1\n\n self.assertIn('Language identifiers', lines[1])\n\n lines = frozenset(lines)\n\n self.assertEqual(number_of_tables, 1)\n\n expected_line = 'en : English'\n self.assertIn(expected_line, lines)\n\n def testParseArguments(self):\n \"\"\"Tests the ParseArguments function.\"\"\"\n output_writer = test_lib.TestOutputWriter(encoding='utf-8')\n test_tool = psort_tool.PsortTool(output_writer=output_writer)\n\n result = test_tool.ParseArguments([])\n self.assertFalse(result)\n\n # TODO: check output.\n # TODO: improve test coverage.\n\n def testParseOptions(self):\n \"\"\"Tests the ParseOptions function.\"\"\"\n output_writer = test_lib.TestBinaryOutputWriter(encoding='utf-8')\n test_tool = psort_tool.PsortTool(output_writer=output_writer)\n\n options = test_lib.TestOptions()\n options.output_format = 'null'\n options.storage_file = self._GetTestFilePath(['psort_test.plaso'])\n\n test_tool.ParseOptions(options)\n\n options = 
test_lib.TestOptions()\n\n with self.assertRaises(errors.BadConfigOption):\n test_tool.ParseOptions(options)\n\n options = test_lib.TestOptions()\n options.storage_file = self._GetTestFilePath(['psort_test.plaso'])\n\n with self.assertRaises(errors.BadConfigOption):\n test_tool.ParseOptions(options)\n\n # TODO: improve test coverage.\n\n def testProcessStorageWithMissingParameters(self):\n \"\"\"Tests the ProcessStorage function with parameters missing.\"\"\"\n encoding = 'utf-8'\n input_reader = TestInputReader()\n output_writer = test_lib.TestOutputWriter(encoding=encoding)\n test_tool = psort_tool.PsortTool(\n input_reader=input_reader, output_writer=output_writer)\n\n options = test_lib.TestOptions()\n options.data_location = shared_test_lib.DATA_PATH\n options.storage_file = self._GetTestFilePath(['psort_test.plaso'])\n options.output_format = 'test_missing'\n\n output_manager.OutputManager.RegisterOutput(\n TestOutputModuleMissingParameters)\n helpers_manager.ArgumentHelperManager.RegisterHelper(\n TestOutputModuleArgumentHelper)\n\n lines = []\n with shared_test_lib.TempDirectory() as temp_directory:\n temp_file_name = os.path.join(temp_directory, 'output.txt')\n options.write = temp_file_name\n\n test_tool.ParseOptions(options)\n test_tool.ProcessStorage()\n\n with io.open(temp_file_name, 'rt', encoding=encoding) as file_object:\n lines = [line.strip() for line in file_object]\n\n self.assertTrue(input_reader.read_called)\n self.assertEqual(TestOutputModuleMissingParameters.missing, 'foobar')\n self.assertEqual(TestOutputModuleMissingParameters.parameters, 'foobar')\n\n expected_line = (\n '2021-04-11T05:15:34.607703823+00:00,Last Access Time,FILE,File stat,'\n 'OS:/tmp/test/test_data/syslog Type: file,filestat,'\n 'OS:/tmp/test/test_data/syslog,-')\n self.assertIn(expected_line, lines)\n\n output_manager.OutputManager.DeregisterOutput(\n TestOutputModuleMissingParameters)\n helpers_manager.ArgumentHelperManager.DeregisterHelper(\n TestOutputModuleArgumentHelper)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "7332985", "language": "Python", "matching_score": 2.739018678665161, "max_stars_count": 2, "path": "tests/cli/psort_tool.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"File containing a Windows Registry plugin to parse the AMCache.hve file.\"\"\"\n\nimport pyregf\n\nfrom dfdatetime import filetime as dfdatetime_filetime\nfrom dfdatetime import posix_time as dfdatetime_posix_time\n\nfrom dfwinreg import definitions as dfwinreg_definitions\n\nfrom plaso.containers import events\nfrom plaso.containers import time_events\nfrom plaso.containers import windows_events\nfrom plaso.lib import definitions\nfrom plaso.parsers import interface\nfrom plaso.parsers import manager\n\n\nclass AMCacheFileEventData(events.EventData):\n \"\"\"AMCache file event data.\n\n Attributes:\n company_name (str): company name that created product file belongs to.\n file_description (str): description of file.\n file_reference (str): file system file reference, for example 9-1 (MFT\n entry - sequence number).\n file_size (int): size of file in bytes.\n file_version (str): version of file.\n full_path (str): full path of file.\n language_code (int): language code of file.\n product_name (str): product name file belongs to.\n program_identifier (str): GUID of entry under Root/Program key file belongs\n to.\n sha1 (str): SHA-1 of file.\n \"\"\"\n\n DATA_TYPE = 'windows:registry:amcache'\n\n def __init__(self):\n \"\"\"Initializes event data.\"\"\"\n super(AMCacheFileEventData, 
self).__init__(data_type=self.DATA_TYPE)\n self.company_name = None\n self.file_description = None\n self.file_reference = None\n self.file_size = None\n self.file_version = None\n self.full_path = None\n self.language_code = None\n self.product_name = None\n self.program_identifier = None\n self.sha1 = None\n\n\nclass AMCacheProgramEventData(events.EventData):\n \"\"\"AMCache programs event data.\n\n Attributes:\n entry_type (str): type of entry (usually AddRemoveProgram).\n file_paths (str): file paths of installed program.\n files (str): list of files belonging to program.\n language_code (int): language_code of program.\n msi_package_code (str): MSI package code of program.\n msi_product_code (str): MSI product code of program.\n name (str): name of installed program.\n package_code (str): package code of program.\n product_code (str): product code of program.\n publisher (str): publisher of program.\n uninstall_key (str): unicode string of uninstall registry key for program.\n version (str): version of program.\n \"\"\"\n\n DATA_TYPE = 'windows:registry:amcache:programs'\n\n def __init__(self):\n \"\"\"Initializes event data.\"\"\"\n super(AMCacheProgramEventData, self).__init__(data_type=self.DATA_TYPE)\n self.entry_type = None\n self.file_paths = None\n self.files = None\n self.language_code = None\n self.msi_package_code = None\n self.msi_product_code = None\n self.name = None\n self.package_code = None\n self.product_code = None\n self.publisher = None\n self.uninstall_key = None\n self.version = None\n\n\nclass AMCacheParser(interface.FileObjectParser):\n \"\"\"AMCache Registry plugin for recently run programs.\"\"\"\n\n NAME = 'amcache'\n DATA_FORMAT = 'AMCache Windows NT Registry (AMCache.hve) file'\n\n # Contains: {value name: attribute name}\n _FILE_REFERENCE_KEY_VALUES = {\n '0': 'product_name',\n '1': 'company_name',\n '3': 'language_code',\n '5': 'file_version',\n '6': 'file_size',\n 'c': 'file_description',\n '15': 'full_path',\n '100': 'program_identifier',\n '101': 'sha1'}\n\n _AMCACHE_COMPILATION_TIME = 'f'\n _AMCACHE_FILE_MODIFICATION_TIME = '11'\n _AMCACHE_FILE_CREATION_TIME = '12'\n _AMCACHE_ENTRY_WRITE_TIME = '17'\n\n _AMCACHE_P_INSTALLATION_TIME = 'a'\n\n _AMCACHE_P_FILES = 'Files'\n\n _PRODUCT_KEY_VALUES = {\n '0': 'name',\n '1': 'version',\n '2': 'publisher',\n '3': 'language_code',\n '6': 'entry_type',\n '7': 'uninstall_key',\n 'd': 'file_paths',\n 'f': 'product_code',\n '10': 'package_code',\n '11': 'msi_product_code',\n '12': 'msi_package_code',\n }\n\n def _GetValueDataAsObject(self, parser_mediator, regf_value):\n \"\"\"Retrieves the value data as an object.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n regf_value (pyregf_value): value.\n\n Returns:\n object: data as a Python type or None if the value cannot be read.\n \"\"\"\n try:\n if regf_value.type in (\n dfwinreg_definitions.REG_SZ,\n dfwinreg_definitions.REG_EXPAND_SZ,\n dfwinreg_definitions.REG_LINK):\n value_data = regf_value.get_data_as_string()\n\n elif regf_value.type in (\n dfwinreg_definitions.REG_DWORD,\n dfwinreg_definitions.REG_DWORD_BIG_ENDIAN,\n dfwinreg_definitions.REG_QWORD):\n value_data = regf_value.get_data_as_integer()\n\n elif regf_value.type == dfwinreg_definitions.REG_MULTI_SZ:\n value_data = list(regf_value.get_data_as_multi_string())\n\n else:\n value_data = regf_value.data\n\n except (IOError, OverflowError) as exception:\n parser_mediator.ProduceExtractionWarning(\n 'Unable to read data 
from value: {0:s} with error: {1!s}'.format(\n regf_value.name, exception))\n return None\n\n return value_data\n\n def _GetValuesFromKey(\n self, parser_mediator, regf_key, names_to_skip=None):\n \"\"\"Retrieves the values from a Windows Registry key.\n\n Where:\n * the default value is represented as \"(default)\";\n * binary data values are represented as \"(# bytes)\", where # contains\n the number of bytes of the data;\n * empty values are represented as \"(empty)\".\n * empty multi value string values are represented as \"[]\".\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n regf_key (pyregf_key): key.\n names_to_skip (Optional[list[str]]): names of values that should\n be skipped.\n\n Returns:\n dict[str, object]: names and data of the values in the key.\n \"\"\"\n names_to_skip = [name.lower() for name in names_to_skip or []]\n\n values_dict = {}\n for regf_value in regf_key.values:\n value_name = regf_value.name or '(default)'\n if value_name.lower() in names_to_skip:\n continue\n\n if regf_value.data is None:\n value_string = '(empty)'\n else:\n value_object = self._GetValueDataAsObject(parser_mediator, regf_value)\n\n if regf_value.type == dfwinreg_definitions.REG_MULTI_SZ:\n value_string = '[{0:s}]'.format(', '.join(value_object or []))\n\n elif regf_value.type in (\n dfwinreg_definitions.REG_DWORD,\n dfwinreg_definitions.REG_DWORD_BIG_ENDIAN,\n dfwinreg_definitions.REG_EXPAND_SZ,\n dfwinreg_definitions.REG_LINK,\n dfwinreg_definitions.REG_QWORD,\n dfwinreg_definitions.REG_SZ):\n value_string = '{0!s}'.format(value_object)\n\n else:\n # Represent remaining types like REG_BINARY and\n # REG_RESOURCE_REQUIREMENT_LIST.\n value_string = '({0:d} bytes)'.format(len(value_object))\n\n values_dict[value_name] = value_string\n\n return values_dict\n\n def _ParseFileKey(self, parser_mediator, file_key):\n \"\"\"Parses a Root\\\\File key.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n file_key (pyregf.key): the File key.\n \"\"\"\n for volume_key in file_key.sub_keys:\n for file_reference_key in volume_key.sub_keys:\n self._ParseFileReferenceKey(parser_mediator, file_reference_key)\n\n def _ParseFileReferenceKey(self, parser_mediator, file_reference_key):\n \"\"\"Parses a file reference key (sub key of Root\\\\File\\\\%VOLUME%) for events.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n file_reference_key (pyregf.key): file reference key.\n \"\"\"\n event_data = AMCacheFileEventData()\n\n try:\n if '0000' in file_reference_key.name:\n # A NTFS file is a combination of MFT entry and sequence number.\n sequence_number, mft_entry = file_reference_key.name.split('0000')\n mft_entry = int(mft_entry, 16)\n sequence_number = int(sequence_number, 16)\n event_data.file_reference = '{0:d}-{1:d}'.format(\n mft_entry, sequence_number)\n else:\n # A FAT file is a single number.\n file_reference = int(file_reference_key.name, 16)\n event_data.file_reference = '{0:d}'.format(file_reference)\n\n except (ValueError, TypeError):\n pass\n\n for value_name, attribute_name in self._FILE_REFERENCE_KEY_VALUES.items():\n value = file_reference_key.get_value_by_name(value_name)\n if not value:\n continue\n\n value_data = self._GetValueDataAsObject(parser_mediator, value)\n if attribute_name == 'sha1' and value_data.startswith('0000'):\n # Strip off 
the 4 leading zero's from the sha1 hash.\n value_data = value_data[4:]\n\n setattr(event_data, attribute_name, value_data)\n\n amcache_time_value = file_reference_key.get_value_by_name(\n self._AMCACHE_ENTRY_WRITE_TIME)\n if amcache_time_value:\n timestamp = amcache_time_value.get_data_as_integer()\n amcache_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n amcache_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n creation_time_value = file_reference_key.get_value_by_name(\n self._AMCACHE_FILE_CREATION_TIME)\n if creation_time_value:\n timestamp = creation_time_value.get_data_as_integer()\n creation_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n creation_time, definitions.TIME_DESCRIPTION_CREATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n modification_time_value = file_reference_key.get_value_by_name(\n self._AMCACHE_FILE_MODIFICATION_TIME)\n if modification_time_value:\n timestamp = modification_time_value.get_data_as_integer()\n modification_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n modification_time, definitions.TIME_DESCRIPTION_MODIFICATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n compilation_time_value = file_reference_key.get_value_by_name(\n self._AMCACHE_COMPILATION_TIME)\n if compilation_time_value:\n timestamp = compilation_time_value.get_data_as_integer()\n link_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n link_time, definitions.TIME_DESCRIPTION_CHANGE)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n def _ParseProgramKey(self, parser_mediator, program_key):\n \"\"\"Parses a program key (a sub key of Root\\\\Programs) for events.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n program_key (pyregf_key): program key.\n \"\"\"\n event_data = AMCacheProgramEventData()\n\n for value_name, attribute_name in self._PRODUCT_KEY_VALUES.items():\n value = program_key.get_value_by_name(value_name)\n if not value:\n continue\n\n value_data = self._GetValueDataAsObject(parser_mediator, value)\n setattr(event_data, attribute_name, value_data)\n\n installation_time_value = program_key.get_value_by_name(\n self._AMCACHE_P_INSTALLATION_TIME)\n if installation_time_value:\n timestamp = installation_time_value.get_data_as_integer()\n installation_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n installation_time, definitions.TIME_DESCRIPTION_INSTALLATION)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n def _ParseProgramsKey(self, parser_mediator, programs_key):\n \"\"\"Parses a Root\\\\Programs key.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n programs_key (pyregf.key): the Programs key.\n \"\"\"\n for program_key in programs_key.sub_keys:\n self._ParseProgramKey(parser_mediator, program_key)\n\n def _ParseRootKey(self, parser_mediator, root_key):\n \"\"\"Parses a Root key.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n root_key (pyregf.key): the Root key.\n \"\"\"\n 
self._ProduceDefaultWindowsRegistryEvent(\n parser_mediator, root_key, '\\\\Root')\n\n key_path_segments = ['', 'Root']\n for sub_key in root_key.sub_keys:\n key_path_segments.append(sub_key.name)\n self._ParseSubKey(parser_mediator, sub_key, key_path_segments)\n key_path_segments.pop()\n\n if sub_key.name == 'File':\n self._ParseFileKey(parser_mediator, sub_key)\n\n elif sub_key.name == 'Programs':\n self._ParseProgramsKey(parser_mediator, sub_key)\n\n def _ParseSubKey(self, parser_mediator, regf_key, key_path_segments):\n \"\"\"Parses a sub key.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n regf_key (pyregf.key): the key.\n key_path_segments (list[str]): key path segments.\n \"\"\"\n key_path = '\\\\'.join(key_path_segments)\n self._ProduceDefaultWindowsRegistryEvent(\n parser_mediator, regf_key, key_path)\n\n for sub_key in regf_key.sub_keys:\n key_path_segments.append(sub_key.name)\n self._ParseSubKey(parser_mediator, sub_key, key_path_segments)\n key_path_segments.pop()\n\n def _ProduceDefaultWindowsRegistryEvent(\n self, parser_mediator, regf_key, key_path, names_to_skip=None):\n \"\"\"Produces a default Windows Registry event.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n regf_key (pyregf_key): key.\n key_path (str): key path.\n names_to_skip (Optional[list[str]]): names of values that should\n be skipped.\n \"\"\"\n values_dict = self._GetValuesFromKey(\n parser_mediator, regf_key, names_to_skip=names_to_skip)\n\n event_data = windows_events.WindowsRegistryEventData()\n event_data.key_path = key_path\n event_data.values = ' '.join([\n '{0:s}: {1!s}'.format(name, value)\n for name, value in sorted(values_dict.items())]) or None\n\n timestamp = regf_key.get_last_written_time_as_integer()\n last_written_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n event = time_events.DateTimeValuesEvent(\n last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)\n parser_mediator.ProduceEventWithEventData(event, event_data)\n\n def ParseFileObject(self, parser_mediator, file_object):\n \"\"\"Parses an AMCache.hve file-like object for events.\n\n Args:\n parser_mediator (ParserMediator): mediates interactions between parsers\n and other components, such as storage and dfvfs.\n file_object (dfvfs.FileIO): file-like object.\n \"\"\"\n regf_file = pyregf.file()\n try:\n regf_file.open_file_object(file_object)\n except IOError:\n # The error is currently ignored -> see TODO above related to the\n # fixing of handling multiple parsers for the same file format.\n return\n\n root_key = regf_file.get_key_by_path('Root')\n if root_key:\n self._ParseRootKey(parser_mediator, root_key)\n else:\n parser_mediator.ProduceExtractionWarning(\n 'Root key missing from AMCache.hve file.')\n\n regf_file.close()\n\n\nmanager.ParsersManager.RegisterParser(AMCacheParser)\n", "id": "7335631", "language": "Python", "matching_score": 2.0224056243896484, "max_stars_count": 2, "path": "plaso/parsers/amcache.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Output module field formatting helper.\"\"\"\n\nimport abc\nimport csv\nimport datetime\nimport os\nimport pytz\n\nfrom dfvfs.lib import definitions as dfvfs_definitions\n\nfrom plaso.lib import errors\nfrom plaso.output import logger\n\n\nclass EventFormattingHelper(object):\n \"\"\"Output module event formatting helper.\"\"\"\n\n def __init__(self, output_mediator):\n 
\"\"\"Initializes an event formatting helper.\n\n Args:\n output_mediator (OutputMediator): output mediator.\n \"\"\"\n super(EventFormattingHelper, self).__init__()\n self._output_mediator = output_mediator\n\n @abc.abstractmethod\n def GetFormattedEvent(self, event, event_data, event_data_stream, event_tag):\n \"\"\"Retrieves a string representation of the event.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n event_tag (EventTag): event tag.\n\n Returns:\n str: string representation of the event.\n \"\"\"\n\n\nclass FieldFormattingHelper(object):\n \"\"\"Output module field formatting helper.\"\"\"\n\n # Maps the name of a field to callback function that formats the field value.\n _FIELD_FORMAT_CALLBACKS = {}\n\n def __init__(self, output_mediator):\n \"\"\"Initializes a field formatting helper.\n\n Args:\n output_mediator (OutputMediator): output mediator.\n \"\"\"\n super(FieldFormattingHelper, self).__init__()\n self._output_mediator = output_mediator\n self._source_mappings = {}\n\n # The field format callback methods require specific arguments hence\n # the check for unused arguments is disabled here.\n # pylint: disable=unused-argument\n\n def _FormatDateTime(self, event, event_data, event_data_stream):\n \"\"\"Formats a date and time field in ISO 8601 format.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: date and time field with time zone offset.\n \"\"\"\n # For now check if event.timestamp is set, to mimic existing behavior of\n # using 0000-00-00T00:00:00+00:00 for 0 timestamp values.\n if (event.date_time and event.timestamp and\n self._output_mediator.timezone == pytz.UTC):\n iso8601_string = event.date_time.CopyToDateTimeStringISO8601()\n if not iso8601_string:\n iso8601_string = '0000-00-00T00:00:00+00:00'\n\n elif iso8601_string[10] == 'T' and iso8601_string[-1] == 'Z':\n iso8601_string = '{0:s}+00:00'.format(iso8601_string[:-1])\n\n else:\n if event.date_time:\n timestamp = event.date_time.GetPlasoTimestamp()\n else:\n timestamp = event.timestamp\n\n iso8601_string = '0000-00-00T00:00:00+00:00'\n if timestamp:\n try:\n datetime_object = datetime.datetime(\n 1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)\n datetime_object += datetime.timedelta(microseconds=timestamp)\n datetime_object = datetime_object.astimezone(\n self._output_mediator.timezone)\n\n iso8601_string = datetime_object.isoformat()\n\n except (OverflowError, TypeError) as exception:\n self._ReportEventError(event, event_data, (\n 'unable to copy timestamp: {0!s} to a human readable date and '\n 'time with error: {1!s}. 
Defaulting to: \"{2:s}\"').format(\n timestamp, exception, iso8601_string))\n\n return iso8601_string\n\n def _FormatDisplayName(self, event, event_data, event_data_stream):\n \"\"\"Formats the display name.\n\n The display_name field can be set as an attribute to event_data otherwise\n it is derived from the path specification.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: date field.\n \"\"\"\n display_name = getattr(event_data, 'display_name', None)\n if not display_name:\n path_spec = getattr(event_data_stream, 'path_spec', None)\n if not path_spec:\n path_spec = getattr(event_data, 'pathspec', None)\n\n if path_spec:\n display_name = self._output_mediator.GetDisplayNameForPathSpec(\n path_spec)\n else:\n display_name = '-'\n\n return display_name\n\n def _FormatFilename(self, event, event_data, event_data_stream):\n \"\"\"Formats the filename.\n\n The filename field can be set as an attribute to event_data otherwise\n it is derived from the path specification.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: date field.\n \"\"\"\n filename = getattr(event_data, 'filename', None)\n if not filename:\n path_spec = getattr(event_data_stream, 'path_spec', None)\n if not path_spec:\n path_spec = getattr(event_data, 'pathspec', None)\n\n if path_spec:\n filename = self._output_mediator.GetRelativePathForPathSpec(path_spec)\n else:\n filename = '-'\n\n return filename\n\n def _FormatHostname(self, event, event_data, event_data_stream):\n \"\"\"Formats a hostname field.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: hostname field.\n \"\"\"\n return self._output_mediator.GetHostname(event_data)\n\n def _FormatInode(self, event, event_data, event_data_stream):\n \"\"\"Formats an inode field.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: inode field.\n \"\"\"\n inode = getattr(event_data, 'inode', None)\n\n # Note that inode can contain 0.\n if inode is None:\n path_specification = getattr(event_data_stream, 'path_spec', None)\n if not path_specification:\n # Note that support for event_data.pathspec is kept for backwards\n # compatibility.\n path_specification = getattr(event_data, 'pathspec', None)\n\n if path_specification:\n if path_specification.type_indicator in (\n dfvfs_definitions.TYPE_INDICATOR_APFS,\n dfvfs_definitions.TYPE_INDICATOR_HFS):\n inode = getattr(path_specification, 'identifier', None)\n\n elif path_specification.type_indicator == (\n dfvfs_definitions.TYPE_INDICATOR_NTFS):\n inode = getattr(path_specification, 'mft_entry', None)\n\n elif path_specification.type_indicator in (\n dfvfs_definitions.TYPE_INDICATOR_EXT,\n dfvfs_definitions.TYPE_INDICATOR_TSK):\n # Note that inode can contain a TSK metadata address.\n inode = getattr(path_specification, 'inode', None)\n\n if inode is None:\n inode = '-'\n\n elif isinstance(inode, int):\n inode = '{0:d}'.format(inode)\n\n return inode\n\n def _FormatMACB(self, event, event_data, event_data_stream):\n \"\"\"Formats a legacy MACB representation field.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: 
MACB field.\n \"\"\"\n return self._output_mediator.GetMACBRepresentation(event, event_data)\n\n def _FormatMessage(self, event, event_data, event_data_stream):\n \"\"\"Formats a message field.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: message field.\n\n Raises:\n NoFormatterFound: if no message formatter can be found to match the data\n type in the event data.\n WrongFormatter: if the event data cannot be formatted by the message\n formatter.\n \"\"\"\n message_formatter = self._output_mediator.GetMessageFormatter(\n event_data.data_type)\n if not message_formatter:\n raise errors.NoFormatterFound((\n 'Unable to find message formatter event with data type: '\n '{0:s}.').format(event_data.data_type))\n\n event_values = event_data.CopyToDict()\n message_formatter.FormatEventValues(event_values)\n\n if event_data.data_type in ('windows:evt:record', 'windows:evtx:record'):\n event_values['message_string'] = self._FormatWindowsEventLogMessage(\n event, event_data, event_data_stream)\n\n return message_formatter.GetMessage(event_values)\n\n def _FormatMessageShort(self, event, event_data, event_data_stream):\n \"\"\"Formats a short message field.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: short message field.\n\n Raises:\n NoFormatterFound: if no message formatter can be found to match the data\n type in the event data.\n WrongFormatter: if the event data cannot be formatted by the message\n formatter.\n \"\"\"\n message_formatter = self._output_mediator.GetMessageFormatter(\n event_data.data_type)\n if not message_formatter:\n raise errors.NoFormatterFound((\n 'Unable to find message formatter event with data type: '\n '{0:s}.').format(event_data.data_type))\n\n event_values = event_data.CopyToDict()\n message_formatter.FormatEventValues(event_values)\n\n if event_data.data_type in ('windows:evt:record', 'windows:evtx:record'):\n event_values['message_string'] = self._FormatWindowsEventLogMessage(\n event, event_data, event_data_stream)\n\n return message_formatter.GetMessageShort(event_values)\n\n def _FormatSource(self, event, event_data, event_data_stream):\n \"\"\"Formats a source field.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: source field.\n\n Raises:\n NoFormatterFound: if no event formatter can be found to match the data\n type in the event data.\n \"\"\"\n if not self._source_mappings:\n self._ReadSourceMappings()\n\n data_type = getattr(event_data, 'data_type', 'default')\n _, source = self._source_mappings.get(data_type, (None, None))\n if source is None:\n return 'N/A'\n\n return source\n\n def _FormatSourceShort(self, event, event_data, event_data_stream):\n \"\"\"Formats a short source field.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: short source field.\n\n Raises:\n NoFormatterFound: If no event formatter can be found to match the data\n type in the event data.\n \"\"\"\n if not self._source_mappings:\n self._ReadSourceMappings()\n\n data_type = getattr(event_data, 'data_type', None)\n source_short, _ = self._source_mappings.get(data_type, (None, None))\n if source_short is None:\n return 'N/A'\n\n return source_short\n\n def 
_FormatTag(self, event_tag):\n \"\"\"Formats an event tag field.\n\n Args:\n event_tag (EventTag): event tag or None if not set.\n\n Returns:\n str: event tag labels or \"-\" if event tag is not set.\n \"\"\"\n if not event_tag:\n return '-'\n\n return ' '.join(event_tag.labels)\n\n def _FormatTime(self, event, event_data, event_data_stream):\n \"\"\"Formats a time field.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: time field.\n \"\"\"\n # For now check if event.timestamp is set, to mimic existing behavior of\n # using --:--:-- for 0 timestamp values.\n if (event.date_time and event.timestamp and\n self._output_mediator.timezone == pytz.UTC):\n hours, minutes, seconds = event.date_time.GetTimeOfDay()\n\n else:\n if event.date_time:\n timestamp = event.date_time.GetPlasoTimestamp()\n else:\n timestamp = event.timestamp\n\n hours, minutes, seconds = (None, None, None)\n\n if timestamp:\n try:\n datetime_object = datetime.datetime(\n 1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)\n datetime_object += datetime.timedelta(microseconds=timestamp)\n datetime_object = datetime_object.astimezone(\n self._output_mediator.timezone)\n\n hours, minutes, seconds = (\n datetime_object.hour, datetime_object.minute,\n datetime_object.second)\n\n except (OverflowError, TypeError):\n self._ReportEventError(event, event_data, (\n 'unable to copy timestamp: {0!s} to a human readable time. '\n 'Defaulting to: \"--:--:--\"').format(timestamp))\n\n if None in (hours, minutes, seconds):\n return '--:--:--'\n\n return '{0:02d}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)\n\n def _FormatTimeZone(self, event, event_data, event_data_stream):\n \"\"\"Formats a time zone field.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: time zone field.\n \"\"\"\n if event.date_time:\n timestamp = event.date_time.GetPlasoTimestamp()\n else:\n timestamp = event.timestamp\n\n if not timestamp:\n return '-'\n\n # For tzname to work the datetime object must be naive (without a time\n # zone).\n try:\n datetime_object = datetime.datetime(1970, 1, 1, 0, 0, 0, 0)\n datetime_object += datetime.timedelta(microseconds=timestamp)\n return self._output_mediator.timezone.tzname(datetime_object)\n\n except OverflowError:\n self._ReportEventError(event, event_data, (\n 'unable to copy timestamp: {0!s} to a human readable time zone. 
'\n 'Defaulting to: \"-\"').format(timestamp))\n\n return '-'\n\n def _FormatUsername(self, event, event_data, event_data_stream):\n \"\"\"Formats an username field.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: username field.\n \"\"\"\n return self._output_mediator.GetUsername(event_data)\n\n def _FormatWindowsEventLogMessage(self, event, event_data, event_data_stream):\n \"\"\"Formats a Windows Event Log message field.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n\n Returns:\n str: Windows Event Log message field or None if not available.\n \"\"\"\n message_string = None\n source_name = getattr(event_data, 'source_name', None)\n message_identifier = getattr(event_data, 'message_identifier', None)\n if source_name and message_identifier:\n windows_event_message = self._output_mediator.GetWindowsEventMessage(\n source_name, message_identifier)\n if windows_event_message:\n try:\n message_string = windows_event_message.format(*event_data.strings)\n except IndexError:\n # Unable to create the message string.\n # TODO: consider returning the unformatted message string.\n pass\n\n return message_string\n\n # pylint: enable=unused-argument\n\n def _ReadSourceMappings(self):\n \"\"\"Reads the source mappings from the sources.config data file.\"\"\"\n self._source_mappings = {}\n\n try:\n sources_data_file = os.path.join(\n self._output_mediator.data_location, 'sources.config')\n\n with open(sources_data_file, encoding='utf8') as file_object:\n csv_reader = csv.reader(file_object, delimiter='\\t')\n # Note that csv.reader returns a list per row.\n header_row = next(csv_reader)\n if header_row == ['data_type', 'short_source', 'source']:\n for row in csv_reader:\n try:\n self._source_mappings[row[0]] = (row[1], row[2])\n except IndexError:\n logger.error('Invalid source mapping: {0!s}'.format(row))\n\n except (IOError, TypeError, csv.Error):\n pass\n\n def _ReportEventError(self, event, event_data, error_message):\n \"\"\"Reports an event related error.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n error_message (str): error message.\n \"\"\"\n event_identifier = event.GetIdentifier()\n event_identifier_string = event_identifier.CopyToString()\n display_name = getattr(event_data, 'display_name', None) or 'N/A'\n parser_chain = getattr(event_data, 'parser', None) or 'N/A'\n error_message = (\n 'Event: {0!s} data type: {1:s} display name: {2:s} '\n 'parser chain: {3:s} with error: {4:s}').format(\n event_identifier_string, event_data.data_type, display_name,\n parser_chain, error_message)\n logger.error(error_message)\n\n def GetFormattedField(\n self, field_name, event, event_data, event_data_stream, event_tag):\n \"\"\"Formats the specified field.\n\n Args:\n field_name (str): name of the field.\n event (EventObject): event.\n event_data (EventData): event data.\n event_data_stream (EventDataStream): event data stream.\n event_tag (EventTag): event tag.\n\n Returns:\n str: value of the field.\n \"\"\"\n callback_name = self._FIELD_FORMAT_CALLBACKS.get(field_name, None)\n if callback_name == '_FormatTag':\n return self._FormatTag(event_tag)\n\n callback_function = None\n if callback_name:\n callback_function = getattr(self, callback_name, None)\n\n if callback_function:\n output_value = callback_function(event, event_data, event_data_stream)\n elif 
hasattr(event_data_stream, field_name):\n output_value = getattr(event_data_stream, field_name, None)\n else:\n output_value = getattr(event_data, field_name, None)\n\n if output_value is None:\n output_value = '-'\n\n elif not isinstance(output_value, str):\n output_value = '{0!s}'.format(output_value)\n\n return output_value\n", "id": "12556875", "language": "Python", "matching_score": 3.457798719406128, "max_stars_count": 2, "path": "plaso/output/formatting_helper.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"The event filter expression parser filter classes.\"\"\"\n\nimport abc\nimport codecs\nimport logging\nimport re\n\nfrom dfdatetime import interface as dfdatetime_interface\n\nfrom plaso.containers import artifacts\nfrom plaso.filters import value_types\nfrom plaso.lib import errors\n\n\nclass Filter(object):\n \"\"\"Filter interface.\n\n Attributes:\n args (list[object]): arguments provided to the filter.\n \"\"\"\n\n def __init__(self, arguments=None):\n \"\"\"Initializes a filter.\n\n Implementations expanders are provided by subclassing ValueExpander.\n\n Args:\n arguments (Optional[object]): arguments.\n \"\"\"\n logging.debug('Adding {0!s}'.format(arguments))\n\n super(Filter, self).__init__()\n self.args = arguments or []\n\n def _CopyValueToString(self, value):\n \"\"\"Copies an event filter value to a string.\n\n Args:\n value (list|int|bytes|str): value to convert.\n\n Returns:\n str: string representation of the argument.\n \"\"\"\n if isinstance(value, list):\n value = [self._CopyValueToString(item) for item in value]\n return ''.join(value)\n\n if isinstance(value, int):\n value = '{0:d}'.format(value)\n\n if not isinstance(value, str):\n return codecs.decode(value, 'utf8', 'ignore')\n return value\n\n @abc.abstractmethod\n def Matches(self, event, event_data, event_data_stream, event_tag):\n \"\"\"Determines if the event, data and tag match the filter.\n\n Args:\n event (EventObject): event to compare against the filter.\n event_data (EventData): event data to compare against the filter.\n event_data_stream (EventDataStream): event data stream.\n event_tag (EventTag): event tag to compare against the filter.\n\n Returns:\n bool: True if the event, data and tag match the filter, False otherwise.\n \"\"\"\n\n\nclass AndFilter(Filter):\n \"\"\"A filter that performs a boolean AND on the arguments.\n\n Note that if no conditions are passed, all objects will pass.\n \"\"\"\n\n def Matches(self, event, event_data, event_data_stream, event_tag):\n \"\"\"Determines if the event, data and tag match the filter.\n\n Args:\n event (EventObject): event to compare against the filter.\n event_data (EventData): event data to compare against the filter.\n event_data_stream (EventDataStream): event data stream.\n event_tag (EventTag): event tag to compare against the filter.\n\n Returns:\n bool: True if the event, data and tag match the filter, False otherwise.\n \"\"\"\n for sub_filter in self.args:\n match = sub_filter.Matches(\n event, event_data, event_data_stream, event_tag)\n if not match:\n return False\n return True\n\n\nclass OrFilter(Filter):\n \"\"\"A filter that performs a boolean OR on the arguments.\n\n Note that if no conditions are passed, all objects will pass.\n \"\"\"\n\n def Matches(self, event, event_data, event_data_stream, event_tag):\n \"\"\"Determines if the event, data and tag match the filter.\n\n Args:\n event (EventObject): event to compare against the filter.\n event_data (EventData): event data to compare against the filter.\n event_data_stream 
(EventDataStream): event data stream.\n event_tag (EventTag): event tag to compare against the filter.\n\n Returns:\n bool: True if the event, data and tag match the filter, False otherwise.\n \"\"\"\n if not self.args:\n return True\n\n for sub_filter in self.args:\n match = sub_filter.Matches(\n event, event_data, event_data_stream, event_tag)\n if match:\n return True\n return False\n\n\nclass Operator(Filter):\n \"\"\"Interface for filters that represent operators.\"\"\"\n\n @abc.abstractmethod\n def Matches(self, event, event_data, event_data_stream, event_tag):\n \"\"\"Determines if the event, data and tag match the filter.\n\n Args:\n event (EventObject): event to compare against the filter.\n event_data (EventData): event data to compare against the filter.\n event_data_stream (EventDataStream): event data stream.\n event_tag (EventTag): event tag to compare against the filter.\n\n Returns:\n bool: True if the event, data and tag match the filter, False otherwise.\n \"\"\"\n\n\nclass IdentityFilter(Operator):\n \"\"\"A filter which always evaluates to True.\"\"\"\n\n def Matches(self, event, event_data, event_data_stream, event_tag):\n \"\"\"Determines if the event, data and tag match the filter.\n\n Args:\n event (EventObject): event to compare against the filter.\n event_data (EventData): event data to compare against the filter.\n event_data_stream (EventDataStream): event data stream.\n event_tag (EventTag): event tag to compare against the filter.\n\n Returns:\n bool: True if the event, data and tag match the filter, False otherwise.\n \"\"\"\n return True\n\n\nclass BinaryOperator(Operator):\n \"\"\"Interface for binary operators.\n\n Attributes:\n left_operand (object): left hand operand.\n right_operand (object): right hand operand.\n \"\"\"\n\n def __init__(self, arguments=None, **kwargs):\n \"\"\"Initializes a binary operator.\n\n Args:\n arguments (Optional[list[str, object]]): operands of the filter.\n\n Raises:\n InvalidNumberOfOperands: if the number of operands provided is not\n supported.\n \"\"\"\n if len(arguments) != 2:\n raise errors.InvalidNumberOfOperands((\n '{0:s} only supports 2 operands, provided were {1:d} '\n 'operands.').format(self.__class__.__name__, len(arguments)))\n\n super(BinaryOperator, self).__init__(arguments=arguments, **kwargs)\n self.left_operand = arguments[0]\n self.right_operand = arguments[1]\n\n @abc.abstractmethod\n def Matches(self, event, event_data, event_data_stream, event_tag):\n \"\"\"Determines if the event, data and tag match the filter.\n\n Args:\n event (EventObject): event to compare against the filter.\n event_data (EventData): event data to compare against the filter.\n event_data_stream (EventDataStream): event data stream.\n event_tag (EventTag): event tag to compare against the filter.\n\n Returns:\n bool: True if the event, data and tag match the filter, False otherwise.\n \"\"\"\n\n\nclass GenericBinaryOperator(BinaryOperator):\n \"\"\"Shared functionality for common binary operators.\"\"\"\n\n _DEPRECATED_ATTRIBUTE_NAMES = frozenset([\n 'message', 'source', 'source_long', 'source_short', 'sourcetype'])\n\n # Attributes that are stored in the event attribute container.\n _EVENT_ATTRIBUTE_NAMES = frozenset(['timestamp', 'timestamp_desc'])\n\n def __init__(self, arguments=None, **kwargs):\n \"\"\"Initializes a generic binary operator.\n\n Args:\n arguments (Optional[list[str, object]]): operands of the filter.\n \"\"\"\n super(GenericBinaryOperator, self).__init__(arguments=arguments, **kwargs)\n self._bool_value = 
True\n\n @abc.abstractmethod\n def _CompareValue(self, event_value, filter_value):\n \"\"\"Compares two values with the operator.\n\n Args:\n event_value (object): value retrieved from the event.\n filter_value (object): value defined by the filter.\n\n Returns:\n bool: True if the values match according to the operator, False otherwise.\n \"\"\"\n\n def _GetValue(\n self, attribute_name, event, event_data, event_data_stream, event_tag):\n \"\"\"Retrieves the value of a specific event, data or tag attribute.\n\n Args:\n attribute_name (str): name of the attribute to retrieve the value from.\n event (EventObject): event to retrieve the value from.\n event_data (EventData): event data to retrieve the value from.\n event_data_stream (EventDataStream): event data stream.\n event_tag (EventTag): event tag to retrieve the value from.\n\n Returns:\n object: attribute value or None if not available.\n \"\"\"\n if attribute_name in self._DEPRECATED_ATTRIBUTE_NAMES:\n logging.warning(\n 'Expansion of {0:s} in event filter no longer supported'.format(\n attribute_name))\n\n if attribute_name in self._EVENT_ATTRIBUTE_NAMES:\n attribute_value = getattr(event, attribute_name, None)\n\n # Make sure timestamp attribute values are (dfdatetime) date time objects.\n # TODO: remove when timestamp values are (de)serialized as dfdatetime\n # objects.\n if attribute_name == 'timestamp' and not isinstance(attribute_value, (\n dfdatetime_interface.DateTimeValues, value_types.DateTimeValueType)):\n attribute_value = value_types.DateTimeValueType(attribute_value)\n\n elif (event_data_stream and\n attribute_name in event_data_stream.GetAttributeNames()):\n attribute_value = getattr(event_data_stream, attribute_name, None)\n\n elif attribute_name == 'tag':\n attribute_value = getattr(event_tag, 'labels', None)\n\n else:\n attribute_value = getattr(event_data, attribute_name, None)\n\n return attribute_value\n\n def FlipBool(self):\n \"\"\"Negates the internal boolean value attribute.\"\"\"\n logging.debug('Negative matching.')\n self._bool_value = not self._bool_value\n\n def Matches(self, event, event_data, event_data_stream, event_tag):\n \"\"\"Determines if the event, data and tag match the filter.\n\n Args:\n event (EventObject): event to compare against the filter.\n event_data (EventData): event data to compare against the filter.\n event_data_stream (EventDataStream): event data stream.\n event_tag (EventTag): event tag to compare against the filter.\n\n Returns:\n bool: True if the event, data and tag match the filter, False otherwise.\n \"\"\"\n value = self._GetValue(\n self.left_operand, event, event_data, event_data_stream, event_tag)\n\n if value and self._CompareValue(value, self.right_operand):\n return self._bool_value\n return not self._bool_value\n\n\nclass EqualsOperator(GenericBinaryOperator):\n \"\"\"Equals (==) operator.\"\"\"\n\n def _CompareValue(self, event_value, filter_value):\n \"\"\"Compares if two values are equal.\n\n Args:\n event_value (object): value retrieved from the event.\n filter_value (object): value defined by the filter.\n\n Returns:\n bool: True if the values are equal, False otherwise.\n \"\"\"\n return event_value == filter_value\n\n\nclass NotEqualsOperator(GenericBinaryOperator):\n \"\"\"Not equals (!=) operator.\"\"\"\n\n def _CompareValue(self, event_value, filter_value):\n \"\"\"Compares if two values are not equal.\n\n Args:\n event_value (object): value retrieved from the event.\n filter_value (object): value defined by the filter.\n\n Returns:\n bool: True if the 
values are not equal, False otherwise.\n \"\"\"\n return event_value != filter_value\n\n\nclass LessThanOperator(GenericBinaryOperator):\n \"\"\"Less than (<) operator.\"\"\"\n\n def _CompareValue(self, event_value, filter_value):\n \"\"\"Compares if the event value is less than the second.\n\n Args:\n event_value (object): value retrieved from the event.\n filter_value (object): value defined by the filter.\n\n Returns:\n bool: True if the event value is less than the second, False otherwise.\n \"\"\"\n return event_value < filter_value\n\n\nclass LessEqualOperator(GenericBinaryOperator):\n \"\"\"Less than or equals (<=) operator.\"\"\"\n\n def _CompareValue(self, event_value, filter_value):\n \"\"\"Compares if the event value is less than or equals the second.\n\n Args:\n event_value (object): value retrieved from the event.\n filter_value (object): value defined by the filter.\n\n Returns:\n bool: True if the event value is less than or equals the second, False\n otherwise.\n \"\"\"\n return event_value <= filter_value\n\n\nclass GreaterThanOperator(GenericBinaryOperator):\n \"\"\"Greater than (>) operator.\"\"\"\n\n def _CompareValue(self, event_value, filter_value):\n \"\"\"Compares if the event value is greater than the second.\n\n Args:\n event_value (object): value retrieved from the event.\n filter_value (object): value defined by the filter.\n\n Returns:\n bool: True if the event value is greater than the second, False otherwise.\n \"\"\"\n return event_value > filter_value\n\n\nclass GreaterEqualOperator(GenericBinaryOperator):\n \"\"\"Greater than or equals (>=) operator.\"\"\"\n\n def _CompareValue(self, event_value, filter_value):\n \"\"\"Compares if the event value is greater than or equals the second.\n\n Args:\n event_value (object): value retrieved from the event.\n filter_value (object): value defined by the filter.\n\n Returns:\n bool: True if the event value is greater than or equals the second, False\n otherwise.\n \"\"\"\n return event_value >= filter_value\n\n\nclass Contains(GenericBinaryOperator):\n \"\"\"Operator to determine if a value contains another value.\"\"\"\n\n def _CompareValue(self, event_value, filter_value):\n \"\"\"Compares if the second value is part of the first.\n\n Note that this method will do a case insensitive comparison if the first\n value is a string.\n\n Args:\n event_value (object): value retrieved from the event.\n filter_value (object): value defined by the filter.\n\n Returns:\n bool: True if the second value is part of the first, False otherwise.\n \"\"\"\n try:\n if isinstance(filter_value, artifacts.PathArtifact):\n return filter_value.ContainedIn(event_value)\n\n if isinstance(event_value, str):\n return filter_value.lower() in event_value.lower()\n\n return filter_value in event_value\n except (AttributeError, TypeError):\n return False\n\n\n# TODO: Change to an N-ary Operator?\nclass InSet(GenericBinaryOperator):\n \"\"\"Operator to determine if a value is part of another value.\"\"\"\n\n def _CompareValue(self, event_value, filter_value):\n \"\"\"Compares if the event value is part of the second.\n\n Note that this method will do a case insensitive string comparison if\n the event value is a string.\n\n Args:\n event_value (object): value retrieved from the event.\n filter_value (object): value defined by the filter.\n\n Returns:\n bool: True if the event value is part of the second, False otherwise.\n \"\"\"\n if event_value in filter_value:\n return True\n\n # event_value might be an iterable\n # first we need to skip strings or 
we'll do silly things\n if isinstance(event_value, (bytes, str)):\n return False\n\n try:\n for value in event_value:\n if value not in filter_value:\n return False\n return True\n except TypeError:\n return False\n\n\n# TODO: is GenericBinaryOperator the most suitable super class here?\n# Would BinaryOperator be a better fit?\nclass Regexp(GenericBinaryOperator):\n \"\"\"Operator to determine if a value matches a regular expression.\n\n Attributes:\n compiled_re (???): compiled regular expression.\n \"\"\"\n\n def __init__(self, arguments=None, **kwargs):\n \"\"\"Initializes a regular expression operator.\n\n This operator uses case sensitive comparison.\n\n Args:\n arguments (Optional[object]): operands of the filter.\n\n Raises:\n ValueError: if the regular expression is malformed.\n \"\"\"\n super(Regexp, self).__init__(arguments=arguments, **kwargs)\n\n # Note that right_operand is not necessarily a string.\n logging.debug('Compiled: {0!s}'.format(self.right_operand))\n\n try:\n expression = self._CopyValueToString(self.right_operand)\n compiled_re = re.compile(expression, re.DOTALL)\n except re.error:\n raise ValueError('Regular expression \"{0!s}\" is malformed.'.format(\n self.right_operand))\n\n self.compiled_re = compiled_re\n\n def _CompareValue(self, event_value, filter_value):\n \"\"\"Compares if the event value matches a regular expression.\n\n Args:\n event_value (object): value retrieved from the event.\n filter_value (object): value defined by the filter.\n\n Returns:\n bool: True if the event value matches the regular expression, False\n otherwise.\n \"\"\"\n try:\n string_value = self._CopyValueToString(event_value)\n if self.compiled_re.search(string_value):\n return True\n except TypeError:\n pass\n\n return False\n\n\nclass RegexpInsensitive(Regexp):\n \"\"\"Operator to determine if a value matches a regular expression.\"\"\"\n\n def __init__(self, arguments=None, **kwargs):\n \"\"\"Initializes a regular expression operator.\n\n This operator uses case insensitive comparison.\n\n Args:\n arguments (Optional[object]): operands of the filter.\n\n Raises:\n ValueError: if the regular expression is malformed.\n \"\"\"\n super(RegexpInsensitive, self).__init__(arguments=arguments, **kwargs)\n\n # Note that right_operand is not necessarily a string.\n logging.debug('Compiled: {0!s}'.format(self.right_operand))\n\n try:\n expression = self._CopyValueToString(self.right_operand)\n compiled_re = re.compile(expression, re.I | re.DOTALL)\n except re.error:\n raise ValueError('Regular expression \"{0!s}\" is malformed.'.format(\n self.right_operand))\n\n self.compiled_re = compiled_re\n", "id": "11943278", "language": "Python", "matching_score": 3.3035552501678467, "max_stars_count": 2, "path": "plaso/filters/filters.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Containers related functions and classes for testing.\"\"\"\n\nfrom dfdatetime import posix_time as dfdatetime_posix_time\n\nfrom plaso.containers import events\nfrom plaso.containers import interface\n\nfrom tests import test_lib as shared_test_lib\n\n\ndef CreateEventFromValues(event_values):\n \"\"\"Creates an event and event data from event values.\n\n Args:\n event_values (dict[str, str]): event values.\n\n Returns:\n tuple[EventObject, EventData, EventDataStream]: event, event data and\n event data stream for testing.\n \"\"\"\n copy_of_event_values = dict(event_values)\n\n event = events.EventObject()\n for attribute_name in ('timestamp', 'timestamp_desc'):\n attribute_value = 
copy_of_event_values.pop(attribute_name, None)\n if attribute_value is not None:\n if attribute_name == 'timestamp' and isinstance(attribute_value, str):\n attribute_value = shared_test_lib.CopyTimestampFromSring(\n attribute_value)\n setattr(event, attribute_name, attribute_value)\n\n event.date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(\n timestamp=event.timestamp)\n\n event_data_stream = events.EventDataStream()\n for attribute_name in ('path_spec', 'md5_hash', 'sha256_hash'):\n attribute_value = copy_of_event_values.pop(attribute_name, None)\n if attribute_value is not None:\n setattr(event_data_stream, attribute_name, attribute_value)\n\n event_data = events.EventData()\n event_data.CopyFromDict(copy_of_event_values)\n\n return event, event_data, event_data_stream\n\n\ndef CreateEventsFromValues(event_values_list):\n \"\"\"Creates events and event data from a list of event values.\n\n Args:\n event_values_list (list[dict[str, str]]): list of event values.\n\n Yields:\n tuple[EventObject, EventData, EventDataStream]: event, event data and\n event data stream for testing.\n \"\"\"\n for event_values in event_values_list:\n yield CreateEventFromValues(event_values)\n\n\nclass TestAttributeContainer(interface.AttributeContainer):\n \"\"\"Test attribute container.\"\"\"\n\n CONTAINER_TYPE = 'test_attribute_container'\n", "id": "11025783", "language": "Python", "matching_score": 2.112210750579834, "max_stars_count": 2, "path": "tests/containers/test_lib.py" } ]
2.425615
antoninschrab
[ { "content": "\"\"\"\nWe generate data for the MNIST Normalizing Flow \nexperiment as proposed in Section 4.4 of our paper\nKSD Aggregated Goodness-of-fit Test\n<NAME>, <NAME>, <NAME>\nhttps://arxiv.org/pdf/2202.00824.pdf\nThe data is saved in the directory data/NF_MNIST.\n\nWe use the code from Tutorial 11: Normalizing Flows for image modeling\nhttps://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial11/NF_image_modeling.html\n\"\"\"\n\n## Standard libraries\nimport os\nimport math\nimport time\nimport numpy as np\nfrom pathlib import Path\n\n## PyTorch\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as data\nimport torch.optim as optim\n\n# Torchvision\nimport torchvision\nfrom torchvision.datasets import MNIST\nfrom torchvision import transforms\n\n# PyTorch Lightning\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint\n\n# Path to the folder where the datasets are/should be downloaded (e.g. MNIST)\nDATASET_PATH = \"mnist/data_mnist\"\n# Path to the folder where the pretrained models are saved\nCHECKPOINT_PATH = \"mnist/saved_models/tutorial11\"\n\n# Deactivate all logging Pytorch Lightning messages\nimport logging\n\npl_loggers = [\n logging.getLogger(name)\n for name in logging.root.manager.loggerDict\n if \"pytorch_lightning\" in name\n]\nfor logger in pl_loggers:\n logger.propagate = False\n\n# Setting the seed\npl.seed_everything(42)\n\n# Ensure that all operations are deterministic on GPU (if used) for reproducibility\ntorch.backends.cudnn.determinstic = True\ntorch.backends.cudnn.benchmark = False\n\n# Fetching the device that will be used throughout this notebook\ndevice = (\n torch.device(\"cpu\") if not torch.cuda.is_available() else torch.device(\"cuda:0\")\n)\nprint(\"Using device\", device)\n\nimport urllib.request\nfrom urllib.error import HTTPError\n\n# Github URL where saved models are stored for this tutorial\nbase_url = \"https://raw.githubusercontent.com/phlippe/saved_models/main/tutorial11/\"\n# Files to download\npretrained_files = [\"MNISTFlow_multiscale.ckpt\"]\n# Create checkpoint path if it doesn't exist yet\nos.makedirs(CHECKPOINT_PATH, exist_ok=True)\n\n# For each file, check whether it already exists. If not, try downloading it.\nfor file_name in pretrained_files:\n file_path = os.path.join(CHECKPOINT_PATH, file_name)\n if not os.path.isfile(file_path):\n file_url = base_url + file_name\n print(f\"Downloading {file_url}...\")\n try:\n urllib.request.urlretrieve(file_url, file_path)\n except HTTPError as e:\n print(\n \"Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\\n\",\n e,\n )\n\n# Convert images from 0-1 to 0-255 (integers)\ndef discretize(sample):\n return (sample * 255).to(torch.int32)\n\n\n# Transformations applied on each image => make them a tensor and discretize\ntransform = transforms.Compose([transforms.ToTensor(), discretize])\n\n# Loading the training dataset. 
We need to split it into a training and validation part\ntrain_dataset = MNIST(root=DATASET_PATH, train=True, transform=transform, download=True)\npl.seed_everything(42)\ntrain_set, val_set = torch.utils.data.random_split(train_dataset, [50000, 10000])\n\n# Loading the test set\ntest_set = MNIST(root=DATASET_PATH, train=False, transform=transform, download=True)\n\n# We define a set of data loaders that we can use for various purposes later.\n# Note that for actually training a model, we will use different data loaders\n# with a lower batch size.\ntrain_loader = data.DataLoader(\n train_set, batch_size=256, shuffle=False, drop_last=False\n)\nval_loader = data.DataLoader(\n val_set, batch_size=64, shuffle=False, drop_last=False, num_workers=4\n)\ntest_loader = data.DataLoader(\n test_set, batch_size=64, shuffle=False, drop_last=False, num_workers=4\n)\n\n\ndef show_imgs(imgs, title=None, row_size=4):\n # Form a grid of pictures (we use max. 8 columns)\n num_imgs = imgs.shape[0] if isinstance(imgs, torch.Tensor) else len(imgs)\n is_int = (\n imgs.dtype == torch.int32\n if isinstance(imgs, torch.Tensor)\n else imgs[0].dtype == torch.int32\n )\n nrow = min(num_imgs, row_size)\n ncol = int(math.ceil(num_imgs / nrow))\n imgs = torchvision.utils.make_grid(\n imgs, nrow=nrow, pad_value=128 if is_int else 0.5\n )\n np_imgs = imgs.cpu().numpy()\n # Plot the grid\n plt.figure(figsize=(1.5 * nrow, 1.5 * ncol))\n plt.imshow(np.transpose(np_imgs, (1, 2, 0)), interpolation=\"nearest\")\n plt.axis(\"off\")\n if title is not None:\n plt.title(title)\n plt.show()\n plt.close()\n\n\nclass ImageFlow(pl.LightningModule):\n def __init__(self, flows, import_samples=8):\n \"\"\"\n Inputs:\n flows - A list of flows (each a nn.Module) that should be applied on the images.\n import_samples - Number of importance samples to use during testing (see explanation below). 
Can be changed at any time\n \"\"\"\n super().__init__()\n self.flows = nn.ModuleList(flows)\n self.import_samples = import_samples\n # Create prior distribution for final latent space\n self.prior = torch.distributions.normal.Normal(loc=0.0, scale=1.0)\n # Example input for visualizing the graph\n self.example_input_array = train_set[0][0].unsqueeze(dim=0)\n\n def forward(self, imgs):\n # The forward function is only used for visualizing the graph\n return self._get_likelihood(imgs)\n\n def encode(self, imgs):\n # Given a batch of images, return the latent representation z and ldj of the transformations\n z, ldj = imgs, torch.zeros(imgs.shape[0], device=self.device)\n for flow in self.flows:\n z, ldj = flow(z, ldj, reverse=False)\n return z, ldj\n\n def _get_likelihood(self, imgs, return_ll=False):\n \"\"\"\n Given a batch of images, return the likelihood of those.\n If return_ll is True, this function returns the log likelihood of the input.\n Otherwise, the ouptut metric is bits per dimension (scaled negative log likelihood)\n \"\"\"\n z, ldj = self.encode(imgs)\n log_pz = self.prior.log_prob(z).sum(dim=[1, 2, 3])\n log_px = ldj + log_pz\n nll = -log_px\n # Calculating bits per dimension\n bpd = nll * np.log2(np.exp(1)) / np.prod(imgs.shape[1:])\n return bpd.mean() if not return_ll else log_px\n\n @torch.no_grad()\n def sample(self, img_shape, z_init=None):\n \"\"\"\n Sample a batch of images from the flow.\n \"\"\"\n # Sample latent representation from prior\n if z_init is None:\n z = self.prior.sample(sample_shape=img_shape).to(device)\n else:\n z = z_init.to(device)\n\n # Transform z to x by inverting the flows\n ldj = torch.zeros(img_shape[0], device=device)\n for flow in reversed(self.flows):\n z, ldj = flow(z, ldj, reverse=True)\n return z\n\n def configure_optimizers(self):\n optimizer = optim.Adam(self.parameters(), lr=1e-3)\n # An scheduler is optional, but can help in flows to get the last bpd improvement\n scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.99)\n return [optimizer], [scheduler]\n\n def training_step(self, batch, batch_idx):\n # Normalizing flows are trained by maximum likelihood => return bpd\n loss = self._get_likelihood(batch[0])\n self.log(\"train_bpd\", loss)\n return loss\n\n def validation_step(self, batch, batch_idx):\n loss = self._get_likelihood(batch[0])\n self.log(\"val_bpd\", loss)\n\n def test_step(self, batch, batch_idx):\n # Perform importance sampling during testing => estimate likelihood M times for each image\n samples = []\n for _ in range(self.import_samples):\n img_ll = self._get_likelihood(batch[0], return_ll=True)\n samples.append(img_ll)\n img_ll = torch.stack(samples, dim=-1)\n\n # To average the probabilities, we need to go from log-space to exp, and back to log.\n # Logsumexp provides us a stable implementation for this\n img_ll = torch.logsumexp(img_ll, dim=-1) - np.log(self.import_samples)\n\n # Calculate final bpd\n bpd = -img_ll * np.log2(np.exp(1)) / np.prod(batch[0].shape[1:])\n bpd = bpd.mean()\n\n self.log(\"test_bpd\", bpd)\n\n\nclass Dequantization(nn.Module):\n def __init__(self, alpha=1e-5, quants=256):\n \"\"\"\n Inputs:\n alpha - small constant that is used to scale the original input.\n Prevents dealing with values very close to 0 and 1 when inverting the sigmoid\n quants - Number of possible discrete values (usually 256 for 8-bit image)\n \"\"\"\n super().__init__()\n self.alpha = alpha\n self.quants = quants\n\n def forward(self, z, ldj, reverse=False):\n if not reverse:\n z, ldj = self.dequant(z, ldj)\n z, 
ldj = self.sigmoid(z, ldj, reverse=True)\n else:\n z, ldj = self.sigmoid(z, ldj, reverse=False)\n z = z * self.quants\n ldj += np.log(self.quants) * np.prod(z.shape[1:])\n z = torch.floor(z).clamp(min=0, max=self.quants - 1).to(torch.int32)\n return z, ldj\n\n def sigmoid(self, z, ldj, reverse=False):\n # Applies an invertible sigmoid transformation\n if not reverse:\n ldj += (-z - 2 * F.softplus(-z)).sum(dim=[1, 2, 3])\n z = torch.sigmoid(z)\n else:\n z = (\n z * (1 - self.alpha) + 0.5 * self.alpha\n ) # Scale to prevent boundaries 0 and 1\n ldj += np.log(1 - self.alpha) * np.prod(z.shape[1:])\n ldj += (-torch.log(z) - torch.log(1 - z)).sum(dim=[1, 2, 3])\n z = torch.log(z) - torch.log(1 - z)\n return z, ldj\n\n def dequant(self, z, ldj):\n # Transform discrete values to continuous volumes\n z = z.to(torch.float32)\n z = z + torch.rand_like(z).detach()\n z = z / self.quants\n ldj -= np.log(self.quants) * np.prod(z.shape[1:])\n return z, ldj\n\n\nclass VariationalDequantization(Dequantization):\n def __init__(self, var_flows, alpha=1e-5):\n \"\"\"\n Inputs:\n var_flows - A list of flow transformations to use for modeling q(u|x)\n alpha - Small constant, see Dequantization for details\n \"\"\"\n super().__init__(alpha=alpha)\n self.flows = nn.ModuleList(var_flows)\n\n def dequant(self, z, ldj):\n z = z.to(torch.float32)\n img = (\n z / 255.0\n ) * 2 - 1 # We condition the flows on x, i.e. the original image\n\n # Prior of u is a uniform distribution as before\n # As most flow transformations are defined on [-infinity,+infinity], we apply an inverse sigmoid first.\n deq_noise = torch.rand_like(z).detach()\n deq_noise, ldj = self.sigmoid(deq_noise, ldj, reverse=True)\n for flow in self.flows:\n deq_noise, ldj = flow(deq_noise, ldj, reverse=False, orig_img=img)\n deq_noise, ldj = self.sigmoid(deq_noise, ldj, reverse=False)\n\n # After the flows, apply u as in standard dequantization\n z = (z + deq_noise) / 256.0\n ldj -= np.log(256.0) * np.prod(z.shape[1:])\n return z, ldj\n\n\nclass CouplingLayer(nn.Module):\n def __init__(self, network, mask, c_in):\n \"\"\"\n Coupling layer inside a normalizing flow.\n Inputs:\n network - A PyTorch nn.Module constituting the deep neural network for mu and sigma.\n Output shape should be twice the channel size as the input.\n mask - Binary mask (0 or 1) where 0 denotes that the element should be transformed,\n while 1 means the latent will be used as input to the NN.\n c_in - Number of input channels\n \"\"\"\n super().__init__()\n self.network = network\n self.scaling_factor = nn.Parameter(torch.zeros(c_in))\n # Register mask as buffer as it is a tensor which is not a parameter,\n # but should be part of the modules state.\n self.register_buffer(\"mask\", mask)\n\n def forward(self, z, ldj, reverse=False, orig_img=None):\n \"\"\"\n Inputs:\n z - Latent input to the flow\n ldj - The current ldj of the previous flows.\n The ldj of this layer will be added to this tensor.\n reverse - If True, we apply the inverse of the layer.\n orig_img (optional) - Only needed in VarDeq. Allows external\n input to condition the flow on (e.g. 
original image)\n \"\"\"\n # Apply network to masked input\n z_in = z * self.mask\n if orig_img is None:\n nn_out = self.network(z_in)\n else:\n nn_out = self.network(torch.cat([z_in, orig_img], dim=1))\n s, t = nn_out.chunk(2, dim=1)\n\n # Stabilize scaling output\n s_fac = self.scaling_factor.exp().view(1, -1, 1, 1)\n s = torch.tanh(s / s_fac) * s_fac\n\n # Mask outputs (only transform the second part)\n s = s * (1 - self.mask)\n t = t * (1 - self.mask)\n\n # Affine transformation\n if not reverse:\n # Whether we first shift and then scale, or the other way round,\n # is a design choice, and usually does not have a big impact\n z = (z + t) * torch.exp(s)\n ldj += s.sum(dim=[1, 2, 3])\n else:\n z = (z * torch.exp(-s)) - t\n ldj -= s.sum(dim=[1, 2, 3])\n\n return z, ldj\n\n\ndef create_checkerboard_mask(h, w, invert=False):\n x, y = torch.arange(h, dtype=torch.int32), torch.arange(w, dtype=torch.int32)\n xx, yy = torch.meshgrid(x, y)\n mask = torch.fmod(xx + yy, 2)\n mask = mask.to(torch.float32).view(1, 1, h, w)\n if invert:\n mask = 1 - mask\n return mask\n\n\ndef create_channel_mask(c_in, invert=False):\n mask = torch.cat(\n [\n torch.ones(c_in // 2, dtype=torch.float32),\n torch.zeros(c_in - c_in // 2, dtype=torch.float32),\n ]\n )\n mask = mask.view(1, c_in, 1, 1)\n if invert:\n mask = 1 - mask\n return mask\n\n\nclass ConcatELU(nn.Module):\n \"\"\"\n Activation function that applies ELU in both direction (inverted and plain).\n Allows non-linearity while providing strong gradients for any input (important for final convolution)\n \"\"\"\n\n def forward(self, x):\n return torch.cat([F.elu(x), F.elu(-x)], dim=1)\n\n\nclass LayerNormChannels(nn.Module):\n def __init__(self, c_in):\n \"\"\"\n This module applies layer norm across channels in an image. Has been shown to work well with ResNet connections.\n Inputs:\n c_in - Number of channels of the input\n \"\"\"\n super().__init__()\n self.layer_norm = nn.LayerNorm(c_in)\n\n def forward(self, x):\n x = x.permute(0, 2, 3, 1)\n x = self.layer_norm(x)\n x = x.permute(0, 3, 1, 2)\n return x\n\n\nclass GatedConv(nn.Module):\n def __init__(self, c_in, c_hidden):\n \"\"\"\n This module applies a two-layer convolutional ResNet block with input gate\n Inputs:\n c_in - Number of channels of the input\n c_hidden - Number of hidden dimensions we want to model (usually similar to c_in)\n \"\"\"\n super().__init__()\n self.net = nn.Sequential(\n nn.Conv2d(c_in, c_hidden, kernel_size=3, padding=1),\n ConcatELU(),\n nn.Conv2d(2 * c_hidden, 2 * c_in, kernel_size=1),\n )\n\n def forward(self, x):\n out = self.net(x)\n val, gate = out.chunk(2, dim=1)\n return x + val * torch.sigmoid(gate)\n\n\nclass GatedConvNet(nn.Module):\n def __init__(self, c_in, c_hidden=32, c_out=-1, num_layers=3):\n \"\"\"\n Module that summarizes the previous blocks to a full convolutional neural network.\n Inputs:\n c_in - Number of input channels\n c_hidden - Number of hidden dimensions to use within the network\n c_out - Number of output channels. 
If -1, 2 times the input channels are used (affine coupling)\n num_layers - Number of gated ResNet blocks to apply\n \"\"\"\n super().__init__()\n c_out = c_out if c_out > 0 else 2 * c_in\n layers = []\n layers += [nn.Conv2d(c_in, c_hidden, kernel_size=3, padding=1)]\n for layer_index in range(num_layers):\n layers += [GatedConv(c_hidden, c_hidden), LayerNormChannels(c_hidden)]\n layers += [\n ConcatELU(),\n nn.Conv2d(2 * c_hidden, c_out, kernel_size=3, padding=1),\n ]\n self.nn = nn.Sequential(*layers)\n\n self.nn[-1].weight.data.zero_()\n self.nn[-1].bias.data.zero_()\n\n def forward(self, x):\n return self.nn(x)\n\n\ndef create_simple_flow(use_vardeq=True):\n flow_layers = []\n if use_vardeq:\n vardeq_layers = [\n CouplingLayer(\n network=GatedConvNet(c_in=2, c_out=2, c_hidden=16),\n mask=create_checkerboard_mask(h=28, w=28, invert=(i % 2 == 1)),\n c_in=1,\n )\n for i in range(4)\n ]\n flow_layers += [VariationalDequantization(var_flows=vardeq_layers)]\n else:\n flow_layers += [Dequantization()]\n\n for i in range(8):\n flow_layers += [\n CouplingLayer(\n network=GatedConvNet(c_in=1, c_hidden=32),\n mask=create_checkerboard_mask(h=28, w=28, invert=(i % 2 == 1)),\n c_in=1,\n )\n ]\n\n flow_model = ImageFlow(flow_layers).to(device)\n return flow_model\n\n\ndef train_flow(flow, model_name=\"MNISTFlow\"):\n # Create a PyTorch Lightning trainer\n trainer = pl.Trainer(\n default_root_dir=os.path.join(CHECKPOINT_PATH, model_name),\n gpus=1 if torch.cuda.is_available() else 0,\n max_epochs=200,\n gradient_clip_val=1.0,\n callbacks=[\n ModelCheckpoint(save_weights_only=True, mode=\"min\", monitor=\"val_bpd\"),\n LearningRateMonitor(\"epoch\"),\n ],\n )\n trainer.logger._log_graph = True\n trainer.logger._default_hp_metric = (\n None # Optional logging argument that we don't need\n )\n\n train_data_loader = data.DataLoader(\n train_set,\n batch_size=128,\n shuffle=True,\n drop_last=True,\n pin_memory=True,\n num_workers=8,\n )\n result = None\n\n # Check whether pretrained model exists. 
If yes, load it and skip training\n pretrained_filename = os.path.join(CHECKPOINT_PATH, model_name + \".ckpt\")\n if os.path.isfile(pretrained_filename):\n print(\"Found pretrained model, loading...\")\n if device == torch.device(\"cpu\"): # added this\n ckpt = torch.load(pretrained_filename, map_location=torch.device(\"cpu\"))\n else:\n ckpt = torch.load(pretrained_filename)\n flow.load_state_dict(ckpt[\"state_dict\"])\n result = ckpt.get(\"result\", None)\n else:\n print(\"Start training\", model_name)\n trainer.fit(flow, train_data_loader, val_loader)\n\n # Test best model on validation and test set if no result has been found\n # Testing can be expensive due to the importance sampling.\n if result is None:\n val_result = trainer.test(flow, test_dataloaders=val_loader, verbose=False)\n start_time = time.time()\n test_result = trainer.test(flow, test_dataloaders=test_loader, verbose=False)\n duration = time.time() - start_time\n result = {\n \"test\": test_result,\n \"val\": val_result,\n \"time\": duration / len(test_loader) / flow.import_samples,\n }\n\n return flow, result\n\n\nclass SqueezeFlow(nn.Module):\n def forward(self, z, ldj, reverse=False):\n B, C, H, W = z.shape\n if not reverse:\n # Forward direction: H x W x C => H/2 x W/2 x 4C\n z = z.reshape(B, C, H // 2, 2, W // 2, 2)\n z = z.permute(0, 1, 3, 5, 2, 4)\n z = z.reshape(B, 4 * C, H // 2, W // 2)\n else:\n # Reverse direction: H/2 x W/2 x 4C => H x W x C\n z = z.reshape(B, C // 4, 2, 2, H, W)\n z = z.permute(0, 1, 4, 2, 5, 3)\n z = z.reshape(B, C // 4, H * 2, W * 2)\n return z, ldj\n\n\nclass SplitFlow(nn.Module):\n def __init__(self):\n super().__init__()\n self.prior = torch.distributions.normal.Normal(loc=0.0, scale=1.0)\n\n def forward(self, z, ldj, reverse=False):\n if not reverse:\n z, z_split = z.chunk(2, dim=1)\n ldj += self.prior.log_prob(z_split).sum(dim=[1, 2, 3])\n else:\n z_split = self.prior.sample(sample_shape=z.shape).to(device)\n z = torch.cat([z, z_split], dim=1)\n ldj -= self.prior.log_prob(z_split).sum(dim=[1, 2, 3])\n return z, ldj\n\n\ndef create_multiscale_flow():\n flow_layers = []\n\n vardeq_layers = [\n CouplingLayer(\n network=GatedConvNet(c_in=2, c_out=2, c_hidden=16),\n mask=create_checkerboard_mask(h=28, w=28, invert=(i % 2 == 1)),\n c_in=1,\n )\n for i in range(4)\n ]\n flow_layers += [VariationalDequantization(vardeq_layers)]\n\n flow_layers += [\n CouplingLayer(\n network=GatedConvNet(c_in=1, c_hidden=32),\n mask=create_checkerboard_mask(h=28, w=28, invert=(i % 2 == 1)),\n c_in=1,\n )\n for i in range(2)\n ]\n flow_layers += [SqueezeFlow()]\n for i in range(2):\n flow_layers += [\n CouplingLayer(\n network=GatedConvNet(c_in=4, c_hidden=48),\n mask=create_channel_mask(c_in=4, invert=(i % 2 == 1)),\n c_in=4,\n )\n ]\n flow_layers += [SplitFlow(), SqueezeFlow()]\n for i in range(4):\n flow_layers += [\n CouplingLayer(\n network=GatedConvNet(c_in=8, c_hidden=64),\n mask=create_channel_mask(c_in=8, invert=(i % 2 == 1)),\n c_in=8,\n )\n ]\n\n flow_model = ImageFlow(flow_layers).to(device)\n return flow_model\n\n\n# Below is our code to generate the data for the MNIST Normalizing Flow experiment\n\n\ndef sample_NF_grad_NF(seed):\n \"\"\"\n Sample from Normalizing Flow and compute scores with respect to the Normalizing FLow.\n input: seed: non-negative integer\n output: 2-tuple consisting of samples and scores\n \"\"\"\n pl.seed_everything(seed)\n samples = (\n flow_dict[\"multiscale\"][\"model\"]\n .sample(img_shape=[1, 8, 7, 7])\n .to(torch.float64)\n .clone()\n .detach()\n 
.requires_grad_(True)\n )\n log_px = flow_dict[\"multiscale\"][\"model\"]._get_likelihood(samples, return_ll=True)\n log_px.backward()\n grad_log_px = samples.grad\n return samples.cpu().detach().numpy().reshape(\n 1, 28**2\n ), grad_log_px.cpu().detach().numpy().reshape(1, 28**2)\n\n\ndef sample_MNIST_grad_NF(seed):\n \"\"\"\n Sample from the MNIST dataset and compute scores with respect to the Normalizing FLow.\n input: seed: non-negative integer\n output: 2-tuple consisting of samples and scores\n \"\"\"\n pl.seed_everything(seed)\n np.random.seed(seed)\n index = np.random.randint(len(train_set))\n samples = (\n train_set[index][0][None, :]\n .to(device)\n .to(torch.float64)\n .clone()\n .detach()\n .requires_grad_(True)\n )\n log_px = flow_dict[\"multiscale\"][\"model\"]._get_likelihood(samples, return_ll=True)\n log_px.backward()\n grad_log_px = samples.grad\n return samples.cpu().detach().numpy().reshape(\n 1, 28**2\n ), grad_log_px.cpu().detach().numpy().reshape(1, 28**2)\n\n\nif __name__ == \"__main__\":\n flow_dict = {\"multiscale\": {}}\n flow_dict[\"multiscale\"][\"model\"], flow_dict[\"multiscale\"][\"result\"] = train_flow(\n create_multiscale_flow(), model_name=\"MNISTFlow_multiscale\"\n )\n print(\"Starting\")\n\n # Generate 2 datasets, one for testing and one for the parametric bootstrap\n L = ((\"testing\", 200, 500), (\"bootstrap\", 50, 5000))\n for j in range(len(L)):\n directory, repetitions, m = L[j]\n Path(\"data/NF_MNIST/\" + directory).mkdir(exist_ok=True, parents=True)\n X_level = np.empty((repetitions, m, 28**2))\n score_X_level = np.empty((repetitions, m, 28**2))\n X_power = np.empty((repetitions, m, 28**2))\n score_X_power = np.empty((repetitions, m, 28**2))\n for r in range(repetitions):\n t = time.time()\n for i in range(m):\n seed = i + r * 10 ** (len(str(m)) + 1)\n X_level[r, i], score_X_level[r, i] = sample_NF_grad_NF(seed)\n X_power[r, i], score_X_power[r, i] = sample_MNIST_grad_NF(seed)\n print(\n \"Step:\",\n j + 1,\n \"/\",\n len(L),\n \",\",\n r + 1,\n \"/\",\n repetitions,\n \"time:\",\n time.time() - t,\n )\n np.save(\"data/NF_MNIST/\" + directory + \"/X_mnist_level.npy\", X_level)\n np.save(\n \"data/NF_MNIST/\" + directory + \"/score_X_mnist_level.npy\", score_X_level\n )\n np.save(\"data/NF_MNIST/\" + directory + \"/X_mnist_power.npy\", X_power)\n np.save(\n \"data/NF_MNIST/\" + directory + \"/score_X_mnist_power.npy\", score_X_power\n )\n print(\"NF MNIST data has been saved in data/NF_MNIST.\")\n", "id": "11068851", "language": "Python", "matching_score": 7.091464996337891, "max_stars_count": 2, "path": "generate_data_nf.py" }, { "content": "\"\"\"\nReproduce the figures of our paper\nKSD Aggregated Goodness-of-fit Test\n<NAME>, <NAME>, <NAME>\nhttps://arxiv.org/pdf/2202.00824.pdf\nFigures are generated using the dataframes in the \ndirectory results/ and are saved in the directory figures/.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom matplotlib import rc, rcParams\nimport dataframe_image as dfi\nfrom generate_data_nf import train_flow, create_multiscale_flow\nfrom pathlib import Path\n\n# matplotlib parameters\nfs = 16\nrcParams.update({\"font.size\": fs})\nrc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Computer Modern\"]})\nrc(\"text\", usetex=True)\n\n# create figures directory if it does not exist\nPath(\"figures\").mkdir(exist_ok=True, parents=True)\n\n# Figure 1: Gamma experiment\nresults_gamma = pd.read_pickle(\"results/results_gamma.pkl\")\npower = 
results_gamma.unstack().to_numpy().T[[0, 3, 2, 1], :]\nnames = np.array(\n [\n r\"\\textsc{KSDAgg}\",\n r\"\\textsc{KSD} median\",\n r\"\\textsc{KSD} split\",\n r\"\\textsc{KSD} split extra data\",\n ]\n)[[0, 3, 2, 1]]\nlines = np.array([\"-\", \"-\", \"-\", \"--\"])[[0, 3, 2, 1]]\nsigma = [0, 0.1, 0.2, 0.3, 0.4]\nplt.figure(figsize=(6, 4))\nfor i in range(4):\n plt.plot(sigma, power[i], lines[i], label=names[i])\nplt.legend()\nplt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])\nplt.ylim(-0.05, 1.05)\nplt.xticks(sigma)\nplt.xlabel(\"Shape parameter shift $s$\", fontsize=fs)\nplt.ylabel(\"Probability of rejecting $H_0$\", labelpad=7, fontsize=fs)\nplt.legend(\n fontsize=16,\n ncol=2,\n handleheight=0.5,\n labelspacing=0.4,\n columnspacing=0.6,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -0.48),\n)\nplt.savefig(\"figures/figure_1.png\", dpi=300, bbox_inches=\"tight\")\nprint(\"Figure 1 has been saved in figures/.\")\n\n\n# Figure 2: RBM experiment\nresults_rbm = pd.read_pickle(\"results/results_RBM.pkl\")\npower = results_rbm.unstack().to_numpy().T[[0, 3, 2, 1], :]\nnames = np.array(\n [\n r\"\\textsc{KSDAgg}\",\n r\"\\textsc{KSD} median\",\n r\"\\textsc{KSD} split\",\n r\"\\textsc{KSD} split extra data\",\n ]\n)[[0, 3, 2, 1]]\nlines = np.array([\"-\", \"-\", \"-\", \"--\"])[[0, 3, 2, 1]]\nsigma = [0, 0.01, 0.02, 0.03]\nplt.figure(figsize=(6, 4))\nfor i in range(4):\n plt.plot(sigma, power[i][:4], lines[i], label=names[i])\nplt.xticks(sigma)\nplt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])\nplt.ylim(-0.05, 1.05)\nplt.xlabel(\"Perturbation standard deviation $\\sigma$\", fontsize=fs)\nplt.ylabel(\"Probability of rejecting $H_0$\", labelpad=7, fontsize=fs)\nplt.legend(\n fontsize=16,\n ncol=2,\n handleheight=0.5,\n labelspacing=0.4,\n columnspacing=0.6,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -0.48),\n)\nplt.savefig(\"figures/figure_2.png\", dpi=300, bbox_inches=\"tight\")\nprint(\"Figure 2 has been saved in figures/.\")\n\n\n# Table 1: NF exeperiment level\nresults_nf = pd.read_pickle(\"results/results_NF_MNIST.pkl\")\ndfi.export(results_nf.rename(columns={\"power/level\": \"level\"}).loc[\"level\"].unstack(), \"figures/table_1.png\")\nprint(\"Table 1 has been saved in figures/.\")\n\n\n# Figure 4: NF experiment power\nresults_nf = pd.read_pickle(\"results/results_NF_MNIST.pkl\")\npower = results_nf.loc[\"power\"].unstack().to_numpy().T[[0, 3, 2, 1], :]\nnames = np.array(\n [\n r\"\\textsc{KSDAgg}\",\n r\"\\textsc{KSD} median\",\n r\"\\textsc{KSD} split\",\n r\"\\textsc{KSD} split extra data\",\n ]\n)[[0, 3, 2, 1]]\nlines = np.array([\"-\", \"-\", \"-\", \"--\"])[[0, 3, 2, 1]]\nsamples = [100, 200, 300, 400, 500]\nplt.figure(figsize=(6, 4))\nfor i in range(4):\n plt.plot(samples, power[i], lines[i], label=names[i])\nplt.legend()\nplt.xticks(samples)\nplt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])\nplt.ylim(-0.05, 1.05)\nplt.xlabel(\"Sample size\", fontsize=fs)\nplt.ylabel(\"Probability of rejecting $H_0$\", labelpad=7, fontsize=fs)\nplt.legend(\n fontsize=16,\n ncol=2,\n handleheight=0.5,\n labelspacing=0.4,\n columnspacing=0.6,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -0.48),\n)\nplt.savefig(\"figures/figure_4.png\", dpi=300, bbox_inches=\"tight\")\nprint(\"Figure 4 has been saved in figures/.\")\n\n\n# For Figure 3: MNIST NF digits, we first need to load the MNIST dataset\n# and pretrained Normalizing Flow model as in generate_data_nf.py\n# This code is taken from Tutorial 11: Normalizing Flows for image modeling\n# 
https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial11/NF_image_modeling.html\nimport urllib.request\nfrom urllib.error import HTTPError\nimport math\nimport os\nimport torch\nfrom torchvision.datasets import MNIST\nfrom torchvision import transforms\nimport pytorch_lightning as pl\nimport torchvision\n\n# Path to the folder where the datasets are/should be downloaded (e.g. MNIST)\nDATASET_PATH = \"mnist/data_mnist\"\n# Path to the folder where the pretrained models are saved\nCHECKPOINT_PATH = \"mnist/saved_models/tutorial11\"\n# Github URL where saved models are stored for this tutorial\nbase_url = \"https://raw.githubusercontent.com/phlippe/saved_models/main/tutorial11/\"\n# Files to download\npretrained_files = [\"MNISTFlow_multiscale.ckpt\"]\n# Create checkpoint path if it doesn't exist yet\nos.makedirs(CHECKPOINT_PATH, exist_ok=True)\n# For each file, check whether it already exists. If not, try downloading it.\nfor file_name in pretrained_files:\n file_path = os.path.join(CHECKPOINT_PATH, file_name)\n if not os.path.isfile(file_path):\n file_url = base_url + file_name\n print(f\"Downloading {file_url}...\")\n try:\n urllib.request.urlretrieve(file_url, file_path)\n except HTTPError as e:\n print(\n \"Something went wrong. Please try to download the file from the GDrive folder, or contact the author with the full output including the following error:\\n\",\n e,\n )\n# Convert images from 0-1 to 0-255 (integers)\ndef discretize(sample):\n return (sample * 255).to(torch.int32)\n\n\n# Transformations applied on each image => make them a tensor and discretize\ntransform = transforms.Compose([transforms.ToTensor(), discretize])\n# Loading the training dataset. We need to split it into a training and validation part\ntrain_dataset = MNIST(root=DATASET_PATH, train=True, transform=transform, download=True)\npl.seed_everything(42)\ntrain_set, val_set = torch.utils.data.random_split(train_dataset, [50000, 10000])\n# Load pretrained multiscale Normalizing Flow for MNIST\nflow_dict = {\"multiscale\": {}}\nflow_dict[\"multiscale\"][\"model\"], flow_dict[\"multiscale\"][\"result\"] = train_flow(\n create_multiscale_flow(), model_name=\"MNISTFlow_multiscale\"\n)\n\n# Figure 3: MNIST NF digits\nimgs_list = []\nimgs_list.append([train_set[i][0] for i in range(4)])\npl.seed_everything(0)\nimgs_list.append(\n flow_dict[\"multiscale\"][\"model\"].sample(img_shape=[100, 8, 7, 7])[[0, 2, 9, 24]]\n)\npl.seed_everything(0)\nimgs_list.append(\n flow_dict[\"multiscale\"][\"model\"].sample(img_shape=[100, 8, 7, 7])[[3, 6, 27, 5]]\n)\nfilenames = [\"figure_3_top\", \"figure_3_middle\", \"figure_3_bottom\"]\nplt.figure()\nfor i in range(len(imgs_list)):\n imgs = imgs_list[i]\n row_size = 4\n num_imgs = imgs.shape[0] if isinstance(imgs, torch.Tensor) else len(imgs)\n is_int = (\n imgs.dtype == torch.int32\n if isinstance(imgs, torch.Tensor)\n else imgs[0].dtype == torch.int32\n )\n nrow = min(num_imgs, row_size)\n ncol = int(math.ceil(num_imgs / nrow))\n imgs = torchvision.utils.make_grid(\n imgs, nrow=nrow, pad_value=128 if is_int else 0.5\n )\n np_imgs = imgs.cpu().numpy()\n plt.figure(figsize=(1.5 * nrow, 1.5 * ncol))\n plt.imshow(np.transpose(np_imgs, (1, 2, 0)), interpolation=\"nearest\")\n plt.axis(\"off\")\n plt.savefig(\"figures/\" + filenames[i], dpi=300, bbox_inches=\"tight\")\nprint(\"Figure 3 has been saved in figures/.\")\n", "id": "5866723", "language": "Python", "matching_score": 2.7218902111053467, "max_stars_count": 2, "path": "figures.py" }, { "content": "import numpy as 
np\nimport itertools\nfrom weights import create_weights\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom sampling import G, f_theta, f0\n\n# https://gist.github.com/thriveth/8560036\nCB_color_cycle = [\n \"#377eb8\",\n \"#ff7f00\",\n \"#4daf4a\",\n \"#f781bf\",\n \"#a65628\",\n \"#984ea3\",\n \"#999999\",\n \"#e41a1c\",\n \"#dede00\",\n]\n\n# Parameters for plots\nlinewidth = 2.5\nmarkersize = 8\nfs = 32\n\n\ndef plot_fig_1(f, axs):\n \"\"\"\n Plot the figure labelled Figure 1 in our paper given the data.\n \"\"\"\n idx = 0\n N = 5\n x_values = [i + 1 for i in range(N)]\n axs[idx].plot(\n x_values,\n create_weights(N, \"uniform\"),\n CB_color_cycle[1],\n marker=\"o\",\n label=r\"\\texttt{uniform}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx].plot(\n x_values,\n create_weights(N, \"centred\"),\n CB_color_cycle[7],\n marker=\"o\",\n label=r\"\\texttt{centred}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx].plot(\n x_values,\n create_weights(N, \"increasing\"),\n CB_color_cycle[2],\n marker=\"o\",\n label=r\"\\texttt{increasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx].plot(\n x_values,\n create_weights(N, \"decreasing\"),\n CB_color_cycle[0],\n marker=\"o\",\n label=r\"\\texttt{decreasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx].tick_params(\n axis=\"x\", which=\"both\", bottom=False, top=False, labelbottom=False\n )\n axs[idx].set_ylabel(\"Weights\", labelpad=10)\n axs[idx].set_title(str(N) + \" bandwidths\", pad=8, fontsize=fs)\n axs[idx].set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5])\n axs[idx].set_ylim([-0.025, 0.525])\n idx = 1\n N = 6\n x_values = [i + 1 for i in range(N)]\n axs[idx].plot(\n x_values,\n create_weights(N, \"uniform\"),\n CB_color_cycle[1],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg uniform}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx].plot(\n x_values,\n create_weights(N, \"centred\"),\n CB_color_cycle[7],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg centred}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx].plot(\n x_values,\n create_weights(N, \"increasing\"),\n CB_color_cycle[2],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg increasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx].plot(\n x_values,\n create_weights(N, \"decreasing\"),\n CB_color_cycle[0],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg decreasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx].tick_params(\n axis=\"x\", which=\"both\", bottom=False, top=False, labelbottom=False\n )\n axs[idx].set_title(str(N) + \" bandwidths\", pad=8, fontsize=fs)\n f.text(0.5, 0.065, \"Bandwidths sorted in increasing order\", ha=\"center\")\n\n\ndef plot_fig_2(f, axs):\n \"\"\"\n Plot the figure labelled Figure 2 in our paper given the data.\n \"\"\"\n idx = 0\n start = -1\n stop = 0\n n_points = 300\n xplot = np.linspace(start, stop, n_points)\n Gplot = [G(x) for x in xplot]\n axs[idx].plot(xplot, Gplot, \"k\")\n axs[idx].set_yticks([-0.4, 0, 0.4])\n axs[idx].set_ylim(-0.4, 0.4)\n axs[idx].set_xlim(-1, 0)\n axs[idx].set_xticks([-1, -0.5, 0])\n axs[idx].set_yticklabels([\"\", 0, \"\"])\n axs[idx].set_yticks([-0.37, 0.37], minor=True)\n axs[idx].set_yticklabels([\"$-0.4$\", \"$0.4$\"], minor=True)\n axs[idx].set_xticklabels([\"\", \"$-0.5$\", \"\"])\n axs[idx].set_xticks([-0.97, -0.06], minor=True)\n axs[idx].set_xticklabels([\"$-1.0$\", \"$0.0$\"], minor=True)\n axs[idx].set_title(\"(a)\", pad=12, fontsize=fs)\n for line in 
axs[idx].yaxis.get_minorticklines():\n line.set_visible(False)\n for line in axs[idx].xaxis.get_minorticklines():\n line.set_visible(False)\n idx = 1\n s = 1\n d = 1\n perturbation_multiplier = 2.7\n start = 0\n stop = 1\n n_points = 300\n xplot = np.linspace(start, stop, n_points)\n f0plot = [f0(x) for x in xplot]\n zeroplot = [0 for x in xplot]\n axs[idx].plot(xplot, f0plot, \"k\")\n colors = [\"blue\", \"red\", \"purple\", \"orange\"]\n for p in range(1, 5):\n fplot = [f_theta(x, p, s, perturbation_multiplier, p + 5) for x in xplot]\n axs[idx].plot(xplot, fplot, colors[p - 1])\n axs[idx].set_xticks([0, 0.5, 1])\n axs[idx].set_yticks([0, 1, 2])\n axs[idx].set_yticklabels([\"\", 1, \"\"])\n axs[idx].set_yticks([0.06, 1.94], minor=True)\n axs[idx].set_yticklabels([\"$0$\", \"$2$\"], minor=True)\n axs[idx].set_xticklabels([\"\", \"$0.5$\", \"\"])\n axs[idx].set_xticks([0.06, 0.94], minor=True)\n axs[idx].set_xticklabels([\"$0.0$\", \"$1.0$\"], minor=True)\n axs[idx].set_ylim(0, 2)\n axs[idx].set_xlim(0, 1)\n axs[idx].set_title(\"(b)\", pad=12, fontsize=fs)\n for line in axs[idx].yaxis.get_minorticklines():\n line.set_visible(False)\n for line in axs[idx].xaxis.get_minorticklines():\n line.set_visible(False)\n idx = 2\n start = 0\n stop = 1\n n_points = 100\n p = 2\n s = 1\n d = 2\n perturbation_multiplier = 7.3\n x = np.linspace(start, stop, n_points)\n y = np.linspace(start, stop, n_points)\n z = np.array(\n [\n f_theta(\n np.concatenate((np.atleast_1d(j), np.atleast_1d(i))),\n p,\n s,\n perturbation_multiplier,\n 5,\n )\n for j in y\n for i in x\n ]\n )\n Z = z.reshape(n_points, n_points)\n im1 = axs[idx].imshow(\n Z, origin=\"lower\", interpolation=\"bilinear\", extent=[0, 1, 0, 1], cmap=\"bwr\"\n )\n axs[idx].set_title(\"(c)\", pad=12, fontsize=fs)\n divider = make_axes_locatable(axs[idx])\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cbar = f.colorbar(im1, cax=cax, orientation=\"vertical\")\n cbar.set_ticks([0.6, 1, 1.4])\n cbar.set_ticklabels([0.6, 1, 1.4])\n axs[idx].set_yticks([0, 0.5, 1])\n axs[idx].set_xticks([0, 0.5, 1])\n axs[idx].set_yticklabels([\"\", 0.5, \"\"])\n axs[idx].set_yticks([0.03, 0.97], minor=True)\n axs[idx].set_yticklabels([\"$0.0$\", \"$1.0$\"], minor=True)\n axs[idx].set_xticklabels([\"\", \"$0.5$\", \"\"])\n axs[idx].set_xticks([0.06, 0.94], minor=True)\n axs[idx].set_xticklabels([\"$0.0$\", \"$1.0$\"], minor=True)\n\n for line in axs[idx].yaxis.get_minorticklines():\n line.set_visible(False)\n for line in axs[idx].xaxis.get_minorticklines():\n line.set_visible(False)\n\n\ndef plot_fig_3_4(idx, idy, f, axs, power, power_ms, l_minus, l_plus):\n \"\"\"\n Plot the figures labelled Figures 3 and 4 in our paper given the data.\n \"\"\"\n x_values = [i for i in range(1, 1 + len(power[0]))]\n axs[idx, idy].plot(\n x_values,\n power[0],\n CB_color_cycle[1],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg uniform}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[3],\n CB_color_cycle[7],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg centred}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[1],\n CB_color_cycle[2],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg increasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[2],\n CB_color_cycle[0],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg decreasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power_ms[0],\n CB_color_cycle[6],\n 
marker=\"s\",\n linestyle=\"--\",\n label=r\"\\texttt{median}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[4],\n CB_color_cycle[5],\n marker=\"^\",\n linestyle=\"--\",\n label=r\"\\texttt{ost}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power_ms[1],\n CB_color_cycle[4],\n marker=\"s\",\n linestyle=\"--\",\n label=r\"\\texttt{split}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power_ms[2],\n CB_color_cycle[3],\n marker=\"v\",\n linestyle=\":\",\n label=r\"\\texttt{oracle}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].set_xticks(x_values)\n if idx == 0:\n axs[idx, idy].set_title(\n \"$\\Lambda(\" + str(l_minus) + \",\" + str(l_plus) + \")$\", fontsize=fs, pad=10\n )\n axs[idx, idy].set_ylim(-0.05, 1.05)\n if (idx, idy) == (1, 1):\n axs[idx, idy].set_xlabel(\"Number of perturbations\", fontsize=fs)\n if (idx, idy) == (0, 0):\n axs[idx, idy].set_ylabel(\"Power \\n Gaussian kernel\", fontsize=fs, labelpad=10)\n if (idx, idy) == (1, 0):\n axs[idx, idy].set_ylabel(\"Power \\n Laplace kernel\", fontsize=fs, labelpad=10)\n axs[idx, idy].set_yticks([0, 0.25, 0.5, 0.75, 1])\n\n\ndef plot_fig_5(idx, idy, f, axs, power, power_ms):\n \"\"\"\n Plot the figure labelled Figure 5 in our paper given the data.\n \"\"\"\n x_values = [\"$Q_1$\", \"$Q_2$\", \"$Q_3$\", \"$Q_4$\", \"$Q_5$\"]\n axs[idx, idy].plot(\n x_values,\n power[0],\n CB_color_cycle[1],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg uniform}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[3],\n CB_color_cycle[7],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg centred}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[1],\n CB_color_cycle[2],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg increasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[2],\n CB_color_cycle[0],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg decreasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power_ms[0],\n CB_color_cycle[6],\n marker=\"s\",\n linestyle=\"--\",\n label=r\"\\texttt{median}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[4],\n CB_color_cycle[5],\n marker=\"^\",\n linestyle=\"--\",\n label=r\"\\texttt{ost}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power_ms[1],\n CB_color_cycle[4],\n marker=\"s\",\n linestyle=\"--\",\n label=r\"\\texttt{split}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power_ms[2],\n CB_color_cycle[3],\n marker=\"v\",\n linestyle=\":\",\n label=r\"\\texttt{oracle}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].set_xticks(x_values)\n axs[idx, idy].xaxis.set_tick_params(pad=7)\n axs[idx, idy].set_yticks([0, 0.25, 0.5, 0.75, 1])\n axs[idx, idy].set_ylim(-0.05, 1.05)\n if (idx, idy) == (1, 1):\n axs[idx, idy].set_xlabel(\"Choice of alternative\", fontsize=fs)\n if (idx, idy) == (0, 0):\n axs[idx, idy].set_ylabel(\"Power \\n Gaussian kernel\", fontsize=fs)\n axs[idx, idy].set_title(\n \"$\\Lambda(8,12)$, $\\Lambda(10,14)$\", fontsize=fs, pad=10\n )\n if (idx, idy) == (0, 1):\n axs[idx, idy].set_title(\n \"$\\Lambda(10,14)$, $\\Lambda(12,16)$\", fontsize=fs, pad=10\n )\n if (idx, idy) == (0, 2):\n axs[idx, idy].set_title(\n 
\"$\\Lambda(12,16)$, $\\Lambda(14,18)$\", fontsize=fs, pad=10\n )\n if (idx, idy) == (1, 0):\n axs[idx, idy].set_ylabel(\"Power \\n Laplace kernel\", fontsize=fs, labelpad=10)\n\n\ndef plot_fig_6(idx, idy, f, axs, power):\n \"\"\"\n Plot the figure labelled Figure 6 in our paper given the data.\n \"\"\"\n x_values = [3, 5, 7, 9, 11, 13, 15]\n axs[idx, idy].plot(\n x_values,\n power[0][1:],\n CB_color_cycle[1],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg uniform}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[1][1:],\n CB_color_cycle[7],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg centred}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].set_yticks([0, 0.25, 0.5, 0.75, 1])\n axs[idx, idy].set_ylim([-0.05, 1.05])\n axs[idx, idy].set_xticks(x_values)\n if (idx, idy) == (0, 0):\n axs[idx, idy].set_ylabel(\"Power \\n Gaussian kernel\", fontsize=fs, labelpad=10)\n axs[idx, idy].set_title(\n \"$d=1$, 3 perturbations\\n$m=n=500$\\n $\\Lambda(-2-i,-2+i)$\",\n fontsize=fs,\n pad=10,\n )\n if (idx, idy) == (1, 0):\n axs[idx, idy].set_ylabel(\"Power \\n Laplace kernel\", fontsize=fs, labelpad=10)\n if (idx, idy) == (0, 1):\n axs[idx, idy].set_title(\n \"$d=2$, 2 perturbations\\n$m=n=2\\,000$\\n $\\Lambda(-2-i,-2+i)$\",\n fontsize=fs,\n pad=10,\n )\n if (idx, idy) == (1, 1):\n axs[idx, idy].set_xlabel(\n \"Number of bandwidths in the collection (corresponding to $i=1,\\dots,7$)\",\n labelpad=10,\n fontsize=fs,\n )\n if (idx, idy) == (0, 2):\n axs[idx, idy].set_title(\n \"MNIST, $Q_3$, $m=n=500$\\n $\\Lambda(12-i,12+i)$\\n $\\Lambda(14-i,14+i)$\",\n fontsize=fs,\n pad=10,\n )\n\n\ndef plot_fig_7(idx, idy, f, axs, power, power_ms):\n \"\"\"\n Plot the figure labelled Figure 7 in our paper given the data.\n \"\"\"\n if idy == 0:\n x_values = [i + 1 for i in range(4)]\n if idy == 1:\n x_values = [i + 1 for i in range(3)]\n if idy == 2:\n x_values = [\"$Q_1$\", \"$Q_2$\", \"$Q_3$\", \"$Q_4$\", \"$Q_5$\"]\n axs[idx, idy].plot(\n x_values,\n power[0],\n CB_color_cycle[1],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg uniform}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[3],\n CB_color_cycle[7],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg centred}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[1],\n CB_color_cycle[2],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg increasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[2],\n CB_color_cycle[0],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg decreasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power_ms[0],\n CB_color_cycle[6],\n marker=\"s\",\n linestyle=\"--\",\n label=r\"\\texttt{median}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power_ms[1],\n CB_color_cycle[4],\n marker=\"s\",\n linestyle=\"--\",\n label=r\"\\texttt{split}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].set_yticks([-0.02, 0, 0.02])\n axs[idx, idy].set_ylim([-0.03, 0.03])\n axs[idx, idy].set_xticks(x_values)\n if (idx, idy) == (0, 0):\n axs[idx, idy].set_ylabel(\"Difference in power \\n Gaussian kernel\", fontsize=fs)\n axs[idx, idy].set_title(\n \"$d=1$, $\\Lambda(-4,0)$\\n$m=n=500$\", fontsize=fs, pad=10\n )\n axs[idx, idy].set_xticklabels([\"\", \"\", \"\", \"\"])\n if (idx, idy) == (1, 0):\n axs[idx, idy].set_ylabel(\"Difference in power \\n Laplace kernel\", 
fontsize=fs)\n axs[idx, idy].set_xlabel(\"Number of perturbations\", fontsize=fs)\n if (idx, idy) == (0, 1):\n axs[idx, idy].set_title(\n \"$d=2$, $\\Lambda(-4,0)$\\n$m=n=2\\,000$\", fontsize=fs, pad=10\n )\n axs[idx, idy].set_xticklabels([\"\", \"\", \"\"])\n if (idx, idy) == (1, 1):\n axs[idx, idy].set_xlabel(\"Number of perturbations\", fontsize=fs)\n if (idx, idy) == (0, 2):\n axs[idx, idy].set_title(\n \"MNIST, $\\Lambda(10,14)$, $\\Lambda(12,16)$\\n$m=n=500$\", fontsize=fs, pad=10\n )\n axs[idx, idy].set_xticklabels([\"\", \"\", \"\", \"\", \"\"])\n if (idx, idy) == (1, 2):\n axs[idx, idy].set_xlabel(\"Choice of alternative\", fontsize=fs)\n axs[idx, idy].xaxis.set_tick_params(pad=7)\n\n\ndef plot_fig_8(idx, idy, f, axs, power):\n \"\"\"\n Plot the figure labelled Figure 8 in our paper given the data.\n \"\"\"\n sample_sizes = [\"$1\\,000$\", \"$2\\,000$\", \"$3\\,000$\", \"$4\\,000$\", \"$5\\,000$\"]\n if idy == 2:\n sample_sizes = [\"200\", \"400\", \"600\", \"800\", \"$1\\,000$\"]\n axs[idx, idy].plot(\n sample_sizes,\n power[0],\n CB_color_cycle[1],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg uniform}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n sample_sizes,\n power[3],\n CB_color_cycle[7],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg centred}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n sample_sizes,\n power[1],\n CB_color_cycle[2],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg increasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n sample_sizes,\n power[2],\n CB_color_cycle[0],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg decreasing}\",\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].set_yticks([0, 0.25, 0.5, 0.75, 1])\n axs[idx, idy].set_ylim([-0.05, 1.05])\n axs[idx, idy].set_xticks(sample_sizes)\n axs[idx, idy].tick_params(axis=\"x\", labelrotation=90)\n if (idx, idy) == (0, 0):\n axs[idx, idy].set_ylabel(\"Power \\n Gaussian kernel\", fontsize=fs, labelpad=10)\n axs[idx, idy].set_title(\n \"$d=1$, 3 perturbations\\n$m=100$, $\\Lambda(-4,0)$\", fontsize=fs, pad=10\n )\n axs[idx, idy].set_xticklabels([\"\", \"\", \"\", \"\", \"\"])\n if (idx, idy) == (1, 0):\n axs[idx, idy].set_ylabel(\"Power \\n Laplace kernel\", fontsize=fs, labelpad=10)\n if (idx, idy) == (0, 1):\n axs[idx, idy].set_title(\n \"$d=2$, 2 perturbations\\n$m=250$, $\\Lambda(-4,0)$\", fontsize=fs, pad=10\n )\n axs[idx, idy].set_xticklabels([\"\", \"\", \"\", \"\", \"\"])\n if (idx, idy) == (1, 1):\n axs[idx, idy].set_xlabel(\"Sample size $n$\", labelpad=12, fontsize=fs)\n if (idx, idy) == (0, 2):\n axs[idx, idy].set_title(\n \"MNIST, $Q_3$, $m=100$\\n$\\Lambda(10,14)$, $\\Lambda(12,16)$\",\n fontsize=fs,\n pad=10,\n )\n axs[idx, idy].set_xticklabels([\"\", \"\", \"\", \"\", \"\"])\n\n\ndef plot_fig_9(idx, idy, f, axs, power, power_u, power_o):\n \"\"\"\n Plot the figure labelled Figure 9 in our paper given the data.\n \"\"\"\n # https://github.com/matplotlib/matplotlib/issues/9460\n colors = [\"#1845fb\", \"#578dff\", \"#86c8dd\", \"#adad7d\", \"#656364\"]\n mn = [\"500\", \"$2\\,000$\", \"500\"]\n if idy == 0:\n x_values = [i + 1 for i in range(4)]\n sample_sizes = [\n \"$25\\,000$\",\n \"$50\\,000$\",\n \"$75\\,000$\",\n \"$100\\,000$\",\n ] # = [int(i * 10**5) for i in [0.25, 0.5, 0.75, 1]]\n if idy == 1:\n x_values = [i + 1 for i in range(3)]\n sample_sizes = [\n \"$250\\,000$\",\n \"$500\\,000$\",\n \"$750\\,000$\",\n \"$1\\,000\\,000$\",\n ] # = [int(i * 10**5) for i in [2.5, 5, 7.5, 10]]\n 
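# idy == 2 corresponds to the MNIST panel: x_values are the five alternatives Q1-Q5, the\n    # MMDAgg uniform and ost baselines are labelled with mn[2] = 500, and the extra ost\n    # curves are labelled with the larger sample sizes defined just below.\n    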
if idy == 2:\n x_values = [\"$Q_1$\", \"$Q_2$\", \"$Q_3$\", \"$Q_4$\", \"$Q_5$\"]\n sample_sizes = [\n \"$50\\,000$\",\n \"$100\\,000$\",\n \"$150\\,000$\",\n \"$200\\,000$\",\n \"$250\\,000$\",\n ] # = [int(i * 10**5) for i in [0.5, 1, 1.5, 2, 2.5]]\n axs[idx, idy].plot(\n x_values,\n power_u,\n CB_color_cycle[1],\n marker=\"o\",\n label=r\"\\texttt{MMDAgg uniform} \" + str(mn[idy]),\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power_o,\n CB_color_cycle[5],\n marker=\"^\",\n linestyle=\"--\",\n label=r\"\\texttt{ost} \" + str(mn[idy]),\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[0],\n colors[0],\n marker=\"^\",\n linestyle=\"--\",\n label=r\"\\texttt{ost} \" + str(sample_sizes[0]),\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[1],\n colors[1],\n marker=\"^\",\n linestyle=\"--\",\n label=r\"\\texttt{ost} \" + str(sample_sizes[1]),\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[2],\n colors[2],\n marker=\"^\",\n linestyle=\"--\",\n label=r\"\\texttt{ost} \" + str(sample_sizes[2]),\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].plot(\n x_values,\n power[3],\n colors[3],\n marker=\"^\",\n linestyle=\"--\",\n label=r\"\\texttt{ost} \" + str(sample_sizes[3]),\n linewidth=linewidth,\n markersize=markersize,\n )\n if len(sample_sizes) == 5:\n axs[idx, idy].plot(\n x_values,\n power[4],\n colors[4],\n marker=\"^\",\n linestyle=\"--\",\n label=r\"\\texttt{ost} \" + str(sample_sizes[4]),\n linewidth=linewidth,\n markersize=markersize,\n )\n axs[idx, idy].set_yticks([0, 0.25, 0.5, 0.75, 1])\n axs[idx, idy].set_ylim([-0.05, 1.05])\n axs[idx, idy].set_xticks(x_values)\n if (idx, idy) == (0, 0):\n axs[idx, idy].set_ylabel(\"Power \\n Gaussian kernel\", fontsize=fs, labelpad=10)\n axs[idx, idy].set_title(\"$d=1$, $\\Lambda(-4,0)$\", fontsize=fs, pad=10)\n axs[idx, idy].set_xticklabels([\"\", \"\", \"\", \"\"])\n if (idx, idy) == (1, 0):\n axs[idx, idy].set_ylabel(\"Power \\n Laplace kernel\", fontsize=fs, labelpad=10)\n axs[idx, idy].set_xlabel(\"Number of perturbations\", fontsize=fs)\n if (idx, idy) == (0, 1):\n axs[idx, idy].set_title(\"$d=2$, $\\Lambda(-4,0)$\", fontsize=fs, pad=10)\n axs[idx, idy].set_xticklabels([\"\", \"\", \"\"])\n if (idx, idy) == (1, 1):\n axs[idx, idy].set_xlabel(\"Number of perturbations\", fontsize=fs)\n if (idx, idy) == (0, 2):\n axs[idx, idy].set_title(\n \"MNIST, $\\Lambda(10,14)$, $\\Lambda(12,16)$\", fontsize=fs, pad=10\n )\n axs[idx, idy].set_xticklabels([\"\", \"\", \"\", \"\", \"\"])\n if (idx, idy) == (1, 2):\n axs[idx, idy].set_xlabel(\"Choice of alternative\", fontsize=fs)\n axs[idx, idy].xaxis.set_tick_params(pad=7)\n", "id": "4031672", "language": "Python", "matching_score": 1.0709830522537231, "max_stars_count": 10, "path": "plotting_functions.py" }, { "content": "import numpy as np\n\n\ndef create_weights(N, weights_type):\n \"\"\"\n Create weights as defined in Section 5.1 of our paper.\n inputs: N: number of bandwidths to test\n weights_type: \"uniform\" or \"decreasing\" or \"increasing\" or \"centred\"\n output: (N,) array of weights\n \"\"\"\n if weights_type == \"uniform\":\n weights = np.array([1 / N,] * N)\n elif weights_type == \"decreasing\":\n normaliser = sum([1 / i for i in range(1, N + 1)])\n weights = np.array([1 / (i * normaliser) for i in range(1, N + 1)])\n elif weights_type == \"increasing\":\n normaliser = sum([1 / i for i in range(1, 
N + 1)])\n weights = np.array([1 / ((N + 1 - i) * normaliser) for i in range(1, N + 1)])\n elif weights_type == \"centred\":\n if N % 2 == 1:\n normaliser = sum([1 / (abs((N + 1) / 2 - i) + 1) for i in range(1, N + 1)])\n weights = np.array(\n [1 / ((abs((N + 1) / 2 - i) + 1) * normaliser) for i in range(1, N + 1)]\n )\n else:\n normaliser = sum(\n [1 / (abs((N + 1) / 2 - i) + 0.5) for i in range(1, N + 1)]\n )\n weights = np.array(\n [\n 1 / ((abs((N + 1) / 2 - i) + 0.5) * normaliser)\n for i in range(1, N + 1)\n ]\n )\n else:\n raise ValueError(\n 'The value of weights_type should be \"uniform\" or'\n '\"decreasing\" or \"increasing\" or \"centred\".'\n )\n return weights\n", "id": "12064390", "language": "Python", "matching_score": 0.3775809407234192, "max_stars_count": 10, "path": "weights.py" }, { "content": "\"\"\"\nRunning this script creates two files results.csv and results.plk in \nthe 'user/raw' directory containing the relevant parameters and the estimated \npower/level of the tests. We use the following numbering of experiments: \nuniform mnist \n 1 2 \n 3 4\n 5 6\n 7 8\n 9 10\n 11 12\nExperiments i and i+1 are the same but using the uniform and mnist data.\nWe first run the uniform experiments followed by the mnist experiments.\nThe settings of all those experiments can be understood by their \nrelations to the figures presented in our paper:\nFigure 3 and 4: experiments 1 and 3\nFigure 5: experiments 2 and 4\nFigure 6: experiments 9 and 10\nFigure 7: experiments 1, 2, 3 and 4\nFigure 8: experiments 7 and 8\nFigure 9: experiments 1, 2, 11 and 12\nTable 1: experiments 5 and 6\nThe figures and table can be created by running the figures.py script.\n\"\"\"\n\nimport numpy as np\nimport itertools\nimport pandas as pd\nfrom mnist import download_mnist, load_mnist\nfrom pathlib import Path\nfrom seed import generate_seed\nfrom sample_test import sample_and_test_uniform, sample_and_test_mnist\nimport argparse\n\n# create results directory if it does not exist\nPath(\"user/raw\").mkdir(exist_ok=True, parents=True)\n\n# panda dataframe: lists of indices and entries\nindex_vals = []\nresults = []\n\n# parameters shared for all experiments\nalpha = 0.05\nB1 = 500\nB2 = 500\nB3 = 100\nkernel_types = [\"gaussian\", \"laplace\"]\nk_num = len(kernel_types)\napprox_types = [\"wild bootstrap\", \"permutation\"]\na_num = len(approx_types)\n\n############# UNIFORM #############\n# parameters for all uniform experiments\ns = 1\nperturbation_multipliers = [2.7, 7.3]\nbandwidth_multipliers = np.linspace(0.1, 1, 10)\ne_num = 2\np_num = 4\n\n# Experiment 1\nexp = \"1: uniform alternative\"\nrepetitions = 500\nsample_sizes = [500, 2000]\nL = [(-6, -2), (-4, 0), (-2, 2)]\nl_num = len(L)\nfunction_types = [\"uniform\", \"increasing\", \"decreasing\", \"centred\", \"ost\"]\nf_num = len(function_types)\n\nfor a, k, e, l, f, p in itertools.product(\n range(a_num), range(k_num), range(2), range(l_num), range(f_num), range(p_num)\n):\n if (a, l) not in [(1, 0), (1, 2)] and (e, p) != (1, 3):\n approx_type = approx_types[a]\n kernel_type = kernel_types[k]\n d = e + 1\n n = m = sample_sizes[e]\n perturbation_multiplier = perturbation_multipliers[e]\n l_minus, l_plus = L[l]\n l_minus_l_plus = L[l]\n function_type = function_types[f]\n perturbation = p + 1\n perturbation_or_Qi = perturbation\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, e, l, f, p, i)\n test_output_list.append(\n sample_and_test_uniform(\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n d,\n 
perturbation,\n s,\n perturbation_multiplier,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n m,\n n,\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 1 completed.')\n\n# Experiment 3\nexp = \"3: uniform alternative\"\nrepetitions = 500\nsample_sizes = [500, 2000]\nl_minus = l_plus = None\nl_minus_l_plus = None\nfunction_types = [\"median\", \"split\", \"split (doubled sample sizes)\"]\nf_num = len(function_types)\n\nfor a, k, e, f, p in itertools.product(\n range(a_num), range(k_num), range(e_num), range(f_num), range(p_num)\n):\n if (e, p) != (1, 3):\n approx_type = approx_types[a]\n kernel_type = kernel_types[k]\n d = e + 1\n n = m = sample_sizes[e]\n perturbation_multiplier = perturbation_multipliers[e]\n function_type = function_types[f]\n perturbation = p + 1\n perturbation_or_Qi = perturbation\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, e, 3, f, p, i)\n test_output_list.append(\n sample_and_test_uniform(\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n d,\n perturbation,\n s,\n perturbation_multiplier,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n m,\n n,\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 3 completed.')\n\n# Experiment 5\nexp = \"5: uniform null\"\nrepetitions = 5000\nsample_sizes = [500, 2000]\nfunction_types = [\n \"uniform\",\n \"increasing\",\n \"decreasing\",\n \"centred\",\n \"ost\",\n \"median\",\n \"split\",\n]\nf_num = len(function_types)\nL = [(-4, 0)]\nl_num = len(L)\nl_minus, l_plus = L[0]\nl_minus_l_plus = L[0]\nperturbation = 0\nperturbation_or_Qi = perturbation\n\nfor a, k, e, f in itertools.product(\n range(a_num), range(k_num), range(e_num), range(f_num)\n):\n approx_type = approx_types[a]\n kernel_type = kernel_types[k]\n d = e + 1\n n = m = sample_sizes[e]\n function_type = function_types[f]\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, e, 0, f, 5, i)\n test_output_list.append(\n sample_and_test_uniform(\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n d,\n perturbation,\n s,\n perturbation_multiplier,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n m,\n n,\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 5 completed.')\n\n# Experiment 7\nexp = \"7: uniform alternative\"\nrepetitions = 500\nsample_sizes_m = [100, 250]\nsample_sizes_n = [1000, 2000, 3000, 4000, 5000]\nv_num = len(sample_sizes_n)\nfunction_types = [\"uniform\", \"increasing\", \"decreasing\", \"centred\"]\nf_num = len(function_types)\nl_minus, l_plus = (-4, 0)\nl_minus_l_plus = (-4, 0)\nperturbations = [3, 2]\napprox_type = \"permutation\"\n\nfor k, e, f, v in itertools.product(\n range(k_num), range(e_num), range(f_num), range(v_num)\n):\n kernel_type = kernel_types[k]\n d = e + 1\n n = sample_sizes_n[v]\n m = sample_sizes_m[e]\n perturbation_multiplier = perturbation_multipliers[e]\n function_type 
= function_types[f]\n perturbation = perturbations[e]\n perturbation_or_Qi = perturbation\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, e, 3, f, v, i)\n test_output_list.append(\n sample_and_test_uniform(\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n d,\n perturbation,\n s,\n perturbation_multiplier,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n m,\n n,\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 7 completed.')\n\n# Experiment 9\nexp = \"9: uniform alternative\"\nrepetitions = 500\nsample_sizes = [500, 2000]\nfunction_types = [\"uniform\", \"centred\"]\nf_num = len(function_types)\nL = [(-2, -2), (-3, -1), (-4, 0), (-5, 1), (-6, 2), (-7, 3), (-8, 4), (-9, 5)]\nl_num = len(L)\nperturbations = [3, 2]\napprox_type = \"wild bootstrap\"\n\nfor k, e, l, f in itertools.product(\n range(k_num), range(e_num), range(l_num), range(f_num)\n):\n kernel_type = kernel_types[k]\n d = e + 1\n n = m = sample_sizes[e]\n perturbation_multiplier = perturbation_multipliers[e]\n l_minus, l_plus = L[l]\n l_minus_l_plus = L[l]\n function_type = function_types[f]\n perturbation = perturbations[e]\n p = perturbation - 1\n perturbation_or_Qi = perturbation\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, e, l, f, p, i)\n test_output_list.append(\n sample_and_test_uniform(\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n d,\n perturbation,\n s,\n perturbation_multiplier,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n m,\n n,\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 9 completed.')\n\n# Experiment 11\nexp = \"11: uniform alternative\"\nrepetitions = 500\nfunction_types = [\"ost\"]\nf = 0\nfunction_type = function_types[f]\nl_minus, l_plus = (-4, 0)\nl_minus_l_plus = (-4, 0)\nl = 3\napprox_type = None\nsample_sizes = [\n [int(i * 10 ** 5) for i in [0.25, 0.5, 0.75, 1]],\n [int(i * 10 ** 5) for i in [2.5, 5, 7.5, 10]],\n]\nv_num = len(sample_sizes[0])\n\nfor v, k, e, p in itertools.product(\n range(v_num), range(k_num), range(e_num), range(p_num)\n):\n if (e, p) != (1, 3):\n kernel_type = kernel_types[k]\n d = e + 1\n n = m = sample_sizes[e][v]\n perturbation_multiplier = perturbation_multipliers[e]\n function_type = function_types[f]\n perturbation = p + 1\n perturbation_or_Qi = perturbation\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, e, 3, 0, p, i)\n test_output_list.append(\n sample_and_test_uniform(\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n d,\n perturbation,\n s,\n perturbation_multiplier,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n m,\n n,\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 11 completed.')\n\n\n############# MNIST #############\n# parameters for all mnist experiments\np_num = 5\nd = None\nQ_list = [\"Q1\", \"Q2\", \"Q3\", \"Q4\", 
\"Q5\"]\nbandwidth_multipliers = np.array([2 ** i for i in range(10, 21)])\nPath(\"mnist_dataset\").mkdir(exist_ok=True)\nif Path(\"mnist_dataset/mnist_7x7.data\").is_file() == False:\n download_mnist()\nP, Q_list = load_mnist()\nQ_list_str = [\"Q1\", \"Q2\", \"Q3\", \"Q4\", \"Q5\"]\n\n# Experiment 2\nexp = \"2: mnist alternative\"\nrepetitions = 500\nsample_size = 500\nfunction_types = [\"uniform\", \"increasing\", \"decreasing\", \"centred\", \"ost\"]\nf_num = len(function_types)\nL = [[(8, 12), (10, 14), (12, 16)], [(10, 14), (12, 16), (14, 18)]]\nl_num = len(L[0])\n\nfor a, k, l, f, p in itertools.product(\n range(a_num), range(k_num), range(l_num), range(f_num), range(p_num)\n):\n if (a, l) not in [(1, 0), (1, 2)]:\n approx_type = approx_types[a]\n kernel_type = kernel_types[k]\n n = m = sample_size\n l_minus, l_plus = L[k][l]\n l_minus_l_plus = L[k][l]\n function_type = function_types[f]\n Qi = Q_list[p]\n perturbation_or_Qi = Q_list_str[p]\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, 2, l, f, p, i)\n test_output_list.append(\n sample_and_test_mnist(\n P,\n Qi,\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n m,\n n,\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 2 completed.')\n\n# Experiment 4\nexp = \"4: mnist alternative\"\nrepetitions = 500\nsample_size = 500\nfunction_types = [\"median\", \"split\", \"split (doubled sample sizes)\"]\nf_num = len(function_types)\nl_minus = l_plus = None\nl_minus_l_plus = None\n\nfor a, k, f, p in itertools.product(\n range(a_num), range(k_num), range(f_num), range(p_num)\n):\n approx_type = approx_types[a]\n kernel_type = kernel_types[k]\n n = m = sample_size\n function_type = function_types[f]\n Qi = Q_list[p]\n perturbation_or_Qi = Q_list_str[p]\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, 2, 3, f, p, i)\n test_output_list.append(\n sample_and_test_mnist(\n P,\n Qi,\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n m,\n n,\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 4 completed.')\n\n# Experiment 6\nexp = \"6: mnist null\"\nrepetitions = 5000\nsample_size = 500\nfunction_types = [\n \"uniform\",\n \"increasing\",\n \"decreasing\",\n \"centred\",\n \"ost\",\n \"median\",\n \"split\",\n]\nf_num = len(function_types)\nL = [(10, 14), (12, 16)]\n\nfor a, k, f in itertools.product(range(a_num), range(k_num), range(f_num)):\n approx_type = approx_types[a]\n kernel_type = kernel_types[k]\n n = m = sample_size\n function_type = function_types[f]\n l_minus, l_plus = L[k]\n l_minus_l_plus = L[k]\n perturbation_or_Qi = \"P\"\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, 2, l, f, 5, i)\n test_output_list.append(\n sample_and_test_mnist(\n P,\n P,\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n 
m,\n n,\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 6 completed.')\n\n# Experiment 8\nexp = \"8: mnist alternative\"\nrepetitions = 500\nsample_size_m = 100\nsample_sizes_n = [200, 400, 600, 800, 1000]\nv_num = len(sample_sizes_n)\nfunction_types = [\"uniform\", \"increasing\", \"decreasing\", \"centred\"]\nf_num = len(function_types)\nL = [(10, 14), (12, 16)]\napprox_type = \"permutation\"\n\nfor k, f, v in itertools.product(range(k_num), range(f_num), range(v_num)):\n kernel_type = kernel_types[k]\n n = sample_sizes_n[v]\n m = sample_size_m\n function_type = function_types[f]\n Qi = Q_list[2]\n perturbation_or_Qi = Q_list_str[2]\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, 2, 3, f, v, i)\n test_output_list.append(\n sample_and_test_mnist(\n P,\n Qi,\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n m,\n n,\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 8 completed.')\n\n# Experiment 10\nexp = \"10: mnist alternative\"\nrepetitions = 500\nsample_sizes = 500\nfunction_types = [\"uniform\", \"centred\"]\nf_num = len(function_types)\nL = [\n [(12, 12), (11, 13), (10, 14), (9, 15), (8, 16), (7, 17), (6, 18), (5, 19)],\n [(14, 14), (13, 15), (12, 16), (11, 17), (10, 18), (9, 19), (8, 20), (7, 21)],\n]\nl_num = len(L[0])\napprox_type = \"wild bootstrap\"\n\nfor k, l, f in itertools.product(range(k_num), range(l_num), range(f_num)):\n kernel_type = kernel_types[k]\n n = m = sample_sizes\n l_minus, l_plus = L[k][l]\n l_minus_l_plus = L[k][l]\n function_type = function_types[f]\n Qi = Q_list[2]\n perturbation_or_Qi = Q_list_str[2]\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, 2, l, f, v, i)\n test_output_list.append(\n sample_and_test_mnist(\n P,\n Qi,\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n m,\n n,\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 10 completed.')\n\n# Experiment 12\nexp = \"12: mnist alternative\"\nrepetitions = 500\nfunction_types = [\"ost\"]\nf = 0\nfunction_type = function_types[f]\nL = [(10, 14), (12, 16)]\nl = 3\napprox_type = None\nsample_sizes = [int(i * 10 ** 5) for i in [0.5, 1, 1.5, 2, 2.5]]\nv_num = len(sample_sizes)\n\nfor v, k, p in itertools.product(range(v_num), range(k_num), range(p_num)):\n kernel_type = kernel_types[k]\n n = m = sample_sizes[v]\n function_type = function_types[f]\n l_minus, l_plus = L[k]\n l_minus_l_plus = L[k]\n Qi = Q_list[p]\n perturbation_or_Qi = Q_list_str[p]\n test_output_list = []\n for i in range(repetitions):\n seed = generate_seed(k, 2, 3, f, p, i)\n test_output_list.append(\n sample_and_test_mnist(\n P,\n Qi,\n function_type,\n seed,\n kernel_type,\n approx_type,\n m,\n n,\n alpha,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n bandwidth_multipliers,\n )\n )\n power = np.mean(test_output_list)\n index_val = (\n exp,\n d,\n repetitions,\n m,\n n,\n approx_type,\n 
kernel_type,\n l_minus_l_plus,\n function_type,\n perturbation_or_Qi,\n )\n index_vals.append(index_val)\n results.append(power)\nprint('Experiment 12 completed.')\n\n# save panda dataframe\nindex_names = (\n \"experiment\",\n \"d\",\n \"repetitions\",\n \"m\",\n \"n\",\n \"approx_type\",\n \"kernel_type\",\n \"l_minus_l_plus\",\n \"function_type\",\n \"perturbation_or_Qi\",\n)\nindex = pd.MultiIndex.from_tuples(index_vals, names=index_names)\nresults_df = pd.Series(results, index=index).to_frame(\"power\")\nresults_df.reset_index().to_csv(\"user/raw/results.csv\")\nresults_df.to_pickle(\"user/raw/results.pkl\")\n", "id": "3614198", "language": "Python", "matching_score": 4.209716796875, "max_stars_count": 10, "path": "experiments.py" }, { "content": "\"\"\"\nRunning this script generates the figures (Figures 1-9) and table (Table 1) from our paper.\n\"\"\"\n\nimport numpy as np\nimport itertools\nfrom matplotlib import rc, rcParams\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport plotting_functions\nfrom pathlib import Path\nimport dataframe_image as dfi\nimport argparse\n\nparser = argparse.ArgumentParser(\n description=\"If the command is run with '-user' the raw data generated by the user \"\n \"(user/raw/results.pkl) is used to create the figures (saved in user/figures).\"\n \"If the command is run without '-user' the raw data we have already generated \"\n \"(paper/raw/results.pkl) is used to create the figures (saved in paper/figures).\"\n)\nparser.add_argument('-user', action='store_true')\nuser = parser.parse_args().user\n\n# running the terminal command 'python figures.py'\n# or running this script from an IDE\n# gives user = False\n\n# running the terminal command 'python figures.py -user'\n# gives user = True\n# to do the same when running this script from an IDE simply uncomment the following line:\n# user = True \n\nif user:\n user_or_paper = \"user/\"\nelse:\n user_or_paper = \"paper/\"\n\n# create figures directory if it does not exist\nPath(user_or_paper + \"figures\").mkdir(exist_ok=True, parents=True)\n\n# load panda dataframe \n# either paper/raw/results.pkl or user/raw/results.pkl\n# depending on '-user' or user = True as explained above\nPath(\"user/raw\").mkdir(exist_ok=True, parents=True)\nresults_df = pd.read_pickle(user_or_paper + \"raw/results.pkl\")\n\n# Parameters for plots\nfs = 32\nrcParams.update({\"font.size\": fs})\nrc(\"font\", **{\"family\": \"serif\", \"serif\": [\"Computer Modern\"]})\nrc(\"text\", usetex=True)\n\ndef mutate(List):\n \"\"\"\n Mutate a list into a list of lists except for elements of type slice.\n input: List: list\n \"\"\"\n for i in range(len(List)):\n if type(List[i]) not in [list, slice]:\n List[i] = [List[i]] \n\n\ndef get_data(\n df,\n exp_number,\n d=slice(None),\n approx_type=slice(None),\n kernel_type=slice(None),\n l_minus_l_plus=slice(None),\n):\n \"\"\"\n Extract the relevant data points given the dataframe results.pkl for the different experiments.\n inputs: df: dataframe\n exp_number: integer between 1 and 12\n d: integer between 1 and 2\n approx_types: \"permutation\" or \"wild bootstrap\"\n kernel_type: \"gaussian\" or \"laplace\"\n l_minus_l_plus: tuple of the form (l_minus,l_plus)\n output: array consisting of relevant data points for the specific experiment\n \"\"\"\n exps = [\n \"1: uniform alternative\",\n \"2: mnist alternative\",\n \"3: uniform alternative\",\n \"4: mnist alternative\",\n \"5: uniform null\",\n \"6: mnist null\",\n \"7: uniform alternative\",\n \"8: mnist alternative\",\n \"9: 
uniform alternative\",\n \"10: mnist alternative\",\n \"11: uniform alternative\",\n \"12: mnist alternative\",\n ]\n exp = exps[exp_number - 1]\n function_type = slice(None)\n if exp_number in [1, 2]:\n function_type = [\"uniform\", \"increasing\", \"decreasing\", \"centred\", \"ost\"]\n List = [\n exp,\n d,\n slice(None),\n slice(None),\n slice(None),\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n ]\n mutate(List)\n return df.loc[tuple(List)].unstack().to_numpy()\n elif exp_number in [3, 4]:\n function_type = [\"median\", \"split\", \"split (doubled sample sizes)\"]\n List = [\n exp,\n d,\n slice(None),\n slice(None),\n slice(None),\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n ]\n mutate(List)\n return df.loc[tuple(List)].unstack().to_numpy()\n elif exp_number in [7, 8]:\n List = [\n exp,\n d,\n slice(None),\n slice(None),\n slice(None),\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n ]\n mutate(List)\n return (\n df.loc[tuple(List)].swaplevel(\"n\", \"perturbation_or_Qi\").unstack().to_numpy()\n )\n elif exp_number in [9, 10]:\n if l_minus_l_plus != slice(None):\n raise ValueError(\n \"l_minus_l_plus should not be specified for exp_number = 9 or 10.\"\n )\n List = [\n exp,\n d,\n slice(None),\n slice(None),\n slice(None),\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n ]\n mutate(List)\n return (\n df.loc[tuple(List)]\n .swaplevel(\"perturbation_or_Qi\", \"l_minus_l_plus\")\n .unstack()\n .to_numpy()\n )\n elif exp_number in [11, 12]:\n if approx_type != slice(None):\n raise ValueError(\n \"approx_type should either not be specified for exp_number = 11 or 12.\"\n )\n List = [\n exp,\n d,\n slice(None),\n slice(None),\n slice(None),\n approx_type,\n kernel_type,\n l_minus_l_plus,\n function_type,\n ]\n mutate(List)\n return df.loc[tuple(List)].unstack().to_numpy()\n else:\n raise ValueError(\n \"exp_number should be an integer between 1 and 12 (excluding 5 and 6).\"\n )\n\n\ndef table_1(df):\n \"\"\"\n Extract data relevant to constructing the table labelled Table 1 in our paper.\n input: df: dataframe\n output: dataframe entries corresponding to Table 1.\n \"\"\"\n exps = [\n \"1: uniform alternative\",\n \"2: mnist alternative\",\n \"3: uniform alternative\",\n \"4: mnist alternative\",\n \"5: uniform null\",\n \"6: mnist null\",\n \"7: uniform alternative\",\n \"8: uniform mnist\",\n \"9: uniform alternative\",\n \"10: uniform mnist\",\n \"11: uniform alternative\",\n \"12: uniform mnist\",\n ]\n d = slice(None)\n approx_type = [\"wild bootstrap\", \"permutation\"]\n kernel_type = [\"gaussian\", \"laplace\"]\n l_minus_l_plus = slice(None)\n exp = [exps[5 - 1], exps[6 - 1]]\n function_type = [\n \"uniform\",\n \"centred\",\n \"increasing\",\n \"decreasing\",\n \"median\",\n \"split\",\n \"ost\",\n ]\n List = [\n exp,\n slice(None),\n d,\n slice(None),\n slice(None),\n slice(None),\n kernel_type,\n approx_type,\n slice(None),\n function_type,\n ]\n mutate(List)\n columns = [(\"power\", function_type[i]) for i in range(len(function_type))]\n levels_order = [\n \"experiment\",\n \"perturbation_or_Qi\",\n \"d\",\n \"repetitions\",\n \"m\",\n \"n\",\n \"kernel_type\",\n \"approx_type\",\n \"l_minus_l_plus\",\n \"function_type\",\n ]\n return (\n df.reorder_levels(levels_order)\n .loc[tuple(List)]\n .reindex(index=[\"wild bootstrap\", \"permutation\"], level=7)\n .unstack()[columns]\n )\n\napprox_types = [\"wild bootstrap\", \"permutation\"]\nkernel_types = [\"gaussian\", \"laplace\"]\n\n# Table 
1\ndfi.export(table_1(results_df), user_or_paper + \"figures/table_1.png\")\n\n# Figure 1\nmult = 3\nwidth = 433.62 / 72.27 * mult\nheight = width * (5 ** 0.5 - 1) / 2 * (2 / 3)\nf, axs = plt.subplots(1, 2, figsize=(width, height - 1), sharey=True)\nf.tight_layout()\nf.subplots_adjust(wspace=0.1, hspace=0.45)\n\nplotting_functions.plot_fig_1(f, axs)\naxs[0].legend(\n fontsize=fs,\n ncol=2,\n handleheight=0.5,\n labelspacing=0.05,\n columnspacing=0.6,\n loc=\"lower center\",\n bbox_to_anchor=(1, -0.47),\n)\nf.savefig(user_or_paper + \"figures/figure_1.png\", dpi=300, bbox_inches=\"tight\")\n\n# Figure 2\nmult = 3\nwidth = 433.62 / 72.27 * mult\nheight = width * (5 ** 0.5 - 1) / 2 * (2 / 3)\nf, axs = plt.subplots(1, 3, figsize=(width, height - 1.2))\nf.tight_layout()\nf.subplots_adjust(wspace=0.15, hspace=0.25)\n\nplotting_functions.plot_fig_2(f, axs)\nf.savefig(user_or_paper + \"figures/figure_2.png\", dpi=300, bbox_inches=\"tight\")\n\n# Figure 3\nmult = 3\nwidth = 433.62 / 72.27 * mult\nheight = width * (5 ** 0.5 - 1) / 2 * (2 / 3)\nf, axs = plt.subplots(2, 3, figsize=(width, height + 2), sharey=True, sharex=True)\nf.tight_layout()\nf.subplots_adjust(wspace=0.03, hspace=0.03)\n\nL = [(-6, -2), (-4, 0), (-2, 2)]\nd = 1\na = 0\nfor k, l in itertools.product(range(2), range(3)):\n idxy = (k, l)\n power_ms = get_data(\n results_df, 3, d=d, approx_type=approx_types[a], kernel_type=kernel_types[k]\n )\n power = get_data(\n results_df,\n 1,\n d=d,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n l_minus_l_plus=L[l],\n )\n plotting_functions.plot_fig_3_4(*idxy, f, axs, power, power_ms, *L[l])\n\naxs[1, 1].legend(\n fontsize=fs,\n ncol=4,\n handleheight=0.5,\n labelspacing=0.05,\n columnspacing=0.6,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -0.65),\n)\nf.savefig(user_or_paper + \"figures/figure_3.png\", dpi=300, bbox_inches=\"tight\")\n\n# Figure 4\nmult = 3\nwidth = 433.62 / 72.27 * mult\nheight = width * (5 ** 0.5 - 1) / 2 * (2 / 3)\nf, axs = plt.subplots(2, 3, figsize=(width, height + 2), sharey=True, sharex=True)\nf.tight_layout()\nf.subplots_adjust(wspace=0.03, hspace=0.03)\n\nL = [(-6, -2), (-4, 0), (-2, 2)]\nd = 2\na = 0\nfor k, l in itertools.product(range(2), range(3)):\n idxy = (k, l)\n power_ms = get_data(\n results_df, 3, d=d, approx_type=approx_types[a], kernel_type=kernel_types[k]\n )\n power = get_data(\n results_df,\n 1,\n d=d,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n l_minus_l_plus=L[l],\n )\n plotting_functions.plot_fig_3_4(*idxy, f, axs, power, power_ms, *L[l])\n\naxs[1, 1].legend(\n fontsize=fs,\n ncol=4,\n handleheight=0.5,\n labelspacing=0.05,\n columnspacing=0.6,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -0.65),\n)\nf.savefig(user_or_paper + \"figures/figure_4.png\", dpi=300, bbox_inches=\"tight\")\n\n# Figure 5\nmult = 3\nwidth = 433.62 / 72.27 * mult\nheight = width * (5 ** 0.5 - 1) / 2 * (2 / 3)\nf, axs = plt.subplots(2, 3, figsize=(width, height + 2.2), sharey=True, sharex=True)\nf.tight_layout()\nf.subplots_adjust(wspace=0.03, hspace=0.03)\n\nL = [[(8, 12), (10, 14), (12, 16)], [(10, 14), (12, 16), (14, 18)]]\na = 0\nfor k, l in itertools.product(range(2), range(3)):\n idxy = (k, l)\n power_ms = get_data(\n results_df, 4, approx_type=approx_types[a], kernel_type=kernel_types[k]\n )\n power = get_data(\n results_df,\n 2,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n l_minus_l_plus=L[k][l],\n )\n plotting_functions.plot_fig_5(*idxy, f, axs, power, power_ms)\n\naxs[1, 1].legend(\n fontsize=fs,\n ncol=4,\n 
handleheight=0.5,\n labelspacing=0.05,\n columnspacing=0.6,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -0.65),\n)\nf.savefig(user_or_paper + \"figures/figure_5.png\", dpi=300, bbox_inches=\"tight\")\n\n# Figure 6\nmult = 3\nwidth = 433.62 / 72.27 * mult\nheight = width * (5 ** 0.5 - 1) / 2 * (2 / 3)\nf, axs = plt.subplots(\n 2, 3, figsize=(width, height + 2), sharey=True, sharex=True\n) # ,figsize=(5.326,3.562)) ,sharey=True)\nf.tight_layout()\nf.subplots_adjust(wspace=0.03, hspace=0.03)\n\na = 0\nfor k, e in itertools.product(range(2), range(3)):\n idxy = (k, e)\n if e == 2:\n power = get_data(\n results_df, 10, approx_type=approx_types[a], kernel_type=kernel_types[k]\n )\n else:\n power = get_data(\n results_df,\n 9,\n d=e + 1,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n )\n plotting_functions.plot_fig_6(*idxy, f, axs, power)\n\naxs[1, 1].legend(\n fontsize=fs,\n ncol=2,\n handleheight=0.5,\n labelspacing=0.05,\n columnspacing=0.6,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -0.57),\n)\nf.savefig(user_or_paper + \"figures/figure_6.png\", dpi=300, bbox_inches=\"tight\")\n\n# Figure 7\nmult = 3\nwidth = 433.62 / 72.27 * mult\nheight = width * (5 ** 0.5 - 1) / 2 * (2 / 3)\nf, axs = plt.subplots(2, 3, figsize=(width, height + 2), sharey=True)\nf.tight_layout()\nf.subplots_adjust(wspace=0.03, hspace=0.03)\n\nL = [(-4, 0), [(10, 14), (12, 16)]]\na = 0\nfor k, e in itertools.product(range(2), range(3)):\n idxy = (k, e)\n if e == 2:\n a = 0\n power_ms_wb = get_data(\n results_df, 4, approx_type=approx_types[a], kernel_type=kernel_types[k]\n )[:-1]\n power_wb = get_data(\n results_df,\n 2,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n l_minus_l_plus=L[1][k],\n )\n a = 1\n power_ms_p = get_data(\n results_df, 4, approx_type=approx_types[a], kernel_type=kernel_types[k]\n )[:-1]\n power_p = get_data(\n results_df,\n 2,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n l_minus_l_plus=L[1][k],\n )\n power_ms = power_ms_wb - power_ms_p\n power = power_wb - power_p\n else:\n a = 0\n power_ms_wb = get_data(\n results_df,\n 3,\n d=e + 1,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n )[:-1]\n power_wb = get_data(\n results_df,\n 1,\n d=e + 1,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n l_minus_l_plus=L[0],\n )\n a = 1\n power_ms_p = get_data(\n results_df,\n 3,\n d=e + 1,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n )[:-1]\n power_p = get_data(\n results_df,\n 1,\n d=e + 1,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n l_minus_l_plus=L[0],\n )\n power_ms = power_ms_wb - power_ms_p\n power = power_wb - power_p\n plotting_functions.plot_fig_7(*idxy, f, axs, power, power_ms)\n\naxs[1, 1].legend(\n fontsize=fs,\n ncol=3,\n handleheight=0.5,\n labelspacing=0.05,\n columnspacing=0.6,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -0.67),\n)\nf.savefig(user_or_paper + \"figures/figure_7.png\", dpi=300, bbox_inches=\"tight\")\n\n# Figure 8\nmult = 3\nwidth = 433.62 / 72.27 * mult\nheight = width * (5 ** 0.5 - 1) / 2 * (2 / 3)\nf, axs = plt.subplots(2, 3, figsize=(width, height + 2), sharey=True)\nf.tight_layout()\nf.subplots_adjust(wspace=0.03, hspace=0.03)\n\na = 1\nfor k, e in itertools.product(range(2), range(3)):\n idxy = (k, e)\n if e == 2:\n power = get_data(\n results_df, 8, approx_type=approx_types[a], kernel_type=kernel_types[k]\n )\n else:\n power = get_data(\n results_df,\n 7,\n d=e + 1,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n )\n 
plotting_functions.plot_fig_8(*idxy, f, axs, power)\n\naxs[1, 1].legend(\n fontsize=fs,\n ncol=2,\n handleheight=0.5,\n labelspacing=0.05,\n columnspacing=0.6,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -0.81),\n)\nf.savefig(user_or_paper + \"figures/figure_8.png\", dpi=300, bbox_inches=\"tight\")\n\n# Figure 9\nmult = 3\nwidth = 433.62 / 72.27 * mult\nheight = width * (5 ** 0.5 - 1) / 2 * (2 / 3)\nf, axs = plt.subplots(2, 3, figsize=(width, height + 2), sharey=True)\nf.tight_layout()\nf.subplots_adjust(wspace=0.03, hspace=0.03)\n\nL = [(-4, 0), [(10, 14), (12, 16)]]\na = 0\nfor k, e in itertools.product(range(2), range(3)):\n idxy = (k, e)\n if e == 2:\n power = get_data(results_df, 12, kernel_type=kernel_types[k])\n power_u = get_data(\n results_df,\n 2,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n l_minus_l_plus=L[1][k],\n )[0]\n power_o = get_data(\n results_df,\n 2,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n l_minus_l_plus=L[1][k],\n )[4]\n else:\n power = get_data(results_df, 11, d=e + 1, kernel_type=kernel_types[k])\n power_u = get_data(\n results_df,\n 1,\n d=e + 1,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n l_minus_l_plus=L[0],\n )[0]\n power_o = get_data(\n results_df,\n 1,\n d=e + 1,\n approx_type=approx_types[a],\n kernel_type=kernel_types[k],\n l_minus_l_plus=L[0],\n )[4]\n plotting_functions.plot_fig_9(*idxy, f, axs, power, power_u, power_o)\n\naxs[1, 0].legend(\n fontsize=fs,\n ncol=1,\n handleheight=0.5,\n labelspacing=0.05,\n handlelength=1,\n columnspacing=0.2,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -1.12),\n)\naxs[1, 1].legend(\n fontsize=fs,\n ncol=1,\n handleheight=0.5,\n labelspacing=0.05,\n handlelength=1,\n columnspacing=0.2,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -1.12),\n)\naxs[1, 2].legend(\n fontsize=fs,\n ncol=1,\n handleheight=0.5,\n labelspacing=0.05,\n handlelength=1,\n columnspacing=0.2,\n loc=\"lower center\",\n bbox_to_anchor=(0.5, -1.239),\n)\nf.savefig(user_or_paper + \"figures/figure_9.png\", dpi=300, bbox_inches=\"tight\")\n\nprint(\n \"Figures have been generated using \" + user_or_paper + \"raw/results.pkl\"\n \" and have been saved in \" + user_or_paper + \"figures.\"\n)\n", "id": "7999558", "language": "Python", "matching_score": 3.031010389328003, "max_stars_count": 10, "path": "figures.py" }, { "content": "import numpy as np\nfrom tests import mmdagg, mmd_median_test, mmd_split_test\nfrom ost import ost\nfrom sampling import f_theta_sampler\n\n\ndef sample_and_test_uniform(\n function_type, seed, kernel_type, approx_type, m, n, d, p, s, \n perturbation_multiplier, alpha, l_minus, l_plus, B1, B2, B3, bandwidth_multipliers\n): \n \"\"\"\n Sample from uniform and perturbed uniform density and run two-sample test.\n inputs: function_type: \"uniform\", \"increasing\", \"decreasing\", \"centred\", \"ost\", \n \"median\", \"split\" or \"split (doubled sample sizes)\"\n seed: integer random seed\n kernel_type: \"gaussian\" or \"laplace\": \n approx_type: \"permutation\" (for MMD_a estimate Eq. (3)) \n or \"wild bootstrap\" (for MMD_b estimate Eq. (6))\n m: non-negative integer (sample size for uniform distribution)\n n: non-negative integer (sample size for perturbed uniform distribution)\n d: non-negative integer (dimension of samples)\n p: non-negative integer (number of permutations)\n s: positive number (smoothness parameter of Sobolev ball (Eq. (1))\n perturbation_multiplier: perturbation_multiplier: positive number (c_d in Eq. 
(17)) \n alpha: real number in (0,1) (level of the test)\n l_minus: integer (for collection of bandwidths Eq. (16) in our paper)\n l_plus: integer (for collection of bandwidths Eq. (16) in our paper)\n B1: number of simulated test statistics to estimate the quantiles\n B2: number of simulated test statistics to estimate the probability in Eq. (13) in our paper\n B3: number of iterations for the bisection method\n bandwidth_multipliers: array such that mmd_split_test function (used for \"split\" \n and \"split (doubled sample sizes)\") selects 'optimal' bandwidth from\n collection_bandwidths = [c*bandwidth_median for c in bandwidth_multipliers]\n output: result of test (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n if function_type == \"split (doubled sample sizes)\":\n m = 2 * m\n n = 2 * n\n rs = np.random.RandomState(seed)\n if p == 0:\n X = rs.uniform(0, 1, (m, d)) \n Y = rs.uniform(0, 1, (n, d)) \n else:\n h = 1/p\n X = f_theta_sampler(seed, seed, m, p, s, perturbation_multiplier, d)\n Y = rs.uniform(0, 1, (n, d))\n if function_type == \"median\":\n return mmd_median_test(\n seed, X, Y, alpha, kernel_type, approx_type, B1, bandwidth_multiplier=1\n )\n elif function_type in [\"split\", \"split (doubled sample sizes)\"]:\n return mmd_split_test(\n seed, X, Y, alpha, kernel_type, approx_type, B1, bandwidth_multipliers\n )\n elif function_type == \"ost\":\n return ost(seed, X, Y, alpha, kernel_type, l_minus, l_plus)\n elif function_type in [\"uniform\", \"increasing\", \"decreasing\", \"centred\"]:\n return mmdagg(\n seed, X, Y, alpha, kernel_type, approx_type, \n function_type, l_minus, l_plus, B1, B2, B3\n )\n else:\n raise ValueError(\n 'Undefined function_type: function_type should be \"median\", \"split\",' \n '\"split (doubled sample sizes)\", \"ost\", \"uniform\", \"increasing\", '\n '\"decreasing\" or \"centred\".'\n ) \n\n\ndef sample_and_test_mnist(\n P, Q, function_type, seed, kernel_type, approx_type, m, n, \n alpha, l_minus, l_plus, B1, B2, B3, bandwidth_multipliers\n): \n \"\"\"\n Sample from dataset P and dataset Q and run two-sample test.\n inputs: P: dataset of shape (number_points, dimension) from which to sample\n Q: dataset of shape (number_points, dimension) from which to sample\n function_type: \"uniform\", \"increasing\", \"decreasing\", \"centred\", \"ost\", \n \"median\", \"split\" or \"split (doubled sample sizes)\"\n seed: integer random seed\n kernel_type: \"gaussian\" or \"laplace\":\n approx_type: \"permutation\" (for MMD_a estimate Eq. (3)) \n or \"wild bootstrap\" (for MMD_b estimate Eq. (6))\n m: non-negative integer (sample size for uniform distribution)\n n: non-negative integer (sample size for perturbed uniform distribution)\n alpha: real number in (0,1) (level of the test)\n l_minus: integer (for collection of bandwidths Eq. (16) in our paper)\n l_plus: integer (for collection of bandwidths Eq. (16) in our paper)\n B1: number of simulated test statistics to estimate the quantiles\n B2: number of simulated test statistics to estimate the probability in Eq. 
(13) in our paper\n B3: number of iterations for the bisection method\n bandwidth_multipliers: array such that mmd_split_test function (used for \"split\" \n and \"split (doubled sample sizes)\") selects 'optimal' bandwidth from\n collection_bandwidths = [c*bandwidth for c in bandwidth_multipliers]\n output: result of test (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n rs = np.random.RandomState(seed)\n if function_type == \"split (doubled sample sizes)\":\n m = 2 * m\n n = 2 * n \n idx_X = rs.randint(len(P), size=m)\n X = P[idx_X, :]\n idx_Y = rs.randint(len(Q), size=n)\n Y = Q[idx_Y, :]\n if function_type == \"median\":\n return mmd_median_test(\n seed, X, Y, alpha, kernel_type, approx_type, B1, bandwidth_multiplier=1\n )\n elif function_type in [\"split\", \"split (doubled sample sizes)\"]:\n return mmd_split_test(\n seed, X, Y, alpha, kernel_type, approx_type, B1, bandwidth_multipliers\n )\n elif function_type == \"ost\":\n return ost(seed, X, Y, alpha, kernel_type, l_minus, l_plus)\n elif function_type in [\"uniform\", \"increasing\", \"decreasing\", \"centred\"]:\n return mmdagg(\n seed, X, Y, alpha, kernel_type, approx_type, \n function_type, l_minus, l_plus, B1, B2, B3\n )\n else:\n raise ValueError(\n 'Undefined function_type: function_type should be \"median\", \"split\",' \n '\"split (doubled sample sizes)\", \"ost\", \"uniform\", \"increasing\", '\n '\"decreasing\" or \"centred\".'\n ) \n", "id": "12591691", "language": "Python", "matching_score": 2.9798521995544434, "max_stars_count": 10, "path": "sample_test.py" }, { "content": "import numpy as np\nimport itertools\n\n\ndef G(x):\n \"\"\"\n Function G defined in Section 5.4 of our paper.\n input: x: real number\n output: G(x): real number\n \"\"\"\n if -1 < x and x < -0.5:\n return np.exp(-1 / (1 - (4 * x + 3) ** 2))\n if -0.5 < x and x < 0:\n return - np.exp(-1 / ( 1 - (4 * x + 1) ** 2)) \n return 0 \n \n\ndef f_theta(x, p, s, perturbation_multiplier=1, seed=None):\n \"\"\"\n Function f_theta defined in in Section 5.4 (Eq. (17)) of our paper.\n inputs: x: (d,) array (point in R^d)\n p: non-negative integer (number of permutations)\n s: positive number (smoothness parameter of Sobolev ball (Eq. (1))\n perturbation_multiplier: positive number (c_d in Eq. (17))\n seed: integer random seed (samples theta in Eq. 
(17))\n output: real number f_theta(x) \n \"\"\"\n x = np.atleast_1d(x)\n d = x.shape[0]\n assert perturbation_multiplier * p ** (-s) * np.exp(-d) <= 1, \"density is negative\"\n np.random.seed(seed)\n theta = np.random.choice([-1,1], p ** d)\n output = 0\n I = list(itertools.product([i+1 for i in range(p)], repeat=d)) # set {1,...,p}^d\n for i in range(len(I)):\n j = I[i]\n output += theta[i] * np.prod([G(x[r] * p - j[r]) for r in range(d)])\n output *= p ** (-s) * perturbation_multiplier\n if np.min(x) >= 0 and np.max(x) <= 1:\n output += 1\n np.random.seed(None)\n return output\n \n\ndef f0(x):\n \"\"\"\n Probability density function of multi-dimensional uniform distribution.\n input: array\n output: probability density function evaluated at the input\n \"\"\"\n output = 0\n if np.min(x) >= 0 and np.max(x) <= 1:\n output += 1\n return output\n\n\ndef rejection_sampler(seed, density, d, density_max, number_samples, x_min, x_max):\n \"\"\"\n Sample from density using a rejection sampler.\n inputs: seed: integer random seed\n density: probability density function\n d: dimension of input of the density\n density_max: maximum of the density\n number_samples: number of samples\n x_min: density is 0 on (-\\infty,x_min)^d\n x_max: density is 0 on (x_max,\\infty)^d\n output: number_samples samples from density sampled from [x_min, x_max]^d\n \"\"\"\n samples = []\n count = 0\n rs = np.random.RandomState(seed)\n while count < number_samples:\n x = rs.uniform(x_min, x_max, d)\n y = rs.uniform(0, density_max)\n if y <= density(x):\n count += 1\n samples.append(x)\n return np.array(samples)\n \n\ndef f_theta_sampler(\n f_theta_seed, sampling_seed, number_samples, p, s, perturbation_multiplier, d\n):\n \"\"\"\n Sample from the probability density function f_theta.\n inputs: f_theta_seed: integer random seed for f_theta\n sampling_seed: integer random seed for rejection sampler\n number_samples: number of samples\n p: non-negative integer (number of permutations)\n s: positive number (smoothness parameter of Sobolev ball (Eq. (1))\n perturbation_multiplier: positive number (c_d in Eq. 
(17)) \n non-negative integer (dimension of input of density)\n output: number_samples samples from f_theta\n \"\"\"\n density_max = 1 + perturbation_multiplier * p ** (-s) * np.exp(-d) # maximum of f_theta\n return rejection_sampler(\n sampling_seed, \n lambda x: f_theta(x, p, s, perturbation_multiplier, f_theta_seed), \n d, \n density_max, \n number_samples, \n 0, \n 1,\n )\n", "id": "662465", "language": "Python", "matching_score": 0.42273664474487305, "max_stars_count": 10, "path": "sampling.py" }, { "content": "\"\"\"\nRun Gamma distribution experiment using data \nfrom the directory parametric/Gamma\nas proposed in Section 4.3 of our paper\nKSD Aggregated Goodness-of-fit Test\n<NAME>, <NAME>, <NAME>\nhttps://arxiv.org/pdf/2202.00824.pdf \nResults are saved as dataframes in the directory results/.\n\"\"\"\n\nfrom kernel import stein_kernel_matrices, ratio_ksd_stdev\nfrom ksd_single import ksd_parametric\nfrom ksdagg import ksdagg_parametric\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\nimport time\nimport argparse\n\n\ndef score_gamma(x, k, theta):\n return (k - 1) / x - 1 / theta\n\n\n# create results directory if it does not exist\nPath(\"results\").mkdir(exist_ok=True, parents=True)\n\n# panda dataframe: lists of indices and entries\nindex_vals = []\nresults = []\n\ntest_names = (\n \"ksdagg\",\n \"median\",\n \"split\",\n \"split_extra_data\",\n)\n\n# run all the experiments\nt = time.time()\nverbose = True\nweights_type = \"uniform\"\nkernel_type = \"imq\"\nbeta_imq = 0.5\n# B1 = 500 as in B1_parametric\n# B2 = 500 as in B2_parametric\nB3 = 50\nl_minus = 0\nl_plus = 10\nalpha = 0.05\nnumber_samples = 500\nperturbations = [0, 0.1, 0.2, 0.3, 0.4]\nrepetitions = 200\nfor s in range(len(perturbations)):\n perturbation = perturbations[s]\n k_p = 5\n k_q = k_p + perturbation\n theta_p = 5\n theta_q = theta_p\n rs = np.random.RandomState(s + 10)\n X_rep = rs.gamma(k_q, theta_q, (repetitions + 1, number_samples, 1))\n score_X_rep = score_gamma(X_rep, k_p, theta_p)\n B_parametric = np.load(\n \"parametric/Gamma/B_parametric\" + str(number_samples) + \".npy\"\n )\n B1_parametric = np.load(\n \"parametric/Gamma/B1_parametric\" + str(number_samples) + \".npy\"\n )\n B1_parametric_split = np.load(\n \"parametric/Gamma/B1_parametric_split\" + str(number_samples) + \".npy\"\n )\n B2_parametric = np.load(\n \"parametric/Gamma/B2_parametric\" + str(number_samples) + \".npy\"\n )\n median_bandwidth = np.load(\n \"parametric/Gamma/bandwidth\" + str(number_samples) + \".npy\"\n )\n if verbose:\n print(\" \")\n print(\"Starting s =\", s + 1, \"/\", len(perturbations))\n print(\"perturbation\", perturbation)\n print(\"bandwidth\", median_bandwidth)\n ksdagg_results = np.zeros(repetitions)\n median_results = np.zeros(repetitions)\n split_results = np.zeros(repetitions)\n split_extra_data_results = np.zeros(repetitions)\n for r in range(repetitions):\n X = X_rep[r]\n score_X = score_X_rep[r]\n X_extra = X_rep[r + 1]\n score_X_extra = score_X_rep[r + 1]\n\n # KSDAgg\n ksdagg_results[r] = ksdagg_parametric(\n X,\n score_X,\n alpha,\n beta_imq,\n kernel_type,\n weights_type,\n l_minus,\n l_plus,\n median_bandwidth,\n B1_parametric,\n B2_parametric,\n B3,\n )\n\n # Median\n median_results[r] = ksd_parametric(\n X, score_X, alpha, beta_imq, kernel_type, median_bandwidth, B_parametric\n )\n\n # Stein kernel matrices\n bandwidths_collection = np.array(\n [2**i * median_bandwidth for i in range(l_minus, l_plus + 1)]\n )\n stein_kernel_matrices_list = stein_kernel_matrices(\n X,\n 
score_X,\n kernel_type,\n bandwidths_collection,\n beta_imq,\n )\n stein_kernel_matrices_list_extra_data = stein_kernel_matrices(\n X_extra,\n score_X_extra,\n kernel_type,\n bandwidths_collection,\n beta_imq,\n )\n\n # Split\n split_size = int(number_samples // 2)\n ratio_values = []\n for i in range(len(stein_kernel_matrices_list)):\n H = stein_kernel_matrices_list[i][:split_size, :split_size]\n ratio_values.append(ratio_ksd_stdev(H))\n selected_bandwidth = bandwidths_collection[np.argmax(ratio_values)]\n split_results[r] = ksd_parametric(\n X[split_size:],\n score_X[split_size:],\n alpha,\n beta_imq,\n kernel_type,\n selected_bandwidth,\n B1_parametric_split[np.argmax(ratio_values)],\n )\n\n # Split extra data\n ratio_values = []\n for i in range(len(stein_kernel_matrices_list_extra_data)):\n H_extra = stein_kernel_matrices_list_extra_data[i]\n ratio_values.append(ratio_ksd_stdev(H_extra))\n selected_bandwidth = bandwidths_collection[np.argmax(ratio_values)]\n split_extra_data_results[r] = ksd_parametric(\n X,\n score_X,\n alpha,\n beta_imq,\n kernel_type,\n selected_bandwidth,\n B1_parametric[np.argmax(ratio_values)],\n )\n\n if (r + 1) % 10 == 0 and verbose:\n print(\n \"Step s =\",\n s + 1,\n \"/\",\n len(perturbations),\n \",\",\n r + 1,\n \"/\",\n repetitions,\n \"time:\",\n time.time() - t,\n )\n t = time.time()\n power_level = (\n np.mean(ksdagg_results),\n np.mean(median_results),\n np.mean(split_results),\n np.mean(split_extra_data_results),\n )\n if verbose:\n for i in range(len(power_level)):\n print(perturbation, test_names[i], power_level[i])\n\n for i in range(len(power_level)):\n index_vals.append((perturbation, test_names[i]))\n results.append(power_level[i])\n\n# save panda dataframe\nindex_names = (\n \"perturbation\",\n \"test\",\n)\nindex = pd.MultiIndex.from_tuples(index_vals, names=index_names)\nresults_df = pd.Series(results, index=index).to_frame(\"power/level\")\nresults_df.reset_index().to_csv(\"results/results_gamma.csv\")\nresults_df.to_pickle(\"results/results_gamma.pkl\")\n\nif verbose:\n print(\"Dataframes for Gamma experiment have been saved in results/.\")\n", "id": "6841956", "language": "Python", "matching_score": 6.324124813079834, "max_stars_count": 2, "path": "experiment_gamma.py" }, { "content": "\"\"\"\nRun Gaussian-Bernoulli Restricted Boltzmann Machine experiment \nusing data from the directories data/RBM/ and parametric/RBM\nas proposed in Section 4.4 of our paper\nKSD Aggregated Goodness-of-fit Test\n<NAME>, <NAME>, <NAME>\nhttps://arxiv.org/pdf/2202.00824.pdf\nResults are saved as dataframes in the directory results/.\n\"\"\"\n\nfrom kernel import stein_kernel_matrices, ratio_ksd_stdev\nfrom ksd_single import ksd_parametric\nfrom ksdagg import ksdagg_parametric\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\nimport time\nimport argparse\n\n# create results directory if it does not exist\nPath(\"results\").mkdir(exist_ok=True, parents=True)\n\n# panda dataframe: lists of indices and entries\nindex_vals = []\nresults = []\n\ntest_names = (\n \"ksdagg\",\n \"median\",\n \"split\",\n \"split_extra_data\",\n)\n\nfilenames = [\"rbm_s\" + str(s) for s in [0, 1, 2, 3, 4]]\n\n# run all the experiments\nt = time.time()\nverbose = True\nweights_type = \"uniform\"\nkernel_type = \"imq\"\nbeta_imq = 0.5\n# B1 = 500 as in B1_parametric\n# B2 = 500 as in B2_parametric\nB3 = 50\nd = 50\nl_minus = -20\nl_plus = 0\nalpha = 0.05\nnumber_samples = 1000\nrepetitions = 200\nfor f in range(len(filenames)):\n filename = filenames[f]\n rs = 
np.random.RandomState(0)\n X_rep = np.load(\"data/RBM/X_\" + filename + \".npy\").reshape(-1, d)\n score_X_rep = np.load(\"data/RBM/score_X_\" + filename + \".npy\").reshape(-1, d)\n B_parametric = np.load(\"parametric/RBM/B_parametric\" + str(number_samples) + \".npy\")\n B1_parametric = np.load(\n \"parametric/RBM/B1_parametric\" + str(number_samples) + \".npy\"\n )\n B1_parametric_split = np.load(\n \"parametric/RBM/B1_parametric_split\" + str(number_samples) + \".npy\"\n )\n B2_parametric = np.load(\n \"parametric/RBM/B2_parametric\" + str(number_samples) + \".npy\"\n )\n median_bandwidth = np.load(\n \"parametric/RBM/bandwidth\" + str(number_samples) + \".npy\"\n )\n if verbose:\n print(\"Starting f =\", f + 1, \"/\", len(filenames))\n print(\"bandwidth\", median_bandwidth)\n ksdagg_results = np.zeros(repetitions)\n median_results = np.zeros(repetitions)\n split_results = np.zeros(repetitions)\n split_extra_data_results = np.zeros(repetitions)\n for r in range(repetitions):\n indices = rs.choice(X_rep.shape[0] - 1, size=number_samples, replace=False)\n X = X_rep[indices]\n score_X = score_X_rep[indices]\n X_extra = X_rep[indices + 1]\n score_X_extra = score_X_rep[indices + 1]\n\n # KSDAgg\n ksdagg_results[r] = ksdagg_parametric(\n X,\n score_X,\n alpha,\n beta_imq,\n kernel_type,\n weights_type,\n l_minus,\n l_plus,\n median_bandwidth,\n B1_parametric,\n B2_parametric,\n B3,\n )\n\n # Median\n median_results[r] = ksd_parametric(\n X, score_X, alpha, beta_imq, kernel_type, median_bandwidth, B_parametric\n )\n\n # Stein kernel matrices\n bandwidths_collection = np.array(\n [2**i * median_bandwidth for i in range(l_minus, l_plus + 1)]\n )\n stein_kernel_matrices_list = stein_kernel_matrices(\n X,\n score_X,\n kernel_type,\n bandwidths_collection,\n beta_imq,\n )\n stein_kernel_matrices_list_extra_data = stein_kernel_matrices(\n X_extra,\n score_X_extra,\n kernel_type,\n bandwidths_collection,\n beta_imq,\n )\n\n # Split\n split_size = int(number_samples // 2)\n ratio_values = []\n for i in range(len(stein_kernel_matrices_list)):\n H = stein_kernel_matrices_list[i][:split_size, :split_size]\n ratio_values.append(ratio_ksd_stdev(H))\n selected_bandwidth = bandwidths_collection[np.argmax(ratio_values)]\n split_results[r] = ksd_parametric(\n X[split_size:],\n score_X[split_size:],\n alpha,\n beta_imq,\n kernel_type,\n selected_bandwidth,\n B1_parametric_split[np.argmax(ratio_values)],\n )\n\n # Split extra data\n ratio_values = []\n for i in range(len(stein_kernel_matrices_list_extra_data)):\n H_extra = stein_kernel_matrices_list_extra_data[i]\n ratio_values.append(ratio_ksd_stdev(H_extra))\n selected_bandwidth = bandwidths_collection[np.argmax(ratio_values)]\n split_extra_data_results[r] = ksd_parametric(\n X,\n score_X,\n alpha,\n beta_imq,\n kernel_type,\n selected_bandwidth,\n B1_parametric[np.argmax(ratio_values)],\n )\n\n if (r + 1) % 10 == 0 and verbose:\n print(\n \"Step f =\",\n f + 1,\n \"/\",\n len(filenames),\n \",\",\n r + 1,\n \"/\",\n repetitions,\n \"time:\",\n time.time() - t,\n )\n t = time.time()\n power_level = (\n np.mean(ksdagg_results),\n np.mean(median_results),\n np.mean(split_results),\n np.mean(split_extra_data_results),\n )\n if verbose:\n for i in range(len(power_level)):\n print(f, test_names[i], power_level[i])\n\n for i in range(len(power_level)):\n index_vals.append((f, test_names[i]))\n results.append(power_level[i])\n\n# save panda dataframe\nindex_names = (\n \"noise\",\n \"test\",\n)\nindex = pd.MultiIndex.from_tuples(index_vals, 
names=index_names)\nresults_df = pd.Series(results, index=index).to_frame(\"power/level\")\nresults_df.reset_index().to_csv(\"results/results_RBM.csv\")\nresults_df.to_pickle(\"results/results_RBM.pkl\")\n\nif verbose:\n print(\"Dataframes for RBM experiment have been saved in results/.\")\n", "id": "8573801", "language": "Python", "matching_score": 4.1945109367370605, "max_stars_count": 2, "path": "experiment_rbm.py" }, { "content": "from kernel import stein_kernel_matrices, compute_median_bandwidth, compute_ksd\nimport numpy as np\n\n\ndef ksdagg_wild(\n seed,\n X,\n score_X,\n alpha,\n beta_imq,\n kernel_type,\n weights_type,\n l_minus,\n l_plus,\n B1,\n B2,\n B3,\n):\n \"\"\"\n Compute KSDAgg using a wild bootstrap using bandwidths\n 2 ** i * median_bandwidth for i = l_minus,...,l_plus.\n inputs: seed: non-negative integer\n X: (m,d) array of samples (m d-dimensional points)\n score_X: (m,d) array of score values for X\n alpha: real number in (0,1) (level of the test)\n beta_imq: parameter beta in (0,1) for the IMQ kernel\n kernel_type: \"imq\"\n weights_type: \"uniform\", \"decreasing\", \"increasing\" or \"centred\"\n see Section 5.1 of MMD Aggregated Two-Sample Test (Schrab et al., 2021)\n l_minus: integer for bandwidth collection\n l_plus: integer for bandwidth collection\n B1: number of simulated test statistics to estimate the quantiles\n B2: number of simulated test statistics to estimate the level\n B3: number of iterations for the bisection method\n output: result of KSDAgg (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n m = X.shape[0]\n assert m >= 2\n assert 0 < alpha and alpha < 1\n assert l_minus <= l_plus\n median_bandwidth = compute_median_bandwidth(seed, X)\n bandwidths_collection = np.array(\n [2**i * median_bandwidth for i in range(l_minus, l_plus + 1)]\n )\n N = 1 + l_plus - l_minus\n weights = create_weights(N, weights_type)\n stein_kernel_matrices_list = stein_kernel_matrices(\n X, score_X, kernel_type, bandwidths_collection, beta_imq\n )\n return ksdagg_wild_custom(\n seed,\n stein_kernel_matrices_list,\n weights,\n alpha,\n B1,\n B2,\n B3,\n )\n\n\ndef ksdagg_wild_custom(seed, stein_kernel_matrices_list, weights, alpha, B1, B2, B3):\n \"\"\"\n Compute KSDAgg using a wild bootstrap with custom kernel matrices and weights.\n inputs: seed: non-negative integer\n stein_kernel_matrices_list: list of N stein kernel matrices\n weights: (N,) array consisting of positive entries summing to 1\n alpha: real number in (0,1) (level of the test)\n B1: number of simulated test statistics to estimate the quantiles\n B2: number of simulated test statistics to estimate the level\n B3: number of iterations for the bisection method\n output: result of KSDAgg (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n m = stein_kernel_matrices_list[0].shape[0]\n N = len(stein_kernel_matrices_list)\n assert len(stein_kernel_matrices_list) == weights.shape[0]\n assert m >= 2\n assert 0 < alpha and alpha < 1\n\n # Step 1: compute all simulated KSD estimates efficiently\n M = np.zeros((N, B1 + B2 + 1))\n rs = np.random.RandomState(seed)\n R = rs.choice([1.0, -1.0], size=(B1 + B2 + 1, m)) # (B1+B2+1, m) Rademacher\n R[B1] = np.ones(m)\n R = R.transpose() # (m, B1+B2+1)\n for i in range(N):\n H = stein_kernel_matrices_list[i]\n np.fill_diagonal(H, 0)\n # (B1+B2+1, ) wild bootstrap KSD estimates\n M[i] = np.sum(R * (H @ R), 0) / (m * (m - 1))\n KSD_original = M[:, B1]\n M1_sorted = np.sort(M[:, :B1]) # (N, B1)\n M2 = M[:, B1 + 1 :] # (N, B2)\n\n # Step 2: compute 
u_alpha using the bisection method\n quantiles = np.zeros((N, 1)) # (1-u*w_lambda)-quantiles for the N bandwidths\n u_min = 0\n u_max = np.min(1 / weights)\n for _ in range(B3):\n u = (u_max + u_min) / 2\n for i in range(N):\n quantiles[i] = M1_sorted[\n i, int(np.ceil(B1 * (1 - u * weights[i]))) - 1\n ]\n P_u = np.sum(np.max(M2 - quantiles, 0) > 0) / B2\n if P_u <= alpha:\n u_min = u\n else:\n u_max = u\n u = u_min\n\n # Step 3: output test result\n for i in range(N):\n if KSD_original[i] > M1_sorted[i, int(np.ceil(B1 * (1 - u * weights[i]))) - 1]:\n return 1\n return 0\n\n\ndef ksdagg_parametric(\n X,\n score_X,\n alpha,\n beta_imq,\n kernel_type,\n weights_type,\n l_minus,\n l_plus,\n bandwidth_reference,\n B1_parametric,\n B2_parametric,\n B3,\n):\n \"\"\"\n Compute KSDAgg using a parametric bootstrap using bandwidths\n 2 ** i * median_bandwidth for i = l_minus,...,l_plus.\n inputs: seed: non-negative integer\n X: (m,d) array of samples (m d-dimensional points)\n score_X: (m,d) array of score values for X\n alpha: real number in (0,1) (level of the test)\n beta_imq: parameter beta in (0,1) for the IMQ kernel\n kernel_type: \"imq\"\n weights_type: \"uniform\", \"decreasing\", \"increasing\" or \"centred\"\n see Section 5.1 of MMD Aggregated Two-Sample Test (Schrab et al., 2021)\n l_minus: integer for bandwidth collection\n l_plus: integer for bandwidth collection\n bandwidth_reference: non-negative number\n (if 0 then median bandwidth is computed)\n B1_parametric: (N, B1) array of ksd values computed with N bandwidths\n using samples from the model\n B2_parametric: (N, B2) array of ksd values computed with N bandwidths\n using samples from the model\n B3: number of iterations for the bisection method\n output: result of KSDAgg (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n assert bandwidth_reference >= 0\n if bandwidth_reference == 0:\n bandwidth_reference = compute_median_bandwidth(seed=0, X=X)\n bandwidths_collection = np.array(\n [2**i * bandwidth_reference for i in range(l_minus, l_plus + 1)]\n )\n N = bandwidths_collection.shape[0] # N = 1 + l_plus - l_minus\n weights = create_weights(N, weights_type)\n ksd_values = compute_ksd(\n X,\n score_X,\n kernel_type,\n bandwidths_collection,\n beta_imq,\n )\n return ksdagg_parametric_custom(\n ksd_values,\n alpha,\n weights,\n B1_parametric,\n B2_parametric,\n B3,\n )\n\n\ndef ksdagg_parametric_custom(\n ksd_values,\n alpha,\n weights,\n B1_parametric,\n B2_parametric,\n B3,\n):\n \"\"\"\n Compute KSDAgg using a parametric bootstrap with custom kernel matrices and weights.\n inputs: ksd_values: (N,) array consisting of KSD values\n for N bandwidths for inputs X and score_X\n alpha: real number in (0,1) (level of the test)\n weights: (N,) array consisting of positive entries summing to 1\n B1_parametric: (N, B1) array of ksd values computed with N bandwidths\n using samples from the model\n B2_parametric: (N, B2) array of ksd values computed with N bandwidths\n using samples from the model\n B3: number of iterations for the bisection method\n output: result of KSDAgg (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n B1 = B1_parametric.shape[1]\n B2 = B2_parametric.shape[1]\n N = ksd_values.shape[0]\n quantiles = np.zeros((N, 1)) # (1-u*w_lambda)-quantiles for the N bandwidths\n u_min = 0\n u_max = np.min(1 / weights)\n for _ in range(B3):\n u = (u_max + u_min) / 2\n for i in range(N):\n quantiles[i] = B1_parametric[i, int(np.ceil(B1 * (1 - u * weights[i]))) - 1]\n P_u = np.mean(np.max(B2_parametric - 
quantiles, 0) > 0)\n if P_u <= alpha:\n u_min = u\n else:\n u_max = u\n u = u_min\n for i in range(N):\n if (\n ksd_values[i]\n > B1_parametric[i, int(np.ceil(B1 * (1 - u * weights[i]))) - 1]\n ):\n return 1\n return 0\n\n\ndef create_weights(N, weights_type):\n \"\"\"\n Create weights as in Section 5.1 of MMD Aggregated Two-Sample Test (Schrab et al., 2021). \n inputs: N: number of bandwidths to test\n weights_type: \"uniform\" or \"decreasing\" or \"increasing\" or \"centred\"\n output: (N,) array of weights\n \"\"\"\n if weights_type == \"uniform\":\n weights = np.array(\n [\n 1 / N,\n ]\n * N\n )\n elif weights_type == \"decreasing\":\n normaliser = sum([1 / i for i in range(1, N + 1)])\n weights = np.array([1 / (i * normaliser) for i in range(1, N + 1)])\n elif weights_type == \"increasing\":\n normaliser = sum([1 / i for i in range(1, N + 1)])\n weights = np.array([1 / ((N + 1 - i) * normaliser) for i in range(1, N + 1)])\n elif weights_type == \"centred\":\n if N % 2 == 1:\n normaliser = sum([1 / (abs((N + 1) / 2 - i) + 1) for i in range(1, N + 1)])\n weights = np.array(\n [1 / ((abs((N + 1) / 2 - i) + 1) * normaliser) for i in range(1, N + 1)]\n )\n else:\n normaliser = sum(\n [1 / (abs((N + 1) / 2 - i) + 0.5) for i in range(1, N + 1)]\n )\n weights = np.array(\n [\n 1 / ((abs((N + 1) / 2 - i) + 0.5) * normaliser)\n for i in range(1, N + 1)\n ]\n )\n else:\n raise ValueError(\n 'The value of weights_type should be \"uniform\" or'\n '\"decreasing\" or \"increasing\" or \"centred\".'\n )\n return weights\n", "id": "5866113", "language": "Python", "matching_score": 4.84526252746582, "max_stars_count": 0, "path": "ksdagg.py" }, { "content": "from kernel import stein_kernel_matrices, compute_median_bandwidth, compute_ksd\nimport numpy as np\n\n\ndef ksd_median_wild(\n seed, X, score_X, alpha, beta_imq, kernel_type, B1, bandwidth_multiplier=1\n):\n \"\"\"\n Compute KSD test using a wild bootstrap with the median heuristic as\n kernel bandwidth multiplied by bandwidth_multiplier.\n This KSD test has been proposed by\n <NAME>, <NAME> and <NAME>\n A Kernel Test of Goodness of Fit\n ICML 2016\n http://proceedings.mlr.press/v48/chwialkowski16.pdf\n inputs: seed: non-negative integer\n X: (m,d) array of samples (m d-dimensional points)\n score_X: (m,d) array of score values for X\n alpha: real number in (0,1) (level of the test)\n beta_imq: parameter beta in (0,1) for the IMQ kernel\n kernel_type: \"imq\"\n B1: number of simulated test statistics to estimate the quantiles\n bandwidth_multiplier: multiplicative factor for the median bandwidth\n output: result of KSD test using the median heuristic as kernel bandwidth\n (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n m = X.shape[0]\n assert m >= 2\n assert 0 < alpha and alpha < 1\n median_bandwidth = compute_median_bandwidth(seed, X)\n H = stein_kernel_matrices(\n X,\n score_X,\n kernel_type,\n np.array([median_bandwidth * bandwidth_multiplier]),\n beta_imq,\n )[0]\n return ksd_wild_custom(\n seed,\n H,\n alpha,\n B1,\n )\n\n\ndef ksd_wild_custom(seed, H, alpha, B1):\n \"\"\"\n Compute KSD test using a wild bootstrap with custom kernel matrix.\n inputs: seed: non-negative integer\n H: stein kernel matrix\n alpha: real number in (0,1) (level of the test)\n B1: number of simulated test statistics to estimate the quantiles\n output: result of KSD test (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n m = H.shape[0]\n np.fill_diagonal(H, 0)\n rs = np.random.RandomState(seed)\n R = rs.choice([1.0, -1.0], size=(B1 + 
1, m)) # (B1+1, m) Rademacher\n R[B1] = np.ones(m)\n R = R.transpose() # (m, B1+1)\n M1 = np.sum(R * (H @ R), 0) / (m * (m - 1))\n KSD_original = M1[B1]\n M1_sorted = np.sort(M1[:B1]) # (B1,)\n if KSD_original > M1_sorted[int(np.ceil(B1 * (1 - alpha))) - 1]:\n return 1\n return 0\n\n\ndef ksd_parametric(\n X, score_X, alpha, beta_imq, kernel_type, bandwidth_reference, B_parametric\n):\n \"\"\"\n Compute KSD test using a parametric bootstrap with a reference kernel bandwidth\n This KSD test has been proposed by\n <NAME>, <NAME> and <NAME>\n A Kernel Test of Goodness of Fit\n ICML 2016\n http://proceedings.mlr.press/v48/chwialkowski16.pdf\n inputs: seed: non-negative integer\n X: (m,d) array of samples (m d-dimensional points)\n score_X: (m,d) array of score values for X\n alpha: real number in (0,1) (level of the test)\n beta_imq: parameter beta in (0,1) for the IMQ kernel\n kernel_type: \"imq\"\n bandwidth_reference: non-negative number\n (if 0 then median bandwidth is computed)\n B_parametric: (N, B) array of ksd values computed with\n the reference bandwidth using samples from the model\n output: result of KSD test using the reference bandwidth\n (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n ksd_value = compute_ksd(\n X,\n score_X,\n kernel_type,\n np.array([bandwidth_reference]),\n beta_imq,\n )[0]\n return ksd_parametric_custom(ksd_value, alpha, B_parametric)\n\n\ndef ksd_parametric_custom(ksd_value, alpha, B_parametric):\n \"\"\"\n Compute KSD test using a parametric bootstrap with kernel matrix\n inputs: ksd_values: (N,) array consisting of KSD values\n for N bandwidths for inputs X and score_X\n alpha: real number in (0,1) (level of the test)\n B_parametric: (N, B) array of ksd values computed with\n the reference bandwidth using samples from the model\n output: result of KSD test (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n B = B_parametric.shape[0]\n if ksd_value > B_parametric[int(np.ceil(B * (1 - alpha))) - 1]:\n return 1\n return 0\n", "id": "11326838", "language": "Python", "matching_score": 1.3549875020980835, "max_stars_count": 0, "path": "ksd_single.py" }, { "content": "from kernel import compute_ksd, compute_median_bandwidth\nfrom pathlib import Path\nimport numpy as np\nimport time\n\n\ndef generate_parametric(\n X_rep,\n score_X_rep,\n B,\n B1,\n B2,\n kernel_type,\n l_minus,\n l_plus,\n beta_imq,\n verbose=True,\n):\n \"\"\"\n Compute KSD values.\n inputs: X_rep: (r,m,d) array of r repetitions of m d-dimensional samples\n score_X: (r,m,d) array of score values for X\n B: positive integer\n B1: positive integer\n B2: positive integer\n kernel_type: \"imq\"\n l_minus: integer for bandwidth collection\n l_plus: integer for bandwidth collection\n beta_imq: parameter beta in (0,1) for the IMQ kernel\n verbose: boolean (print statements)\n output: 4-tuple with ordered elements:\n B_parametric: (B,) array of KSD values computed with m samples\n B1_parametric: (N,B1) array of KSD values computed with m samples for N bandwidths\n B1_parametric_split: (N,B1) array of KSD values computed with m/2 samples for N bandwidths\n B2_parametric: (N,B2) array of KSD values computed with m samples for N bandwidths\n median_bandwidth: float\n \"\"\"\n assert X_rep.shape[0] >= B1 + B2\n m = X_rep.shape[1]\n\n # compute median bandwidth\n median_bandwidth = compute_median_bandwidth(0, X_rep[0])\n\n # define bandwidth_multipliers and weights\n bandwidth_multipliers = np.array([2**i for i in range(l_minus, l_plus + 1)])\n bandwidths_collection = 
np.array(\n [b * median_bandwidth for b in bandwidth_multipliers]\n )\n N = bandwidth_multipliers.shape[0] # N = 1 + l_plus - l_minus\n\n # B_parametric\n t = time.time()\n B_parametric = np.zeros((B,))\n for b in range(B):\n if (b + 1) % 25 == 0 and verbose:\n print(\"1 / 4,\", b + 1, \"/\", B, time.time() - t)\n t = time.time()\n B_parametric[b] = compute_ksd(\n X_rep[b],\n score_X_rep[b],\n kernel_type,\n np.array([median_bandwidth]),\n beta_imq,\n )\n B_parametric = np.sort(B_parametric.T)\n\n B1_parametric = np.zeros((B1, N))\n for b in range(B1):\n if (b + 1) % 25 == 0 and verbose:\n print(\"2 / 4,\", b + 1, \"/\", B1, time.time() - t)\n t = time.time()\n B1_parametric[b] = compute_ksd(\n X_rep[b], score_X_rep[b], kernel_type, bandwidths_collection, beta_imq\n )\n B1_parametric = np.sort(B1_parametric.T)\n\n B2_parametric = np.zeros((B2, N))\n for b in range(B2):\n if (b + 1) % 25 == 0 and verbose:\n print(\"3 / 4,\", b + 1, \"/\", B2, time.time() - t)\n t = time.time()\n B2_parametric[b] = compute_ksd(\n X_rep[B1 + b],\n score_X_rep[B1 + b],\n kernel_type,\n bandwidths_collection,\n beta_imq,\n )\n B2_parametric = B2_parametric.T\n\n B1_parametric_split = np.zeros((B1, N))\n split_size = int(m // 2)\n for b in range(B1):\n if (b + 1) % 25 == 0 and verbose:\n print(\"4 / 4,\", b + 1, \"/\", B1, time.time() - t)\n t = time.time()\n B1_parametric_split[b] = compute_ksd(\n X_rep[b][:split_size],\n score_X_rep[b][:split_size],\n kernel_type,\n bandwidths_collection,\n beta_imq,\n )\n B1_parametric_split = np.sort(B1_parametric_split.T)\n\n return (\n B_parametric,\n B1_parametric,\n B1_parametric_split,\n B2_parametric,\n median_bandwidth,\n )\n\n\n# Gamma\ndef score_gamma(x, k, theta):\n \"\"\"\n Compute score function of one-dimensional Gamma distribution.\n inputs: x: real number at which the score function is evaluated\n k: positive number (shape parameter of Gamma distribution)\n theta: positive number (scale parameter of Gamma distribution)\n output: score\n \"\"\"\n return (k - 1) / x - 1 / theta\n\n\nl_minus = 0\nl_plus = 10\nrs = np.random.RandomState(0)\nnumber_samples = 500\nk_p = 5\ntheta_p = 5\nB = 500\nB1 = 500\nB2 = 500\nX_rep_param = rs.gamma(k_p, theta_p, (B1 + B2, number_samples, 1))\nscore_X_rep_param = score_gamma(X_rep_param, k_p, theta_p)\nkernel_type = \"imq\"\nbeta_imq = 0.5\n(\n B_parametric,\n B1_parametric,\n B1_parametric_split,\n B2_parametric,\n median_bandwidth,\n) = generate_parametric(\n X_rep_param,\n score_X_rep_param,\n B,\n B1,\n B2,\n kernel_type,\n l_minus,\n l_plus,\n beta_imq,\n verbose=True,\n)\nPath(\"parametric/Gamma\").mkdir(exist_ok=True, parents=True)\nnp.save(\"parametric/Gamma/B_parametric\" + str(number_samples) + \".npy\", B_parametric)\nnp.save(\"parametric/Gamma/B1_parametric\" + str(number_samples) + \".npy\", B1_parametric)\nnp.save(\n \"parametric/Gamma/B1_parametric_split\" + str(number_samples) + \".npy\",\n B1_parametric_split,\n)\nnp.save(\"parametric/Gamma/B2_parametric\" + str(number_samples) + \".npy\", B2_parametric)\nnp.save(\"parametric/Gamma/bandwidth\" + str(number_samples) + \".npy\", median_bandwidth)\nprint(\"Gamma parametric has been saved in parametric/Gamma/.\")\n\n\n# Gaussian-Bernoulli Restricted Boltzmann Machine\nl_minus = -20\nl_plus = 0\nrs = np.random.RandomState(0)\nnumber_samples = 1000\nd = 50\nX_rep_all = np.load(\"data/RBM/X_rbm_s0.npy\").reshape(-1, d)\nscore_X_rep_all = np.load(\"data/RBM/score_X_rbm_s0.npy\").reshape(-1, d)\nB = 500\nB1 = 500\nB2 = 500\nX_rep_param = np.zeros((B1 + B2, 
number_samples, d))\nscore_X_rep_param = np.zeros((B1 + B2, number_samples, d))\nfor i in range(B1 + B2):\n indices = rs.choice(X_rep_all.shape[0], size=number_samples, replace=False)\n X_rep_param[i] = X_rep_all[indices]\n score_X_rep_param[i] = score_X_rep_all[indices]\nkernel_type = \"imq\"\nbeta_imq = 0.5\n(\n B_parametric,\n B1_parametric,\n B1_parametric_split,\n B2_parametric,\n median_bandwidth,\n) = generate_parametric(\n X_rep_param,\n score_X_rep_param,\n B,\n B1,\n B2,\n kernel_type,\n l_minus,\n l_plus,\n beta_imq,\n verbose=True,\n)\nPath(\"parametric/RBM\").mkdir(exist_ok=True, parents=True)\nnp.save(\"parametric/RBM/B_parametric\" + str(number_samples) + \".npy\", B_parametric)\nnp.save(\"parametric/RBM/B1_parametric\" + str(number_samples) + \".npy\", B1_parametric)\nnp.save(\n \"parametric/RBM/B1_parametric_split\" + str(number_samples) + \".npy\",\n B1_parametric_split,\n)\nnp.save(\"parametric/RBM/B2_parametric\" + str(number_samples) + \".npy\", B2_parametric)\nnp.save(\"parametric/RBM/bandwidth\" + str(number_samples) + \".npy\", median_bandwidth)\nprint(\"RBM parametric has been saved in parametric/RBM/.\")\n\n\n# Normalizing Flow\nfor number_samples in [100, 200, 300, 400, 500]:\n l_minus = -20\n l_plus = 0\n rs = np.random.RandomState(0)\n d = 28**2\n X_rep_all = np.load(\"data/NF_MNIST/bootstrap/X_mnist_level.npy\").reshape(-1, d)\n score_X_rep_all = np.load(\n \"data/NF_MNIST/bootstrap/score_X_mnist_level.npy\"\n ).reshape(-1, d)\n B = 500\n B1 = 500\n B2 = 500\n B3 = 0\n X_rep_param = np.zeros((B1 + B2, number_samples, d))\n score_X_rep_param = np.zeros((B1 + B2, number_samples, d))\n for i in range(B1 + B2):\n indices = rs.choice(X_rep_all.shape[0], size=number_samples, replace=False)\n X_rep_param[i] = X_rep_all[indices]\n score_X_rep_param[i] = score_X_rep_all[indices]\n kernel_type = \"imq\"\n beta_imq = 0.5\n (\n B_parametric,\n B1_parametric,\n B1_parametric_split,\n B2_parametric,\n median_bandwidth,\n ) = generate_parametric(\n X_rep_param,\n score_X_rep_param,\n B,\n B1,\n B2,\n kernel_type,\n l_minus,\n l_plus,\n beta_imq,\n verbose=True,\n )\n Path(\"parametric/NF_MNIST\").mkdir(exist_ok=True, parents=True)\n np.save(\n \"parametric/NF_MNIST/B_parametric\" + str(number_samples) + \".npy\", B_parametric\n )\n np.save(\n \"parametric/NF_MNIST/B1_parametric\" + str(number_samples) + \".npy\",\n B1_parametric,\n )\n np.save(\n \"parametric/NF_MNIST/B1_parametric_split\" + str(number_samples) + \".npy\",\n B1_parametric_split,\n )\n np.save(\n \"parametric/NF_MNIST/B2_parametric\" + str(number_samples) + \".npy\",\n B2_parametric,\n )\n np.save(\n \"parametric/NF_MNIST/bandwidth\" + str(number_samples) + \".npy\", median_bandwidth\n )\nprint(\"NF MNIST parametric has been saved in parametric/NF_MNIST/.\")\n", "id": "12147283", "language": "Python", "matching_score": 2.2490732669830322, "max_stars_count": 0, "path": "generate_parametric.py" }, { "content": "import numpy as np\nimport scipy.spatial\n\n\ndef stein_kernel_matrices(\n X,\n score_X,\n kernel_type,\n bandwidths_collection,\n beta_imq,\n):\n \"\"\"\n Compute Stein kernel matrices for several bandwidths.\n Function adapted from https://github.com/pierreablin/ksddescent/blob/main/ksddescent/kernels.py\n inputs: X: (m,d) array of samples (m d-dimensional points)\n score_X: (m,d) array of score values for X\n kernel_type: \"imq\"\n bandwidths_collection: (N,) array of bandwidths\n beta_imq: parameter beta in (0,1) for the IMQ kernel\n outputs: list of N stein kernel matrices computed with the N 
bandwidths\n \"\"\"\n if kernel_type == \"imq\":\n p = X.shape[1]\n norms = np.sum(X**2, -1)\n dists = -2 * X @ X.T + np.expand_dims(norms, 1) + np.expand_dims(norms, 0)\n diffs = np.expand_dims(np.sum(X * score_X, -1), 1) - (X @ score_X.T)\n diffs = diffs + diffs.T\n output_list = []\n for bandwith in bandwidths_collection:\n g = 1 / bandwith**2\n res = 1 + g * dists\n kxy = res ** (-beta_imq)\n dkxy = 2 * beta_imq * g * (res) ** (-beta_imq - 1) * diffs\n d2kxy = 2 * (\n beta_imq * g * (res) ** (-beta_imq - 1) * p\n - 2\n * beta_imq\n * (beta_imq + 1)\n * g**2\n * dists\n * res ** (-beta_imq - 2)\n )\n h = score_X @ score_X.T * kxy + dkxy + d2kxy\n output_list.append(h)\n return output_list\n else:\n raise ValueError('The value of kernel_type should be \"imq\".')\n\n\ndef compute_ksd(\n X,\n score_X,\n kernel_type,\n bandwidths_collection,\n beta_imq,\n):\n \"\"\"\n Compute KSD values for several bandwidths.\n Function adapted from https://github.com/pierreablin/ksddescent/blob/main/ksddescent/kernels.py\n inputs: X: (m,d) array of samples (m d-dimensional points)\n score_X: (m,d) array of score values for X\n kernel_type: \"imq\"\n bandwidths_collection: (N,) array of bandwidths\n beta_imq: parameter beta in (0,1) for the IMQ kernel\n outputs: (N,) array of KSD values for the N bandwidths\n \"\"\"\n if kernel_type == \"imq\":\n p = X.shape[1]\n norms = np.sum(X**2, -1)\n dists = -2 * X @ X.T + np.expand_dims(norms, 1) + np.expand_dims(norms, 0)\n diffs = np.expand_dims(np.sum(X * score_X, -1), 1) - (X @ score_X.T)\n diffs = diffs + diffs.T\n output_list = []\n N = bandwidths_collection.shape[0]\n ksd_values = np.zeros((N,))\n for b in range(N):\n bandwidth = bandwidths_collection[b]\n g = 1 / bandwidth**2\n res = 1 + g * dists\n kxy = res ** (-beta_imq)\n dkxy = 2 * beta_imq * g * (res) ** (-beta_imq - 1) * diffs\n d2kxy = 2 * (\n beta_imq * g * (res) ** (-beta_imq - 1) * p\n - 2\n * beta_imq\n * (beta_imq + 1)\n * g**2\n * dists\n * res ** (-beta_imq - 2)\n )\n H = score_X @ score_X.T * kxy + dkxy + d2kxy\n np.fill_diagonal(H, 0)\n m = H.shape[0]\n r = np.ones(m)\n ksd_values[b] = r @ H @ r / (m * (m - 1))\n return ksd_values\n else:\n raise ValueError('The value of kernel_type should be \"imq\".')\n\n\ndef ratio_ksd_stdev(H, regulariser=10 ** (-8)):\n \"\"\"\n Compute the estimated ratio of the KSD to the asymptotic standard deviation under the alternative.\n The original MMD formulation is attributed to (Eq. 3):\n <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>\n Learning deep kernels for non-parametric two-sample tests\n International Conference on Machine Learning, 2020\n http://proceedings.mlr.press/v119/liu20m/liu20m.pdf\n inputs: H: (m, m) stein kernel matrix WITH diagonal entries\n (np.fill_diagonal(H, 0) has not been applied)\n regulariser: small positive number (we use 10**(-8) as done by Liu et al.)\n output: estimate of the ratio of KSD^2 and of the standard deviation under H_a\n warning: this function mutates H by applying np.fill_diagonal(H, 0)\n \"\"\"\n m = H.shape[0]\n\n # compute variance\n H_column_sum = np.sum(H, axis=1)\n\n var = (\n 4 / m**3 * np.sum(H_column_sum**2)\n - 4 / m**4 * np.sum(H_column_sum) ** 2\n + regulariser\n )\n # we should obtain var > 0, if var <= 0 then we discard the corresponding\n # bandwidth by returning a large negative value so that we do not select\n # the corresponding bandwidth when selecting the maximum of the outputs\n # of ratio_mmd_stdev for bandwidths in the collection\n if not var > 0:\n raise ValueError(\"Variance is negative. 
Try using a larger regulariser.\")\n # return -1e10\n\n # compute original KSD estimate\n np.fill_diagonal(H, 0)\n v = np.ones(m)\n ksd = v @ H @ v / (m * (m - 1))\n\n return ksd / np.sqrt(var)\n\n\ndef compute_median_bandwidth(seed, X, max_samples=1000, min_value=0.0001):\n \"\"\"\n Compute the median L^2-distance between all the points in X using at\n most max_samples samples and using a minimum threshold value min_value.\n inputs: seed: non-negative integer\n X: (m,d) array of samples\n max_samples: number of samples used to compute the median (int or None)\n output: median bandwidth (float)\n \"\"\"\n if max_samples != None:\n rs = np.random.RandomState(seed)\n pX = rs.choice(X.shape[0], min(max_samples, X.shape[0]), replace=False)\n median_bandwidth = np.median(scipy.spatial.distance.pdist(X[pX], \"euclidean\"))\n else:\n median_bandwidth = np.median(scipy.spatial.distance.pdist(X, \"euclidean\"))\n return np.maximum(median_bandwidth, min_value)\n", "id": "363057", "language": "Python", "matching_score": 3.5519912242889404, "max_stars_count": 0, "path": "kernel.py" }, { "content": "import numpy as np\nfrom numba import njit\n\n\ndef compute_median_bandwidth_subset(seed, X, Y, max_samples=2000, min_value = 0.0001):\n \"\"\"\n Compute the median distance in each dimension between all the points in X and Y\n using at most max_samples samples and using a threshold value min_value.\n inputs: seed: random seed\n X: (m,d) array of samples\n Y: (n,d) array of samples\n max_samples: number of samples used to compute the median (int or None)\n output: (d,) array: median of absolute difference in each component\n \"\"\"\n if max_samples != None:\n rs = np.random.RandomState(seed)\n pX = rs.choice(X.shape[0], min(max_samples // 2, X.shape[0]), replace=False)\n pY = rs.choice(Y.shape[0], min(max_samples // 2, Y.shape[0]), replace=False)\n Z = np.concatenate((X[pX], Y[pY]))\n else:\n Z = np.concatenate((X, Y))\n median_bandwidth = compute_median_bandwidth(Z)\n return np.maximum(median_bandwidth, min_value)\n \n\n@njit\ndef compute_median_bandwidth(Z):\n \"\"\"\n Compute the median distance in each dimension between all the points in Z.\n input: Z: (m+n,d) array of pooled samples \n output: (d,) array: median of absolute different in each component\n \"\"\"\n mn, d = Z.shape\n diff = np.zeros((d, int((mn ** 2 - mn) / 2)))\n output = np.zeros(d)\n for u in range(d):\n k = 0\n for i in range(mn - 1):\n for j in range(i + 1, mn):\n diff[u, k] = np.abs(Z[i, u] - Z[j, u])\n k += 1\n output[u] = np.median(diff[u])\n return output\n ", "id": "7777711", "language": "Python", "matching_score": 0.6985843777656555, "max_stars_count": 10, "path": "median.py" }, { "content": "import numpy as np\nfrom numba import njit\n\n\n@njit\ndef pairwise_square_l2_distance(Z):\n \"\"\"\n Compute the pairwise L^2-distance matrix between all points in Z.\n inputs: Z is (mn,d) array\n output: (mn,mn) array of pairwise squared distances (L^2)\n https://stackoverflow.com/questions/53376686/what-is-the-most-efficient-way-to-compute-the-square-euclidean-distance-between/53380192#53380192\n faster than scipy.spatial.distance.cdist(Z,Z,'sqeuclidean')\n \"\"\"\n mn, d = Z.shape\n dist = np.dot(Z, Z.T) \n TMP = np.empty(mn, dtype=Z.dtype)\n for i in range(mn):\n sum_Zi = 0.0\n for j in range(d):\n sum_Zi += Z[i, j] ** 2\n TMP[i] = sum_Zi\n for i in range(mn):\n for j in range(mn):\n dist[i, j] = -2.0 * dist[i, j] + TMP[i] + TMP[j]\n return dist\n\n\n@njit \ndef pairwise_l1_distance(Z):\n \"\"\"\n Compute the pairwise L^1-distance 
matrix between all points in Z.\n inputs: Z is (mn,d) array\n output: (mn,mn) array of pairwise squared distances (L^1)\n \"\"\"\n mn, d = Z.shape\n output = np.zeros((mn, mn))\n for i in range(mn):\n for j in range(mn):\n temp = 0.0\n for u in range(d):\n temp += np.abs(Z[i, u] - Z[j, u])\n output[i, j] = temp\n return output\n\n\n@njit\ndef kernel_matrices(X, Y, kernel_type, bandwidth, bandwidth_multipliers):\n \"\"\"\n Compute kernel matrices for several bandwidths.\n inputs: kernel_type: \"gaussian\" or \"laplace\"\n X is (m,d) array (m d-dimensional points)\n Y is (n,d) array (n d-dimensional points)\n bandwidth is (d,) array\n bandwidth_multipliers is (N,) array such that: \n collection_bandwidths = [c*bandwidth for c in bandwidth_multipliers]\n kernel_type: \"gaussian\" or \"laplace\" (as defined in Section 5.3 of our paper)\n outputs: list of N kernel matrices for the pooled sample with the N bandwidths\n \"\"\"\n m, d = X.shape\n Z = np.concatenate((X / bandwidth, Y / bandwidth))\n if kernel_type == \"gaussian\":\n pairwise_sq_l2_dists = pairwise_square_l2_distance(Z) \n prod = np.prod(bandwidth)\n output_list = []\n for c in bandwidth_multipliers:\n output_list.append(np.exp(-pairwise_sq_l2_dists / (c ** 2))) \n return output_list\n elif kernel_type == \"laplace\":\n pairwise_l1_dists = pairwise_l1_distance(Z) \n prod = np.prod(bandwidth)\n output_list = []\n for c in bandwidth_multipliers:\n output_list.append(np.exp(-pairwise_l1_dists / c)) \n return output_list\n else:\n raise ValueError(\n 'The value of kernel_type should be either \"gaussian\" or \"laplace\"'\n )\n\n\n@njit\ndef mutate_K(K, approx_type):\n \"\"\"\n Mutate the kernel matrix K depending on the type of approximation.\n inputs: K: kernel matrix of size (m+n,m+n) consisting of \n four matrices of sizes (m,m), (m,n), (n,m) and (n,n)\n m and n are the numbers of samples from p and q respectively\n approx_type: \"permutation\" (for MMD_a estimate Eq. (3)) \n or \"wild bootstrap\" (for MMD_b estimate Eq. (6))\n \n output: if approx_type is \"permutation\" then the estimate is MMD_a (Eq. (3)) and \n the matrix K is mutated to have zero diagonal entries\n if approx_type is \"wild bootstrap\" then the estimate is MMD_b (Eq. (6)),\n we have m = n and the matrix K is mutated so that the four matrices \n have zero diagonal entries\n \"\"\"\n if approx_type == \"permutation\":\n for i in range(K.shape[0]):\n K[i, i] = 0 \n if approx_type == \"wild bootstrap\":\n m = int(K.shape[0] / 2) # m = n\n for i in range(m):\n K[i, i] = 0\n K[m + i, m + i] = 0\n K[i, m + i] = 0 \n K[m + i, i] = 0\n", "id": "4246717", "language": "Python", "matching_score": 3.667219877243042, "max_stars_count": 10, "path": "kernel.py" }, { "content": "import numpy as np\nfrom median import compute_median_bandwidth_subset\nfrom weights import create_weights\nfrom kernel import kernel_matrices, mutate_K\n\n\ndef mmdagg(\n seed, X, Y, alpha, kernel_type, approx_type, weights_type, l_minus, l_plus, B1, B2, B3\n):\n \"\"\"\n Compute MMDAgg as defined in Algorithm 1 in our paper using the collection of\n bandwidths defined in Eq. (16) and the weighting strategies proposed in Section 5.1.\n inputs: seed: integer random seed\n X: (m,d) array (m d-dimensional points)\n Y: (n,d) array (n d-dimensional points)\n alpha: real number in (0,1) (level of the test)\n kernel_type: \"gaussian\" or \"laplace\"\n approx_type: \"permutation\" (for MMD_a estimate Eq. (3)) \n or \"wild bootstrap\" (for MMD_b estimate Eq. 
(6))\n weights_type: \"uniform\", \"decreasing\", \"increasing\" or \"centred\" (Section 5.1 of our paper)\n l_minus: integer (for collection of bandwidths Eq. (16) in our paper)\n l_plus: integer (for collection of bandwidths Eq. (16) in our paper)\n B1: number of simulated test statistics to estimate the quantiles\n B2: number of simulated test statistics to estimate the probability in Eq. (13) in our paper\n B3: number of iterations for the bisection method\n output: result of MMDAgg (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n m = X.shape[0]\n n = Y.shape[0]\n mn = m + n\n assert n >= 2 and m >= 2\n assert X.shape[1] == Y.shape[1]\n assert 0 < alpha and alpha < 1\n assert kernel_type in [\"gaussian\", \"laplace\"]\n assert approx_type in [\"permutation\", \"wild bootstrap\"]\n assert weights_type in [\"uniform\", \"decreasing\", \"increasing\", \"centred\"]\n assert l_plus >= l_minus\n\n # compute median bandwidth\n median_bandwidth = compute_median_bandwidth_subset(seed, X, Y)\n \n # define bandwidth_multipliers and weights\n bandwidth_multipliers = np.array([2 ** i for i in range(l_minus, l_plus + 1)])\n N = bandwidth_multipliers.shape[0] # N = 1 + l_plus - l_minus\n weights = create_weights(N, weights_type)\n \n # compute the kernel matrices\n kernel_matrices_list = kernel_matrices(\n X, Y, kernel_type, median_bandwidth, bandwidth_multipliers\n ) \n\n return mmdagg_custom(\n seed, \n kernel_matrices_list, \n weights, \n m, \n alpha, \n approx_type, \n B1, \n B2, \n B3,\n )\n\n\ndef mmdagg_custom(\n seed, kernel_matrices_list, weights, m, alpha, approx_type, B1, B2, B3\n):\n \"\"\"\n Compute MMDAgg as defined in Algorithm 1 in our paper with custom kernel matrices\n and weights.\n inputs: seed: integer random seed\n kernel_matrices_list: list of N kernel matrices\n these can correspond to kernel matrices obtained by considering\n different bandwidths of a fixed kernel as we consider in our paper\n but one can also use N fundamentally different kernels.\n It is assumed that the kernel matrices are of shape (m+n,m+n) with\n the top left (m,m) submatrix corresponding to samples from X and \n the bottom right (n,n) submatrix corresponding to samples from Y\n weights: array of shape (N,) consisting of positive entries summing to 1\n m: the number of samples from X used to create the kernel matrices\n alpha: real number in (0,1) (level of the test)\n kernel_type: \"gaussian\" or \"laplace\"\n approx_type: \"permutation\" (for MMD_a estimate Eq. (3)) \n or \"wild bootstrap\" (for MMD_b estimate Eq. (6))\n B1: number of simulated test statistics to estimate the quantiles\n B2: number of simulated test statistics to estimate the probability in Eq. 
(13) in our paper\n B3: number of iterations for the bisection method\n output: result of MMDAgg (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n n = kernel_matrices_list[0].shape[0] - m\n mn = m + n\n N = len(kernel_matrices_list)\n assert len(kernel_matrices_list) == weights.shape[0]\n assert n >= 2 and m >= 2\n assert 0 < alpha and alpha < 1\n assert approx_type in [\"permutation\", \"wild bootstrap\"]\n \n # Step 1: compute all simulated MMD estimates (efficient as in Appendix C in our paper)\n M = np.zeros((N, B1 + B2 + 1)) \n rs = np.random.RandomState(seed)\n if approx_type == \"permutation\":\n idx = rs.rand(B1 + B2 + 1, m + n).argsort(axis=1) # (B1+B2+1, m+n): rows of permuted indices\n #11\n v11 = np.concatenate((np.ones(m), -np.ones(n))) # (m+n, )\n V11i = np.tile(v11, (B1 + B2 + 1, 1)) # (B1+B2+1, m+n)\n V11 = np.take_along_axis(V11i, idx, axis=1) # (B1+B2+1, m+n): permute the entries of the rows\n V11[B1] = v11 # (B1+1)th entry is the original MMD (no permutation)\n V11 = V11.transpose() # (m+n, B1+B2+1)\n #10\n v10 = np.concatenate((np.ones(m), np.zeros(n)))\n V10i = np.tile(v10, (B1 + B2 + 1, 1))\n V10 = np.take_along_axis(V10i, idx, axis=1)\n V10[B1] = v10\n V10 = V10.transpose() \n #01\n v01 = np.concatenate((np.zeros(m), -np.ones(n)))\n V01i = np.tile(v01, (B1 + B2 + 1, 1))\n V01 = np.take_along_axis(V01i, idx, axis=1)\n V01[B1] = v01\n V01 = V01.transpose() \n for i in range(N):\n K = kernel_matrices_list[i]\n mutate_K(K, approx_type)\n M[i] = (\n np.sum(V10 * (K @ V10), 0) * (n - m + 1) / (m * n * (m - 1))\n + np.sum(V01 * (K @ V01), 0) * (m - n + 1) / (m * n * (n - 1))\n + np.sum(V11 * (K @ V11), 0) / (m * n)\n ) # (B1+B2+1, ) permuted MMD estimates\n elif approx_type == \"wild bootstrap\":\n R = rs.choice([-1.0, 1.0], size=(B1 + B2 + 1, n))\n R[B1] = np.ones(n)\n R = R.transpose()\n R = np.concatenate((R, -R)) # (2n, B1+B2+1) \n for i in range(N):\n K = kernel_matrices_list[i]\n mutate_K(K, approx_type)\n M[i] = np.sum(R * (K @ R) , 0) /(n * (n - 1))\n else:\n raise ValueError(\n 'The value of approx_type should be either \"permutation\" or \"wild bootstrap\".'\n )\n MMD_original = M[:, B1]\n M1_sorted = np.sort(M[:, :B1 + 1]) # (N, B1+1)\n M2 = M[:, B1 + 1:] # (N, B2)\n \n # Step 2: compute u_alpha_hat using the bisection method\n quantiles = np.zeros((N, 1)) # (1-u*w_lambda)-quantiles for the N bandwidths\n u_min = 0\n u_max = np.min(1 / weights)\n for _ in range(B3): \n u = (u_max + u_min) / 2\n for i in range(N):\n quantiles[i] = M1_sorted[\n i, int(np.ceil((B1 + 1) * (1 - u * weights[i]))) - 1\n ]\n P_u = np.sum(np.max(M2 - quantiles, 0) > 0) / B2\n if P_u <= alpha:\n u_min = u\n else:\n u_max = u\n u = u_min\n \n # Step 3: output test result\n for i in range(N):\n if ( MMD_original[i] \n > M1_sorted[i, int(np.ceil((B1 + 1) * (1 - u * weights[i]))) - 1]\n ):\n return 1\n return 0 \n\n\ndef mmd_median_test(\n seed, X, Y, alpha, kernel_type, approx_type, B1, bandwidth_multiplier=1\n):\n \"\"\"\n Compute MMD test using kernel with bandwidth the median bandwidth multiplied by bandwidth_multiplier.\n This test has been proposed by \n <NAME>, <NAME>, <NAME>, <NAME> and <NAME>\n A Kernel Two-Sample Test\n Journal of Machine Learning Research 2012\n https://www.jmlr.org/papers/volume13/gretton12a/gretton12a.pdf\n inputs: seed: random seed\n X: (m,d) array (m d-dimensional points)\n Y: (n,d) array (n d-dimensional points)\n alpha: real number in (0,1) (level of the test)\n kernel_type: \"gaussian\" or \"laplace\"\n approx_type: \"permutation\" (for MMD_a 
estimate Eq. (3)) \n or \"wild bootstrap\" (for MMD_b estimate Eq. (6))\n B1: number of simulated test statistics to estimate the quantiles\n bandwidth_multiplier: multiplicative factor for the median bandwidth \n output: result of the MMD test with median bandwidth multiplied by bandwidth_multiplier\n (1 for \"REJECT H_0\" and 0 for \"FAIL TO REJECT H_0\")\n \"\"\"\n m = X.shape[0]\n n = Y.shape[0]\n mn = m + n\n assert n >= 2 and m >= 2\n assert X.shape[1] == Y.shape[1]\n assert 0 < alpha and alpha < 1\n assert kernel_type in [\"gaussian\", \"laplace\"]\n assert approx_type in [\"permutation\", \"wild bootstrap\"]\n\n # compute median bandwidth\n median_bandwidth = compute_median_bandwidth_subset(seed, X, Y)\n \n # compute all simulated MMD estimates (efficient)\n K = kernel_matrices(\n X, Y, kernel_type, median_bandwidth, np.array([bandwidth_multiplier])\n )[0]\n mutate_K(K, approx_type) \n rs = np.random.RandomState(seed)\n if approx_type == \"permutation\":\n v11 = np.concatenate((np.ones(m), -np.ones(n))) # (m+n, )\n v10 = np.concatenate((np.ones(m), np.zeros(n)))\n v01 = np.concatenate((np.zeros(m), -np.ones(n)))\n V11 = np.tile(v11, (B1 + 1, 1)) # (B1+1, m+n)\n V10 = np.tile(v10, (B1 + 1, 1))\n V01 = np.tile(v01, (B1 + 1, 1))\n idx = rs.rand(*V11.shape).argsort(axis=1) # (B1+1, m+n): rows of permuted indices\n V11 = np.take_along_axis(V11, idx, axis=1) # (B1+1, m+n): permute the entries of the rows\n V10 = np.take_along_axis(V10, idx, axis=1)\n V01 = np.take_along_axis(V01, idx, axis=1)\n V11[B1] = v11 # (B1+1)th entry is the original MMD (no permutation)\n V10[B1] = v10\n V01[B1] = v01\n V11 = V11.transpose() # (m+n, B1+1)\n V10 = V10.transpose() \n V01 = V01.transpose() \n M1 = (\n np.sum(V10 * (K @ V10), 0) * (n - m + 1) / (m * n * (m - 1))\n + np.sum(V01 * (K @ V01), 0) * (m - n + 1) / (m * n * (n - 1))\n + np.sum(V11 * (K @ V11), 0) / (m * n)\n ) # (B1+1, ) permuted MMD estimates\n elif approx_type == \"wild bootstrap\":\n R = rs.choice([-1.0, 1.0], size=(B1 + 1, n))\n R[B1] = np.ones(n)\n R = R.transpose()\n R = np.concatenate((R, -R)) # (2n, B1+1) \n M1 = np.sum(R * (K @ R) , 0) /(n * (n - 1))\n else:\n raise ValueError(\n 'The value of approx_type should be either \"permutation\" or \"wild bootstrap\".'\n )\n MMD_original = M1[B1]\n M1_sorted = np.sort(M1) \n \n # output test result\n if MMD_original > M1_sorted[int(np.ceil((B1 + 1) * (1 - alpha))) - 1]:\n return 1\n return 0 \n\n\ndef ratio_mmd_stdev(K, approx_type, regulariser=10**(-8)):\n \"\"\"\n Compute the estimated ratio of the MMD to the asymptotic standard deviation under the alternative.\n This is stated in Eq. (15) in our paper, it originally comes from Eq. (3) in:\n <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>\n Learning deep kernels for non-parametric two-sample tests\n International Conference on Machine Learning, 2020\n http://proceedings.mlr.press/v119/liu20m/liu20m.pdf\n assumption: m = n: equal number of samples in X and Y\n inputs: K: (m+n, m+n) kernel matrix for pooled sample WITH diagonal \n (K has NOT been mutated by mutate_K function)\n approx_type: \"permutation\" (for MMD_a estimate Eq. (3)) \n or \"wild bootstrap\" (for MMD_b estimate Eq. 
(6))\n regulariser: small positive number (we use 10**(-8) as done by Liu et al.)\n m: number of samples (d-dimensional points) in X \n K: (m+n, m+n) kernel matrix for pooled sample WITH diagonal \n output: estimate of criterion J which is the ratio of MMD^2 and of the variance under the H_a\n warning: this function mutates K using the mutate_K function\n there is no approximation but approx_type is required to determine whether to use\n MMD_a estimate as in Eq. (3) or MMD_b estimate as in Eq. (6)\n \"\"\" \n n = int(K.shape[0]/2)\n\n # compute variance\n Kxx = K[:n, :n]\n Kxy = K[:n, n:]\n Kyx = K[n:, :n]\n Kyy = K[n:, n:]\n H_column_sum = (\n np.sum(Kxx, axis=1)\n + np.sum(Kyy, axis=1)\n - np.sum(Kxy, axis=1)\n - np.sum(Kyx, axis=1)\n )\n var = (\n 4 / n ** 3 * np.sum(H_column_sum ** 2)\n - 4 / n ** 4 * np.sum(H_column_sum) ** 2\n + regulariser\n )\n # we should obtain var > 0, if var <= 0 then we discard the corresponding\n # bandwidth by returning a large negative value so that we do not select\n # the corresponding bandwidth when selecting the maximum of the outputs\n # of ratio_mmd_stdev for bandwidths in the collection\n if not var > 0:\n return -1e10 \n\n # compute original MMD estimate\n mutate_K(K, approx_type)\n if approx_type == \"permutation\":\n # compute MMD_a estimate\n Kxx = K[:n, :n]\n Kxy = K[:n, n:]\n Kyy = K[n:, n:]\n s = np.ones(n)\n mmd = (\n s @ Kxx @ s / (n * (n - 1))\n + s @ Kyy @ s / (n * (n - 1))\n - 2 * s @ Kxy @ s / (n ** 2)\n )\n elif approx_type == \"wild bootstrap\":\n # compute MMD_b estimate\n v = np.concatenate((np.ones(n), -np.ones(n)))\n mmd = v @ K @ v / (n * (n - 1))\n else:\n raise ValueError(\n 'The value of approx_type should be either \"permutation\" or \"wild bootstrap\".'\n )\n return mmd / np.sqrt(var)\n\n\ndef mmd_split_test(\n seed, X, Y, alpha, kernel_type, approx_type, B1, bandwidth_multipliers, proportion=0.5\n):\n \"\"\"\n Split data in equal halves. Select 'optimal' bandwidth using first half (in the sense \n that it maximizes ratio_mmd_stdev) and run the MMD test with the selected bandwidth on \n the second half. This was first proposed by Gretton et al. (2012) for the linear-time \n MMD estimate and generalised by Liu et al. (2020) to the quadratic-time MMD estimate.\n <NAME>, <NAME>, <NAME>, <NAME>,\n <NAME>, <NAME> and <NAME>ukumizu\n Optimal kernel choice for large-scale two-sample tests\n Advances in Neural Information Processing Systems 2012\n https://papers.nips.cc/paper/2012/file/dbe272bab69f8e13f14b405e038deb64-Paper.pdf\n <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>\n Learning deep kernels for non-parametric two-sample tests\n International Conference on Machine Learning, 2020\n http://proceedings.mlr.press/v119/liu20m/liu20m.pdf\n inputs: seed: integer random seed\n X: (m,d) array (m d-dimensional points)\n Y: (n,d) array (n d-dimensional points)\n alpha: real number in (0,1) (level of the test)\n kernel_type: \"gaussian\" or \"laplace\"\n approx_type: \"permutation\" (for MMD_a estimate Eq. (3)) \n or \"wild bootstrap\" (for MMD_b estimate Eq. 
(6))\n B1: number of simulated test statistics to estimate the quantiles\n bandwidth_multipliers: array such that the 'optimal' bandwidth is selected from\n collection_bandwidths = [c*median_bandwidth for c in bandwidth_multipliers]\n proportion: proportion of data used to select the bandwidth \n output: result of MMD test run on half the data with the bandwidth from collection_bandwidths which is \n 'optimal' in the sense that it maximizes ratio_mmd_stdev on the other half of the data\n (REJECT H_0 = 1, FAIL TO REJECT H_0 = 0)\n \"\"\"\n assert X.shape == Y.shape\n n, d = X.shape \n \n split_size = int(n * proportion) \n \n rs = np.random.RandomState(seed)\n pX = rs.permutation(n)\n pY = rs.permutation(n)\n X1 = X[pX][:split_size]\n X2 = X[pX][split_size:]\n Y1 = Y[pY][:split_size]\n Y2 = Y[pY][split_size:] \n \n # compute median bandwidth\n median_bandwidth = compute_median_bandwidth_subset(seed, X, Y)\n\n # select bandwidth which maximizes criterion J using X1 and Y1\n kernel_matrices_list = kernel_matrices(\n X1, Y1, kernel_type, median_bandwidth, bandwidth_multipliers\n )\n ratio_values = []\n for i in range(len(kernel_matrices_list)):\n K = kernel_matrices_list[i]\n ratio_values.append(ratio_mmd_stdev(K, approx_type))\n selected_multiplier = bandwidth_multipliers[np.argmax(ratio_values)]\n \n # run MMD test on X2 and Y2 with the selected bandwidth\n return mmd_median_test(\n seed, X2, Y2, alpha, kernel_type, approx_type, B1, selected_multiplier\n )\n", "id": "9656307", "language": "Python", "matching_score": 3.6206390857696533, "max_stars_count": 10, "path": "tests.py" }, { "content": "\"\"\"\nThis code was extracted from https://github.com/MPI-IS/tests-wo-splitting under The MIT License.\n<NAME>, <NAME>, <NAME>, <NAME>\nLearning Kernel Tests Without Data Splitting\nNeural Information Processing Systems 2020\nhttps://papers.nips.cc/paper/2020/file/44f683a84163b3523afe57c2e008bc8c-Paper.pdf\n\nWe use their test as a comparison in our experiments.\nWe modified the PTKGauss class to use the exact same kernel\nwe use for our test. We have also added a PTKLaplace class \nand have defined the ost function which we use in our experiments.\n\"\"\"\n\nimport torch\nfrom scipy.stats import norm\nfrom scipy.stats import chi as chi_stats\nfrom cvxopt import matrix, solvers\nfrom median import compute_median_bandwidth_subset\nimport numpy as np\n\n\nclass PTKGauss:\n \"\"\"\n Pytorch implementation of the isotropic Gaussian kernel.\n Parameterization is the same as in the density of the standard normal\n distribution. sigma2 is analogous to the variance.\n \n Modifications: keep only case X.dim() = 2\n change parameter to bandwidth = sqrt(2*sigma2)\n \"\"\"\n\n def __init__(self, bandwidth):\n \"\"\"\n bandwidth: a number \n \"\"\"\n bandwidth = torch.tensor(bandwidth)\n assert (bandwidth > 0).any(), 'bandwidth must be > 0. 
Was %s' % str(bandwidth)\n self.bandwidth = bandwidth\n\n def eval_lin(self, X, Y):\n \"\"\"\n Evaluate only the relevant entries for the linear time mmd\n ----------\n X : n1 x d Torch Tensor\n Y : n2 x d Torch Tensor\n Return\n ------\n K : a n/2 list of entries.\n \"\"\"\n bandwidth = torch.sqrt(self.bandwidth ** 2)\n assert X.dim() == 2\n assert X.size() == Y.size()\n assert bandwidth.size()[0] == X.size()[1]\n D2 = torch.sum(((X - Y).div(bandwidth)) ** 2, dim=1).view(1, -1)\n K = torch.exp(-D2)\n # We have rewritten this to be similar to PTKLaplace.\n # This way of computing D2 is equivalent to the following:\n # sumx2 = torch.sum(X ** 2, dim=1).view(1, -1)\n # sumy2 = torch.sum(Y ** 2, dim=1).view(1, -1)\n # D2 = sumx2 - 2 * torch.sum(X * Y, dim=1).view(1, -1) + sumy2\n # K = torch.exp(-D2.div(bandwidth**2))\n return K\n \n\n# added\nclass PTKLaplace:\n \"\"\"\n Pytorch implementation of the isotropic Laplace kernel.\n \"\"\"\n\n def __init__(self, bandwidth):\n \"\"\"\n bandwidth: a number \n \"\"\"\n bandwidth = torch.tensor(bandwidth)\n assert (bandwidth > 0).any(), 'bandwidth must be > 0. Was %s' % str(bandwidth)\n self.bandwidth = bandwidth\n\n def eval_lin(self, X, Y):\n \"\"\"\n Evaluate only the relevant entries for the linear time mmd\n ----------\n X : n1 x d Torch Tensor\n Y : n2 x d Torch Tensor\n Return\n ------\n K : a n/2 list of entries.\n \"\"\"\n bandwidth = torch.sqrt(self.bandwidth ** 2)\n assert X.dim() == 2\n assert X.size() == Y.size()\n assert bandwidth.size()[0] == X.size()[1]\n D1 = torch.sum(torch.abs((X - Y).div(bandwidth)), dim=1).view(1, -1)\n K = torch.exp(-D1)\n return K\n\n\nclass LinearMMD:\n \"\"\"\n To compute linear time MMD estimates and the covariance matrix of the asymptotic distribution of the linear time\n MMD for d different kernels.\n \"\"\"\n\n def __init__(self, kernels):\n \"\"\"\n :param kernels: list of kernels, which will be considered\n :returns\n mmd: linear time mmd estimates for all the kernels. Scaled with sqrt(n)\n Sigma: covariance matrix of the asymptotic normal distribution of linear mmd estimates\n \"\"\"\n self.kernels = kernels\n\n # number of kernels considered\n self.d = len(kernels)\n\n def estimate(self, x_sample, y_sample):\n \"\"\"\n Computes the linear time estimates of the MMD, for all kernels that should be considered. Further\n it computes the asymptotic covariance matrix of the linear time MMD for the kernels.\n The samplesize is taken into account on the side of the MMD, i.e., we estimate sqrt(n) MMD^2\n :param x_sample: data from P\n :param y_sample: data from Q\n :return:\n \"\"\"\n if not isinstance(x_sample, torch.Tensor):\n # convert data to torch tensors\n x_sample = torch.tensor(x_sample)\n y_sample = torch.tensor(y_sample)\n assert list(x_sample.size())[0] == list(y_sample.size())[0], 'datasets must have same samplesize'\n\n # determine length of the sample\n size = list(x_sample.size())[0]\n # for linear time mmd assume that the number of samples is 2n. 
Truncate last data point if uneven\n size = size - size % 2\n n = int(size / 2)\n # define the\n x1, x2 = x_sample[:n], x_sample[n:size]\n y1, y2 = y_sample[:n], y_sample[n:size]\n\n # tensor of all functions h defined for the kernels\n h = torch.zeros(self.d, n)\n\n # compute values of h on the data\n for u in range(self.d):\n gram_xx = self.kernels[u].eval_lin(X=x1, Y=x2).squeeze() # added .squeeze()\n gram_xy = self.kernels[u].eval_lin(X=x1, Y=y2).squeeze() # added .squeeze()\n gram_yx = self.kernels[u].eval_lin(X=y1, Y=x2).squeeze() # added .squeeze()\n gram_yy = self.kernels[u].eval_lin(X=y1, Y=y2).squeeze() # added .squeeze()\n\n h[u] = gram_xx - gram_xy - gram_yx + gram_yy\n\n mmd = torch.sum(h, dim=1) / n\n Sigma = 1 / n * h.matmul(h.transpose(0,1)) - mmd.view(-1,1).matmul(mmd.view(1,-1))\n\n # We consider sqrt(n) * mmd. Therefore we will keep Sigma on a scale independent of n\n mmd = np.sqrt(n) * mmd\n\n return np.array(mmd), np.array(Sigma)\n\n\ndef truncation(beta_star, tau, Sigma, accuracy=1e-6):\n \"\"\"\n Compute\n :param beta_star: optimal projection of tau\n :param tau: vector of test statistics\n :param Sigma: Covariance matrix of test statistics\n :param accuracy: threshold to determine whether an entry is zero\n :return: Lower threshold of conditional distribution of beta_star^T tau\n \"\"\"\n # dimension of data\n d = len(tau)\n # determine non-zero entries of betastar\n non_zero = [1 if beta_i > accuracy else 0 for beta_i in beta_star]\n # define the arguments of the maximization of V^-\n arguments = [(tau[i] * (beta_star @ Sigma @ beta_star) - (np.eye(1, d, i) @ Sigma @ beta_star) * (beta_star @ tau))\n / (np.sqrt(Sigma[i][i]) * np.sqrt(beta_star @ Sigma @ beta_star) - np.eye(1, d, i) @ Sigma @ beta_star)\n for i in range(len(tau)) if non_zero[i] == 0]\n # catch cases for which we have 0/0 and hence nan. We dont consider these\n arguments = np.array([argument if argument > -10e6 else -10e6 for argument in arguments])\n if len(arguments) == 0:\n return -10e6\n v_minus = np.max(arguments)\n return v_minus\n\n\ndef truncated_gaussian(var, v_minus, level):\n \"\"\"\n Computes the (1-level) threshold of a truncated normal (the original normal is assumed to be centered)\n :param var: variance of the original normal\n :param v_minus: lower truncation\n :param level: desired level\n :return:\n \"\"\"\n # normalize everything\n lower = v_minus / np.sqrt(var)\n # compute normalization of the truncated section\n renormalize = 1 - norm.cdf(lower)\n if renormalize == 0:\n # force a reject\n return np.sqrt(var) * 10000\n assert renormalize > 0, \"renormalize is not positive\"\n\n threshold = np.sqrt(var) * norm.ppf(renormalize * (1 - level) + norm.cdf(lower))\n return threshold\n\n\ndef optimization(tau, Sigma, selection='continuous'):\n \"\"\"\n optimizes the signal to noise ratio. 
If tau has at least one positive entry, we fix the nominator to some constant\n by setting beta^T tau = 1 and then optimize the denominator.\n If tau has only negative entries, the signal to noise ratio is given by the optimum of the discrete optimization\n :param tau: Signal\n :param Sigma: noise\n :param selection: discrete (select from base tests) / continuous (OST in canoncical form)\n :return: optimal vector beta_star\n \"\"\"\n\n if np.max(tau) < 1e-6:\n # If all entries are negative, then for the continuous case we also select the best of the base tests\n selection = 'discrete'\n\n # determine dimensionality\n d = len(tau)\n if selection == 'continuous':\n tau = np.ndarray.tolist(tau)\n Sigma = np.ndarray.tolist(Sigma)\n\n # define quadratic program in cvxopt\n P = matrix(Sigma)\n q = matrix(np.zeros(d))\n G = matrix(np.diag([-1.] * d))\n h = matrix(np.zeros(d))\n A = matrix(np.array([tau]))\n b = matrix([1.])\n\n initialization = matrix([1.] * d)\n solvers.options['reltol'] = 1e-40\n solvers.options['abstol'] = 1e-10\n solvers.options['show_progress'] = False\n solvers.options['maxiters'] = 10000\n sol = solvers.qp(P, q, G, h, A, b, initvals=initialization)\n\n beta_star = np.array(sol['x']).flatten()\n # normalize betastar\n beta_star = beta_star / np.linalg.norm(beta_star, ord=1)\n return beta_star\n else:\n arguments = tau / np.sqrt(np.diag(Sigma))\n # in case of division by zero, we do not consider it since it implies also that the nominator is zero\n arguments = np.array([argument if argument > -10e6 else -10e6 for argument in arguments])\n j = int(np.argmax(arguments))\n beta_star = [0] * d\n beta_star[j] = 1\n return np.array(beta_star)\n \n\ndef ost_test(tau, Sigma, alpha=0.05, selection='discrete', max_condition=1e-6, accuracy=1e-6, constraints='Sigma',\n pval=False):\n \"\"\"\n Runs the full test suggested in our paper.\n :param tau: observed statistic\n :param Sigma: covariance matrix\n :param alpha: level of test\n :param selection: continuous/discrete (discrete is not extensively tested)\n :param max_condition: at which condition number the covariance matrix is truncated.\n :param accuracy: threshold to determine whether an entry is zero\n :param constraints: if 'Sigma' we work with the constraints (Sigma beta) >=0. 
If 'positive' we work with beta >= 0\n :param pval: if true, returns the conditional p value instead of the test result\n :return: 1 (reject), 0 (no reject)\n \"\"\"\n assert constraints == 'Sigma' or constraints == 'positive', 'Constraints are not implemented'\n # if the selection is discrete we dont want any transformations\n if selection == 'discrete':\n constraints = 'positive'\n\n # check if there are entries with 0 variance\n zeros = [i for i in range(len(tau)) if Sigma[i][i] < 1e-15] # changed from 1e-10\n tau = np.delete(tau, zeros)\n Sigma = np.delete(Sigma, zeros, 0)\n Sigma = np.delete(Sigma, zeros, 1)\n\n if constraints == 'Sigma':\n # compute pseudoinverse to also handle singular covariances (see Appendix)\n r_cond = max_condition # parameter which precision to use\n Sigma_inv = np.linalg.pinv(Sigma, rcond=r_cond, hermitian=True)\n\n # use Remark 1 to convert the problem\n tau = Sigma_inv @ tau\n Sigma = Sigma_inv\n\n # Apply Theorem 1 in the canonical form with beta>=0 constraints\n beta_star = optimization(tau=tau, Sigma=Sigma, selection=selection)\n\n # determine active set\n non_zero = [1 if beta_i > accuracy else 0 for beta_i in beta_star]\n\n projector = np.diag(non_zero)\n effective_sigma = projector @ Sigma @ projector\n\n # Use the rank of effective Sigma to determine how many degrees of freedom the covariance has after conditioning\n # for non-singular original covariance, this is the same number as the number of active dimensions |mathcal{U}|,\n # however, for singular cases using the rank is the right way to go.\n tol = max_condition * np.max(np.linalg.eigvalsh(Sigma))\n r = np.linalg.matrix_rank(effective_sigma, tol=tol, hermitian=True)\n # go back to notation used in the paper\n l = r\n if l > 1:\n test_statistic = beta_star @ tau / np.sqrt(beta_star @ Sigma @ beta_star)\n threshold = chi_stats.ppf(q=1 - alpha, df=l)\n else:\n vminus = truncation(beta_star=beta_star, tau=tau, Sigma=Sigma, accuracy=accuracy)\n threshold = truncated_gaussian(var=beta_star @ Sigma @ beta_star, v_minus=vminus, level=alpha)\n test_statistic = beta_star @ tau\n if not pval:\n if test_statistic > threshold:\n # reject\n return 1\n else:\n # cannot reject\n return 0\n if pval:\n if l > 1:\n test_statistic = beta_star @ tau / np.sqrt(beta_star @ Sigma @ beta_star)\n pvalue = 1 - chi_stats.cdf(x=test_statistic, df=l)\n else:\n test_statistic = beta_star @ tau / np.sqrt(beta_star @ Sigma @ beta_star)\n vminus = truncation(beta_star=beta_star, tau=tau, Sigma=Sigma, accuracy=accuracy) / \\\n np.sqrt(beta_star @ Sigma @ beta_star)\n pvalue = 1 - (norm.cdf(x=test_statistic) - norm.cdf(x=vminus)) / (1 - norm.cdf(x=vminus))\n return pvalue\n\n\n# Run ost_test in our setting\ndef ost(seed, X, Y, alpha, kernel_type, l_minus, l_plus):\n assert X.shape == Y.shape\n assert kernel_type in [\"gaussian\", \"laplace\"]\n median_bandwidth = compute_median_bandwidth_subset(seed, X, Y)\n bandwidths = [median_bandwidth * (2 ** factor) for factor in range(l_minus, l_plus+1)]\n if kernel_type == \"gaussian\":\n kernels = [PTKGauss(bandwidths[u]) for u in range(len(bandwidths))]\n if kernel_type == \"laplace\":\n kernels = [PTKLaplace(bandwidths[u]) for u in range(len(bandwidths))]\n mmd = LinearMMD(kernels)\n tau, Sigma = mmd.estimate(X, Y)\n return ost_test(\n tau=tau, \n Sigma=Sigma, \n alpha=alpha, \n selection=\"continuous\",\n max_condition=1e-5, # changed from 1e-6\n constraints='Sigma',\n )\n", "id": "2184569", "language": "Python", "matching_score": 1.7482514381408691, "max_stars_count": 10, "path": 
"ost.py" }, { "content": "\"\"\"\nThis code was extracted from https://github.com/MPI-IS/tests-wo-splitting under The MIT License.\n<NAME>, <NAME>, <NAME>, <NAME>\nLearning Kernel Tests Without Data Splitting\nNeural Information Processing Systems 2020\nhttps://papers.nips.cc/paper/2020/file/44f683a84163b3523afe57c2e008bc8c-Paper.pdf\n\nThe download_mnist function downloads the MNIST dataset \nand downsamples it to 7x7 images. The data mnist_7x7.data \nis the same as the considered by the above authors. It \nshould be run only once. The load_mnist function loads\ndatasets consisting of images of various digits. \n\"\"\"\n\nimport pickle\nfrom sklearn.datasets import fetch_openml\nimport numpy as np\nfrom pathlib import Path\n\n\ndef download_mnist():\n \"\"\"\n Download MNIST dataset and downsample it to 7x7 images,\n save the downsampled dataset as mnist_7x7.data in the\n mnist_dataset directory.\n \"\"\"\n X, y = fetch_openml(\"mnist_784\", return_X_y=True)\n X = X.to_numpy()\n X = X / 255\n digits = {}\n for i in range(10):\n digits[str(i)] = []\n for i in range(len(y)):\n digits[y[i]].append(X[i])\n digits_7x7 = {}\n for i in range(10):\n current = np.array(digits[str(i)])\n n = len(current)\n # make the dataset 2D again\n current = np.reshape(current, (n, 28, 28))\n current = np.reshape(current, (n, 7, 4, 7, 4))\n current = current.mean(axis=(2, 4))\n digits_7x7[str(i)] = np.reshape(current, (n, 49))\n path = \"mnist_dataset/mnist_7x7.data\"\n f = open(path, 'wb')\n pickle.dump(digits_7x7, f)\n f.close()\n\n\ndef load_mnist():\n \"\"\"\n Returns P and Q_list where P consists of images of all digits \n in mnist_7x7.data, and Q_list contains 5 elements each consisting\n of images of fewer digits.\n This function should only be run after download_mnist().\n \"\"\"\n with open('mnist_dataset/mnist_7x7.data', 'rb') as handle:\n X = pickle.load(handle)\n P = np.vstack(\n (X['0'], X['1'], X['2'], X['3'], X['4'], X['5'], X['6'], X['7'], X['8'], X['9'])\n )\n Q1 = np.vstack((X['1'], X['3'], X['5'], X['7'], X['9']))\n Q2 = np.vstack((X['0'], X['1'], X['3'], X['5'], X['7'], X['9']))\n Q3 = np.vstack((X['0'], X['1'], X['2'], X['3'], X['5'], X['7'], X['9']))\n Q4 = np.vstack((X['0'], X['1'], X['2'], X['3'], X['4'], X['5'], X['7'], X['9']))\n Q5 = np.vstack(\n (X['0'], X['1'], X['2'], X['3'], X['4'], X['5'], X['6'], X['7'], X['9'])\n )\n Q_list = [Q1, Q2, Q3, Q4, Q5]\n return P, Q_list\n\n\nif __name__ == \"__main__\":\n Path(\"mnist_dataset\").mkdir(exist_ok=True)\n if Path(\"mnist_dataset/mnist_7x7.data\").is_file() == False:\n download_mnist()\n", "id": "12031336", "language": "Python", "matching_score": 1.356334924697876, "max_stars_count": 10, "path": "mnist.py" }, { "content": "\"\"\"\nDownload data in the directory data/ from \nhttps://drive.google.com/file/d/1CSj36IEW9x5_CCbiNXqBhqVqofiaGE0D/view?usp=sharing\nThis data has been obtained by running the generate_data_rbm.py and generate_data_nf.py scripts.\n\"\"\"\n\nfrom pathlib import Path\nimport requests, zipfile\nfrom tqdm import tqdm\n\nif Path('data').exists():\n raise ValueError(\"Directory ./data already exists!\")\n\nprint('Starting download')\nurl = 'https://drive.google.com/u/0/uc?id=1CSj36IEW9x5_CCbiNXqBhqVqofiaGE0D&export=download&confirm=t'\nfilename = \"data.zip\"\nresponse = requests.get(url, stream=True)\n# progress bar\ntotal_size_in_bytes= int(response.headers.get('content-length', 0))\nblock_size = 1024\nprogress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)\nwith open(filename, 'wb') as file:\n for 
data in response.iter_content(block_size):\n progress_bar.update(len(data))\n file.write(data)\nprogress_bar.close()\nif total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:\n raise ValueError(\"Error: download failed. Try again.\")\nprint('Download completed')\n\nprint('Starting unzip')\nwith zipfile.ZipFile(filename) as zf:\n for member in tqdm(zf.infolist(), desc='Extracting '):\n try:\n zf.extract(member, './')\n except zipfile.error as e:\n pass\nprint('Unzip completed')\n\n# delete .zip file\nPath(\"./data.zip\").unlink()\n\n", "id": "9256954", "language": "Python", "matching_score": 1.228680968284607, "max_stars_count": 2, "path": "download_data.py" }, { "content": "\"\"\"\nWe generate data for the Gaussian-Bernoulli Restricted Boltzmann \nMachine experiment as proposed in Section 4.4 of our paper\nKSD Aggregated Goodness-of-fit Test\n<NAME>, <NAME>, <NAME>\nhttps://arxiv.org/pdf/2202.00824.pdf\nThe data is saved in the directory data/RBM.\n\"\"\"\n\nimport kgof.density as density\nimport kgof.data as data\nfrom pathlib import Path\nimport numpy as np\nimport time\n\n\ndef rbm_samples_scores(\n seed,\n m,\n sigma,\n dx=50,\n dh=40,\n burnin_number=2000,\n):\n \"\"\"\n Generate data for the Gaussian-Bernoulli Restricted Boltzmann Machine (RBM) experiment.\n The entries of the matrix B are perturbed.\n This experiment was first proposed by Liu et al., 2016 (Section 6)\n inputs: seed: non-negative integer\n m: number of samples\n sigma: standard deviation of Gaussian noise\n dx: dimension of observed output variable\n dh: dimension of binary latent variable\n burnin_number: number of burn-in iterations for Gibbs sampler\n outputs: 2-tuple consisting of\n (m,dx) array of samples generated using the perturbed RBM\n (m,dx) array of scores computed using the non-perturbed RBM (model)\n \"\"\"\n # the perturbed model is fixed, randomness comes from sampling\n rs = np.random.RandomState(0)\n\n # Model p\n B = rs.randint(0, 2, (dx, dh)) * 2 - 1.0\n b = rs.randn(dx)\n c = rs.randn(dh)\n p = density.GaussBernRBM(B, b, c)\n\n # Sample from q\n B_perturbed = B + rs.randn(dx, dh) * sigma\n q = density.GaussBernRBM(B_perturbed, b, c)\n ds = q.get_datasource()\n ds.burnin = burnin_number\n samples = ds.sample(m, seed=seed).data()\n\n # Compute score under p\n scores = p.grad_log(samples)\n\n return samples, scores\n\n\nPath(\"data/RBM\").mkdir(exist_ok=True, parents=True)\nrepetitions = 200\nm = 1000\nd = 50\ndh = 40\nsigmas = [0, 0.01, 0.02, 0.03, 0.04]\nseed_count = 0\nprint(\"Starting\")\nt = time.time()\nfor s in range(len(sigmas)):\n sigma = sigmas[s]\n X = np.empty((repetitions, m, d))\n score_X = np.empty((repetitions, m, d))\n for r in range(repetitions):\n seed_count += 1\n X[r], score_X[r] = rbm_samples_scores(seed_count, m, sigma, d, dh)\n if (r + 1) % 10 == 0:\n print(\n \"Step s =\",\n s + 1,\n \"/\",\n len(sigmas),\n \",\",\n r + 1,\n \"/\",\n repetitions,\n \"time:\",\n time.time() - t,\n )\n t = time.time()\n np.save(\"data/RBM/X_rbm_s\" + str(int(sigma * 100)) + \".npy\", X)\n np.save(\"data/RBM/score_X_rbm_s\" + str(int(sigma * 100)) + \".npy\", score_X)\nprint(\"RBM data has been saved in data/RBM.\")\n", "id": "10239621", "language": "Python", "matching_score": 0.15725137293338776, "max_stars_count": 2, "path": "generate_data_rbm.py" }, { "content": "def generate_seed(*args):\n \"\"\"\n Generate an integer by concatenating arguments, different arguments\n always give a different integer. 
\n We assume that all arguments are digits except the last one which \n can be an integer consisting of at most 4 digits.\n \"\"\"\n str_args = [str(x) for x in args]\n if len(str_args[-1]) == 1:\n str_args[-1] = \"\".join([\"000\", str_args[-1]])\n if len(str_args[-1]) == 2:\n str_args[-1] = \"\".join([\"00\", str_args[-1]])\n if len(str_args[-1]) == 3:\n str_args[-1] = \"\".join([\"0\", str_args[-1]])\n return int(\"\".join(str_args))\n", "id": "9260360", "language": "Python", "matching_score": 0.2155270278453827, "max_stars_count": 10, "path": "seed.py" } ]
2.485482
AbdulHadi404
[ { "content": "\nimport cv2\nimport numpy as np\nimport os\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport time\nimport datetime\n\n# Images Properties\ndef plt_show(image, title=\"\"):\n if len(image.shape) == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.axis(\"off\")\n plt.title(title)\n plt.imshow(image, cmap=\"Greys_r\")\n plt.show()\n\n# face detection \nclass FaceDetector(object):\n def __init__(self, xml_path):\n self.classifier = cv2.CascadeClassifier(xml_path)\n \n def detect(self, image, biggest_only=True):\n scale_factor = 1.2\n min_neighbors = 5\n min_size = (75, 75)\n biggest_only = True\n flags = cv2.CASCADE_FIND_BIGGEST_OBJECT | cv2.CASCADE_DO_ROUGH_SEARCH if biggest_only else cv2.CASCADE_SCALE_IMAGE\n faces_coord = self.classifier.detectMultiScale(image,\n scaleFactor=scale_factor,\n minNeighbors=min_neighbors,\n minSize=min_size,\n flags=flags)\n return faces_coord\n\n# Video Camera\nclass VideoCamera(object):\n def __init__(self, index=1):\n self.video = cv2.VideoCapture(index)\n self.index = index\n print (self.video.isOpened())\n\n def __del__(self):\n self.video.release()\n \n def get_frame(self, in_grayscale=False):\n _, frame = self.video.read()\n if in_grayscale:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n return frame\n\n# Crop Images\ndef cut_faces(image, faces_coord):\n faces = []\n \n for (x, y, w, h) in faces_coord:\n w_rm = int(0.3 * w / 2)\n faces.append(image[y: y + h, x + w_rm: x + w - w_rm])\n \n return faces\n\n# Normalize Images\ndef normalize_intensity(images):\n images_norm = []\n for image in images:\n is_color = len(image.shape) == 3 \n if is_color:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n images_norm.append(cv2.equalizeHist(image))\n return images_norm\n\n# Resize Images\ndef resize(images, size=(100, 100)):\n images_norm = []\n for image in images:\n if image.shape < size:\n image_norm = cv2.resize(image, size, \n interpolation=cv2.INTER_AREA)\n else:\n image_norm = cv2.resize(image, size, \n interpolation=cv2.INTER_CUBIC)\n images_norm.append(image_norm)\n\n return images_norm\n\n# Normalize Faces\ndef normalize_faces(frame, faces_coord):\n faces = cut_faces(frame, faces_coord)\n faces = normalize_intensity(faces)\n faces = resize(faces)\n return faces\n\n# Rectangle Line\ndef draw_rectangle(image, coords):\n for (x, y, w, h) in coords:\n w_rm = int(0.2 * w / 2) \n cv2.rectangle(image, (x + w_rm, y), (x + w - w_rm, y + h), \n (102, 255, 0), 1)\n# acquire images from dataset\ndef collect_dataset():\n images = []\n labels = []\n labels_dic = {}\n members = [person for person in os.listdir(\"members/\")]\n for i, person in enumerate(members): # loop over\n labels_dic[i] = person\n for image in os.listdir(\"members/\" + person):\n images.append(cv2.imread(\"members/\" + person + '/' + image, \n 0))\n labels.append(i)\n return (images, np.array(labels), labels_dic)\n\nimages, labels, labels_dic = collect_dataset()\n\n\n# train image (algorithm sets)\nrec_eig = cv2.face.EigenFaceRecognizer_create()\nrec_eig.train(images, labels)\n\nrec_fisher = cv2.face.FisherFaceRecognizer_create()\nrec_fisher.train(images, labels)\n\nrec_lbph = cv2.face.LBPHFaceRecognizer_create()\nrec_lbph.train(images, labels)\n\nprint (\"Models Trained Succesfully\")\n\n\n# cascade face and mask\ndetector = FaceDetector(\"xml/frontal_face.xml\")\ndetector_mask = cv2.CascadeClassifier(\"xml/mask_cascade.xml\")\n# 0 webcam Laptop\n# 1 back cam (usb)\n# 2 front cam (usb)\nwebcam0 = VideoCamera(0)\nwebcam1 = VideoCamera(1)\n\n\nts = 
time.time() \ndate = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\ntimeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')\n\n\n# Mask detection and Face recognition (in)\na= False\nwhile True: \n frame0 = webcam0.get_frame()\n mask = detector_mask.detectMultiScale(frame0, \n scaleFactor=1.2, \n minNeighbors=5, \n minSize=(100, 100),\n maxSize=(150, 150),\n flags=cv2.CASCADE_SCALE_IMAGE)\n for(x1,y1,x2,y2) in mask:\n cv2.rectangle(frame0,(x1,y1),(x1+x2,y1+y2),(0,255,0),2)\n cv2.putText(frame0, 'Using Mask',(x1, y1+y2 + 30), cv2.FONT_HERSHEY_PLAIN, 1.5, (255,255,255), 2)\n a= True\n faces_coord = detector.detect(frame0, False) # detect more than one face\n col_names = ['Name','Date','Time','Mask']\n attendance = pd.DataFrame(columns = col_names)\n if len(faces_coord):\n faces = normalize_faces(frame0, faces_coord) # norm pipeline\n for i, face in enumerate(faces): # for each detected face\n collector = cv2.face.StandardCollector_create()\n rec_lbph.predict_collect(face, collector) # chosen algorithm\n conf = collector.getMinDist()\n pred = collector.getMinLabel()\n threshold = 76 # eigen, fisher, lbph [mean 3375,1175,65] [high lbph 76]\n print (\"Prediction Entry: \" + labels_dic[pred].capitalize() + \"\\nConfidence Entry: \" + str(round(conf))+ \"\\nMask : \" + str(a))\n \n if conf > threshold: # apply threshold\n cv2.putText(frame0, labels_dic[pred].capitalize(),\n (faces_coord[i][0], faces_coord[i][1] - 20),\n cv2.FONT_HERSHEY_DUPLEX, 1.0, (102, 255, 0), 1)\n attendance.loc[len(attendance)] = [labels_dic[pred],date,timeStamp,str(a)]\n Hour,Minute,Second=timeStamp.split(\":\")\n fileName=\"attendancein\\Attendance_\"+labels_dic[pred]+\"-\"+date+\"_\"+Hour+\"-\"+Minute+\"-\"+Second+\".csv\" # write to output file (in)\n attendance.to_csv(fileName,index=False)\n \n else:\n cv2.putText(frame0, \"Unknown\",\n (faces_coord[i][0], faces_coord[i][1] - 10),\n cv2.FONT_HERSHEY_DUPLEX, 1.0, (66, 55, 245), 1)\n draw_rectangle(frame0, faces_coord) # rectangle around face\n cv2.putText(frame0, \"ESC to exit\", (5, frame0.shape[0] - 5),\n cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.imshow(\"Entry Cam\", frame0) # live feed in external\n if cv2.waitKey(33) & 0xFF == 27:\n cv2.destroyAllWindows()\n break\n \n# mask detection and face recognition (out) \n \n frame1 = webcam1.get_frame()\n mask = detector_mask.detectMultiScale(frame1, \n scaleFactor=1.2, \n minNeighbors=5, \n minSize=(100, 100),\n maxSize=(150, 150),\n flags=cv2.CASCADE_SCALE_IMAGE)\n for(x1,y1,x2,y2) in mask:\n cv2.rectangle(frame1,(x1,y1),(x1+x2,y1+y2),(0,255,0),2)\n cv2.putText(frame1, 'Using Mask',(x1, y1+y2 + 30), cv2.FONT_HERSHEY_PLAIN, 1.5, (255,255,255), 2)\n \n faces_coord = detector.detect(frame1, False) # detect more than one face\n col_names = ['Name','Date','Time']\n attendance = pd.DataFrame(columns = col_names)\n if len(faces_coord):\n faces = normalize_faces(frame1, faces_coord) # norm pipeline\n for i, face in enumerate(faces): # for each detected face\n collector = cv2.face.StandardCollector_create()\n rec_lbph.predict_collect(face, collector) # chosen algorithm\n conf = collector.getMinDist()\n pred = collector.getMinLabel()\n threshold = 75 # eigen, fisher, lbph [mean 3375,1175,65] [high lbph 76]\n print (\"Prediction Exit: \" + labels_dic[pred].capitalize() + \"\\nConfidence Exit: \" + str(round(conf)))\n \n if conf > threshold: # apply threshold\n cv2.putText(frame1, labels_dic[pred].capitalize(),\n (faces_coord[i][0], faces_coord[i][1] - 20),\n cv2.FONT_HERSHEY_DUPLEX, 
1.0, (102, 255, 0), 1)\n attendance.loc[len(attendance)] = [labels_dic[pred],date,timeStamp]\n Hour,Minute,Second=timeStamp.split(\":\")\n fileName=\"attendanceout\\Attendance_\"+labels_dic[pred]+\"-\"+date+\"_\"+Hour+\"-\"+Minute+\"-\"+Second+\".csv\" # write to output file (out)\n attendance.to_csv(fileName,index=False)\n \n else:\n cv2.putText(frame1, \"Unknown\",\n (faces_coord[i][0], faces_coord[i][1] - 10),\n cv2.FONT_HERSHEY_DUPLEX, 1.0, (66, 55, 245), 1)\n draw_rectangle(frame1, faces_coord) # rectangle around face\n cv2.putText(frame1, \"ESC to exit\", (5, frame1.shape[0] - 5),\n cv2.FONT_HERSHEY_DUPLEX, 1, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.imshow(\"Exit Cam\", frame1) # live feed in external\n if cv2.waitKey(33) & 0xFF == 27:\n cv2.destroyAllWindows()\n break\n \ndel webcam0\ndel webcam1\n", "id": "3619991", "language": "Python", "matching_score": 8.222331047058105, "max_stars_count": 0, "path": "3- Recongize - Detection.py" }, { "content": "\nimport cv2\nimport numpy as np\nimport os\nfrom matplotlib import pyplot as plt\n\n\n# Images Properties\ndef plt_show(image, title=\"\"):\n if len(image.shape) == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.axis(\"off\")\n plt.title(title)\n plt.imshow(image, cmap=\"Greys_r\")\n plt.show()\n\n\n# Face Detection \nclass FaceDetector(object):\n def __init__(self, xml_path):\n self.classifier = cv2.CascadeClassifier(xml_path)\n \n def detect(self, image, biggest_only=True):\n scale_factor = 1.2\n min_neighbors = 5\n min_size = (75, 75)\n biggest_only = True\n flags = cv2.CASCADE_FIND_BIGGEST_OBJECT | cv2.CASCADE_DO_ROUGH_SEARCH if biggest_only else cv2.CASCADE_SCALE_IMAGE\n faces_coord = self.classifier.detectMultiScale(image,\n scaleFactor=scale_factor,\n minNeighbors=min_neighbors,\n minSize=min_size,\n flags=flags)\n return faces_coord\n\n\n# Video Camera\nclass VideoCamera(object):\n def __init__(self, index=1):\n self.video = cv2.VideoCapture(index)\n self.index = index\n print (self.video.isOpened())\n\n def __del__(self):\n self.video.release()\n \n def get_frame(self, in_grayscale=False):\n _, frame = self.video.read()\n if in_grayscale:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n return frame\n\n\n# Crop Images\ndef cut_faces(image, faces_coord):\n faces = []\n \n for (x, y, w, h) in faces_coord:\n w_rm = int(0.3 * w / 2)\n faces.append(image[y: y + h, x + w_rm: x + w - w_rm])\n \n return faces\n\n\n# Normalize Images\ndef normalize_intensity(images):\n images_norm = []\n for image in images:\n is_color = len(image.shape) == 3 \n if is_color:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n images_norm.append(cv2.equalizeHist(image))\n return images_norm\n\n\n# resize images\ndef resize(images, size=(100, 100)):\n images_norm = []\n for image in images:\n if image.shape < size:\n image_norm = cv2.resize(image, size, \n interpolation=cv2.INTER_AREA)\n else:\n image_norm = cv2.resize(image, size, \n interpolation=cv2.INTER_CUBIC)\n images_norm.append(image_norm)\n\n return images_norm\n\n# normalize faces\ndef normalize_faces(frame, faces_coord):\n faces = cut_faces(frame, faces_coord)\n faces = normalize_intensity(faces)\n faces = resize(faces)\n return faces\n\n\n# rectangle line\ndef draw_rectangle(image, coords):\n for (x, y, w, h) in coords:\n w_rm = int(0.2 * w / 2) \n cv2.rectangle(image, (x + w_rm, y), (x + w - w_rm, y + h), \n (200, 200, 0), 4)\n\n# acquire images from dataset\ndef collect_dataset():\n images = []\n labels = []\n labels_dic = {}\n members = [person for person in 
os.listdir(\"members/\")]\n for i, person in enumerate(members):\n labels_dic[i] = person\n for image in os.listdir(\"members/\" + person):\n images.append(cv2.imread(\"members/\" + person + '/' + image, \n 0))\n labels.append(i)\n return (images, np.array(labels), labels_dic)\n\nimages, labels, labels_dic = collect_dataset()\n\n\n# train image (algorithm sets)\nrec_eig = cv2.face.EigenFaceRecognizer_create()\nrec_eig.train(images, labels)\n\nrec_fisher = cv2.face.FisherFaceRecognizer_create()\nrec_fisher.train(images, labels)\n\nrec_lbph = cv2.face.LBPHFaceRecognizer_create()\nrec_lbph.train(images, labels)\n\nprint (\"Models Trained Succesfully\")\n\n\n# Face Recognition\nwebcam = VideoCamera(0)\nframe = webcam.get_frame()\ndetector = FaceDetector(\"xml/frontal_face.xml\")\nframe = webcam.get_frame()\nfaces_coord = detector.detect(frame)\nfaces = normalize_faces(frame, faces_coord)\nface = faces[0]\nplt_show(face) \n\n\ncollector = cv2.face.StandardCollector_create()\n\nrec_eig.predict_collect(face, collector)\nconf = collector.getMinDist()\npred = collector.getMinLabel()\n\nprint (\"Eigen Faces -> Prediction: \" + labels_dic[pred] +\" Confidence: \" + str(round(conf)))\n\nrec_fisher.predict_collect(face, collector)\nconf = collector.getMinDist()\npred = collector.getMinLabel()\n\nprint (\"Fisher Faces -> Prediction: \" +labels_dic[pred] + \" Confidence: \" + str(round(conf)))\n\nrec_lbph.predict_collect(face, collector)\nconf = collector.getMinDist()\npred = collector.getMinLabel()\n\nprint (\"LBPH Faces -> Prediction: \" +labels_dic[pred] + \" Confidence: \" + str(round(conf)))\n\n", "id": "12223795", "language": "Python", "matching_score": 7.166155815124512, "max_stars_count": 0, "path": "2- Model Train.py" }, { "content": "\nimport cv2\nimport os\nfrom matplotlib import pyplot as plt\n\n\n# new thread\ncv2.startWindowThread()\n\ndef plt_show(image, title=\"\"):\n if len(image.shape) == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n plt.axis(\"off\")\n plt.title(title)\n plt.imshow(image, cmap=\"Greys_r\")\n plt.show()\n\n\nwebcam = cv2.VideoCapture(0)\n_, frame = webcam.read()\nwebcam.release()\nplt_show(frame) \n\n\n# Face-Detection\ndetector = cv2.CascadeClassifier(\"xml/frontal_face.xml\")\n\nscale_factor = 1.2\nmin_neighbors = 5\nmin_size = (50, 50)\nbiggest_only = True\nflags = cv2.CASCADE_FIND_BIGGEST_OBJECT | cv2.CASCADE_DO_ROUGH_SEARCH if biggest_only else cv2.CASCADE_SCALE_IMAGE\n \nfaces_coord = detector.detectMultiScale(frame,\n scaleFactor=scale_factor,\n minNeighbors=min_neighbors,\n minSize=min_size,\n flags=flags)\n\n# Video-Camera\nclass VideoCamera(object):\n def __init__(self, index=0):\n self.video = cv2.VideoCapture(index)\n self.index = index\n print (self.video.isOpened())\n\n def __del__(self):\n self.video.release()\n \n def get_frame(self, in_grayscale=False):\n _, frame = self.video.read()\n if in_grayscale:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n return frame\n\n# Face Detection\nclass FaceDetector(object):\n def __init__(self, xml_path):\n self.classifier = cv2.CascadeClassifier(xml_path)\n \n def detect(self, image, biggest_only=True):\n scale_factor = 1.2\n min_neighbors = 5\n min_size = (75, 75)\n biggest_only = True\n flags = cv2.CASCADE_FIND_BIGGEST_OBJECT | cv2.CASCADE_DO_ROUGH_SEARCH if biggest_only else cv2.CASCADE_SCALE_IMAGE\n faces_coord = self.classifier.detectMultiScale(image,\n scaleFactor=scale_factor,\n minNeighbors=min_neighbors,\n minSize=min_size,\n flags=flags)\n return faces_coord\n\n# Live Face Detection\nwebcam = 
VideoCamera(0) # front camera\ndetector = FaceDetector(\"xml/frontal_face.xml\")\n\n# crop images\ndef cut_faces(image, faces_coord):\n faces = []\n \n for (x, y, w, h) in faces_coord:\n w_rm = int(0.2 * w / 2)\n faces.append(image[y: y + h, x + w_rm: x + w - w_rm])\n \n return faces\n\n# normalize images\ndef normalize_intensity(images):\n images_norm = []\n for image in images:\n is_color = len(image.shape) == 3 \n if is_color:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n images_norm.append(cv2.equalizeHist(image))\n return images_norm\n\n# resize images\ndef resize(images, size=(100, 100)):\n images_norm = []\n for image in images:\n if image.shape < size:\n image_norm = cv2.resize(image, size, \n interpolation = cv2.INTER_AREA)\n else:\n image_norm = cv2.resize(image, size, \n interpolation = cv2.INTER_CUBIC)\n images_norm.append(image_norm)\n\n return images_norm \n\n# normalize face\ndef normalize_faces(frame, faces_coord):\n faces = cut_faces(frame, faces_coord)\n faces = normalize_intensity(faces)\n faces = resize(faces)\n return faces\n\n# rectangle line\ndef draw_rectangle(image, coords):\n for (x, y, w, h) in coords:\n w_rm = int(0.2 * w / 2) \n cv2.rectangle(image, (x + w_rm, y), (x + w - w_rm, y + h), \n (200, 200, 0), 4)\n\n\n# get and save image \nfolder = \"members/\" + input('Name: ').lower() # input name\ncv2.namedWindow(\"Save Image\", cv2.WINDOW_AUTOSIZE)\nif not os.path.exists(folder):\n os.mkdir(folder)\n counter = 1\n timer = 0\n while counter < 11 : # take 10 pictures\n frame = webcam.get_frame()\n faces_coord = detector.detect(frame) # detector\n if len(faces_coord) and timer % 700 == 50: # timer\n faces = normalize_faces(frame, faces_coord) # norm pipeline\n cv2.imwrite(folder + '/' + str(counter) + '.jpg', faces[0])\n plt_show(faces[0], \"Images Saved:\" + str(counter))\n counter += 1\n draw_rectangle(frame, faces_coord) # rectangle around face\n cv2.imshow(\"Save Image\", frame) # show in external\n cv2.waitKey(50)\n timer += 50\n cv2.destroyAllWindows()\nelse:\n print (\"This name already exists.\")\n\ndel webcam", "id": "4601822", "language": "Python", "matching_score": 1.0896227359771729, "max_stars_count": 0, "path": "1- Collect-Image.py" }, { "content": "import sys\nimport os\nfrom tkinter import *\nfrom tkinter import font\nfrom tkinter.font import BOLD\nimport tkinter.messagebox\nfrom PIL import Image, ImageTk\n\n\nroot = Tk()\nroot.title('Smart Educational Institute')\nroot.iconbitmap('D:\\FYP FINAL\\seilogo.ico')\nroot.geometry(\"700x500\")\n\nroot.config(bg='yellow')\n\nimg = ImageTk.PhotoImage(file=\"img.jpg\")\nlab = Label(\n root,\n image=img\n)\nlab.place(x=0, y=0,)\n\n\ndef collect():\n os.system('\"%s\"' % '1- Collect-Image.py')\n\ndef model():\n os.system('\"%s\"' % '2- Model Train.py')\n\ndef attendance():\n os.system('\"%s\"' % '3- Recongize - Detection.py')\n\ndef save():\n os.system('\"%s\"' % '4 - Save Attendance.py')\n\ndef mongoface():\n os.system('\"%s\"' % '5- MongoSend.py')\n\ndef smartcard():\n os.system('\"%s\"' % '6- Save RFID Attendance.py')\n\ndef msmartcard():\n os.system('\"%s\"' % '7- MongoSend RFID.py')\n\n\n\nstatus=Label(root,text=\"FYP F204 -Bahria University Karachi Campus\", relief=SUNKEN,font='Courier 15 italic')\nstatus.pack(side=BOTTOM,fill=X)\n\nmy_label =Label(root,text=\" Welcome to Smart Education Institute\" ,relief=GROOVE,font='Times 15 bold')\nmy_label.pack(pady=10)\n\nmy_label2 =Label(root,text=\"----------------Face Attendance----------------\",font='Helvetica 12 bold',) 
\nmy_label2.pack(pady=10)\n\nB1=tkinter.Button(root,text=\"01 Enter New Student\",command= collect)\nB1.pack(pady=5)\n\nB2=tkinter.Button(root,text=\"02 Train Model \",command= model)\nB2.pack(pady=5)\n\nB3=tkinter.Button(root,text=\"03 Face Attendance & Mask Detection\",command= attendance)\nB3.pack(pady=5)\n\nB3=tkinter.Button(root,text=\"04 Save Face Attendace File\",command= save)\nB3.pack(pady=5)\n\nB4=tkinter.Button(root,text=\"05 Upload Face Attendance File\",command= mongoface)\nB4.pack(pady=5)\n\nmy_label3 =Label(root,text=\"----------------Smart Card----------------\",font='Helvetica 12 bold')\nmy_label3.pack(pady=10)\n\nB5=tkinter.Button(root,text=\"01 -Save Smart Card Attendance\",command=smartcard)\nB5.pack(pady=5)\nB5=tkinter.Button(root,text=\"02 -Upload Smart Card Attendance\",command= msmartcard)\nB5.pack(pady=5)\n\n\nroot.mainloop()\n", "id": "9454806", "language": "Python", "matching_score": 1.3757866621017456, "max_stars_count": 0, "path": "0-Window.py" }, { "content": "import json\nimport csv\nimport urllib.request\nimport time\nwith urllib.request.urlopen(\"http://localhost/test/test.php\") as url:\n data = json.loads(url.read().decode())\n\ndate = time.strftime('%Y-%m-%d')\nfname = \"rfidattendance\\RFID_Attendance_Of_\"+date+\".csv\"\n\nwith open(fname, \"w\") as file:\n csv_file = csv.writer(file,lineterminator='\\n')\n csv_file.writerow([\"id\",\"username\",\"serialnumber\",\"card_uid\",\"device_dep\",\"checkindate\",\"timein\",\"timeout\",\"card_out\"])\n for item in data:\n csv_file.writerow([item['id'],item['username'],item['serialnumber'],item['card_uid'],item['device_dep'],item['checkindate'],item['timein'],item['timeout'],item['card_out']])\n", "id": "2298504", "language": "Python", "matching_score": 1.111478328704834, "max_stars_count": 0, "path": "6- Save RFID Attendance.py" }, { "content": "try:\n import pymongo\n from pymongo import MongoClient\n import pandas as pd\n import json\n import time\nexcept Exception as e:\n print(\"Some Modules are Missing \")\n\ndate = time.strftime('%Y-%m-%d')\n\nclass MongoDB(object):\n\n def __init__(self, dBName=None, collectionName=None):\n\n self.dBName = dBName\n self.collectionName = collectionName\n\n #self.client = MongoClient(\"localhost\", 27017, maxPoolSize=50)\n self.client = MongoClient(\"\", 27017, maxPoolSize=50) #Insert MongoDb connection string in \"\"\"\n\n self.DB = self.client[self.dBName]\n self.collection = self.DB[self.collectionName]\n\n\n def InsertData(self, path=r'attendanceresult\\Attendance_Result_'+date+'.csv'):\n \"\"\"\n :param path: Path os csv File\n :return: None\n \"\"\"\n\n df = pd.read_csv(path)\n data = df.to_dict('records')\n\n self.collection.insert_many(data, ordered=False)\n print(\"All the Data has been Exported to Mongo DB Server .... 
\")\n\nif __name__ == \"__main__\":\n mongodb = MongoDB(dBName = 'myFirstDatabase', collectionName='Attendance_Of_'+date)\n mongodb.InsertData(path=r'attendanceresult\\Attendance_Result_'+date+'.csv')", "id": "367770", "language": "Python", "matching_score": 2.4252359867095947, "max_stars_count": 0, "path": "5- MongoSend.py" }, { "content": "import pandas as pd\nimport glob\nimport os\nimport time\n\n\ndate = time.strftime('%Y-%m-%d')\n\n# attendance in (entry)\npath = r'D:\\SEI-Mask-FaceAttendance\\attendancein' # path to read save entry\nall_files = glob.glob(os.path.join(path, \"*.csv\")) # advisable to use os.path.join as this makes concatenation OS independent\ndf_file = (pd.read_csv(f) for f in all_files)\nconc_df1 = pd.concat(df_file, ignore_index=True)\nprint(\"Attendance In:\\n\", conc_df1)\n\n# attendance out (exit)\npath = r'D:\\SEI-Mask-FaceAttendance\\attendanceout' # path to read save exit\nall_files = glob.glob(os.path.join(path, \"*.csv\")) # advisable to use os.path.join as this makes concatenation OS independent\ndf_file = (pd.read_csv(f) for f in all_files)\nconc_df2 = pd.concat(df_file, ignore_index=True)\nprint(\"Attendance Out:\\n\",conc_df2)\n\n\nresult = pd.merge(conc_df1,conc_df2, on=['Name'], how='left')\nresult.columns = ['Name','DateIn','TimeIn','Mask','DateOut','TimeOut']\ndecimals = 2\nresult['Engage-Min'] = (pd.to_datetime(result['TimeOut']) - pd.to_datetime(result['TimeIn'])).astype('>m8[m]').astype(int)\nresult['Engage-Hrs'] = (result['Engage-Min']/60).round(decimals)\nprint (\"Attendance In & Out:\\n\", result) # print result both entry & exit \nresult.to_csv(\"attendanceresult\\Attendance_Result_\"+date+\".csv\", index=True, header=True) # path to save print result", "id": "9603422", "language": "Python", "matching_score": 2.3225228786468506, "max_stars_count": 0, "path": "4 - Save Attendance.py" } ]
2.322523
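The face-attendance entry above ends with a script ("4 - Save Attendance.py") that left-merges the per-day entry and exit CSVs on Name and derives Engage-Min / Engage-Hrs from TimeOut minus TimeIn. A minimal, self-contained sketch of that merge step follows; the in-memory frames and the single "alice" record are hypothetical stand-ins for the repo's attendancein/attendanceout folders, not data from the source, and total_seconds() is used instead of the original's '>m8[m]' cast, which newer pandas versions may reject.

import pandas as pd

# Hypothetical stand-ins for the per-person CSVs written to attendancein/ and attendanceout/
entries = pd.DataFrame({"Name": ["alice"], "DateIn": ["2021-05-01"],
                        "TimeIn": ["09:00:00"], "Mask": ["True"]})
exits = pd.DataFrame({"Name": ["alice"], "DateOut": ["2021-05-01"],
                      "TimeOut": ["15:30:00"]})

# Left-merge on Name, as the save-attendance script does
result = pd.merge(entries, exits, on="Name", how="left")

# Engagement time: TimeOut - TimeIn, expressed in minutes and hours
delta = pd.to_datetime(result["TimeOut"]) - pd.to_datetime(result["TimeIn"])
result["Engage-Min"] = (delta.dt.total_seconds() // 60).astype(int)
result["Engage-Hrs"] = (result["Engage-Min"] / 60).round(2)
print(result[["Name", "TimeIn", "TimeOut", "Engage-Min", "Engage-Hrs"]])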
samuelakanni
[ { "content": "# Generated by Django 3.0.5 on 2021-02-28 00:12\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('student', '0002_remove_student_status'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Course',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('course_name', models.CharField(max_length=50)),\n ('question_number', models.PositiveIntegerField()),\n ('total_marks', models.PositiveIntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='Result',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('marks', models.PositiveIntegerField()),\n ('date', models.DateTimeField(auto_now=True)),\n ('exam', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz.Course')),\n ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student.Student')),\n ],\n ),\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('marks', models.PositiveIntegerField()),\n ('question', models.CharField(max_length=600)),\n ('option1', models.CharField(max_length=200)),\n ('option2', models.CharField(max_length=200)),\n ('option3', models.CharField(max_length=200)),\n ('option4', models.CharField(max_length=200)),\n ('answer', models.CharField(choices=[('Option1', 'Option1'), ('Option2', 'Option2'), ('Option3', 'Option3'), ('Option4', 'Option4')], max_length=200)),\n ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz.Course')),\n ],\n ),\n ]\n", "id": "8952205", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "quiz/migrations/0001_initial.py" }, { "content": "from django.db import models\n\nfrom student.models import Student\n\n\nclass Course(models.Model):\n objects = None\n course_name = models.CharField(max_length=50)\n question_number = models.PositiveIntegerField()\n total_marks = models.PositiveIntegerField()\n\n def __str__(self):\n return self.course_name\n\n\nclass Question(models.Model):\n course = models.ForeignKey(Course, on_delete=models.CASCADE)\n marks = models.PositiveIntegerField()\n question = models.CharField(max_length=600)\n option1 = models.CharField(max_length=200)\n option2 = models.CharField(max_length=200)\n option3 = models.CharField(max_length=200)\n option4 = models.CharField(max_length=200)\n cat = (('Option1', 'Option1'), ('Option2', 'Option2'), ('Option3', 'Option3'), ('Option4', 'Option4'))\n answer = models.CharField(max_length=200, choices=cat)\n\n\nclass Result(models.Model):\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n exam = models.ForeignKey(Course, on_delete=models.CASCADE)\n marks = models.PositiveIntegerField()\n date = models.DateTimeField(auto_now=True)\n", "id": "6705083", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "quiz/models.py" } ]
0
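The samuelakanni entry defines Course, Question and Result models for a quiz app, but the excerpt stops at the model layer, so the actual scoring view is not shown. Below is a small, hypothetical plain-Python sketch of the grading idea those models imply (sum the marks of questions whose chosen option equals answer); the Question dataclass and grade helper are illustrative names introduced here, not part of the repository.

from dataclasses import dataclass

# Plain-Python stand-in for a quiz.Question row; the real app stores these
# via the Django ORM, and its grading view is not part of this excerpt.
@dataclass
class Question:
    question: str
    marks: int
    answer: str          # one of "Option1" .. "Option4", as in the model's choices

def grade(questions, chosen_options):
    """Sum the marks of every question whose chosen option matches its answer."""
    return sum(q.marks for q, c in zip(questions, chosen_options) if c == q.answer)

qs = [Question("2 + 2 = ?", 5, "Option2"),
      Question("Largest planet?", 5, "Option3")]
print(grade(qs, ["Option2", "Option1"]))   # prints 5: only the first answer is correct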
ual
[ { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\n\nimport numpy as np\nimport pandas as pd\nimport orca\n\nfrom activitysim.core import skim as askim\nfrom activitysim.core.util import quick_loc_df\n\nlogger = logging.getLogger('activitysim')\n\n\nclass NetworkLOS(object):\n\n\n def __init__(self, taz, maz, tap, maz2maz, maz2tap,\n taz_skim_dict, tap_skim_dict):\n\n self.taz_df = taz\n self.maz_df = maz\n self.tap_df = tap\n\n # print \"maz_df unique maz\", len(self.maz_df.index)\n\n # maz2maz_df\n self.maz2maz_df = maz2maz\n # create single index for fast lookup\n m = maz2maz.DMAZ.max() + 1\n maz2maz['i'] = maz2maz.OMAZ * m + maz2maz.DMAZ\n maz2maz.set_index('i', drop=True, inplace=True, verify_integrity=True)\n self.maz2maz_cardinality = m\n\n # maz2tap_df\n self.maz2tap_df = maz2tap\n # create single index for fast lookup\n m = maz2tap.TAP.max() + 1\n maz2tap['i'] = maz2tap.MAZ * m + maz2tap.TAP\n maz2tap.set_index('i', drop=True, inplace=True, verify_integrity=True)\n self.maz2tap_cardinality = m\n\n self.taz_skim_dict = taz_skim_dict\n self.taz_skim_stack = askim.SkimStack(taz_skim_dict)\n\n self.tap_skim_dict = tap_skim_dict\n self.tap_skim_stack = askim.SkimStack(tap_skim_dict)\n\n def get_taz(self, taz_list, attribute):\n return quick_loc_df(taz_list, self.taz_df, attribute)\n\n def get_tap(self, tap_list, attribute):\n return quick_loc_df(tap_list, self.tap_df, attribute)\n\n def get_maz(self, maz_list, attribute):\n return quick_loc_df(maz_list, self.maz_df, attribute)\n\n def get_tazpairs(self, otaz, dtaz, key):\n skim = self.taz_skim_dict.get(key)\n s = skim.get(otaz, dtaz)\n return s\n\n def get_tazpairs3d(self, otaz, dtaz, dim3, key):\n s = self.taz_skim_stack.lookup(otaz, dtaz, dim3, key)\n return s\n\n def get_tappairs(self, otap, dtap, key):\n skim = self.tap_skim_dict.get(key)\n s = skim.get(otap, dtap)\n\n n = (skim.data < 0).sum()\n p = (skim.data >= 0).sum()\n nan = np.isnan(skim.data).sum()\n print \"get_tappairs %s %s neg %s po %s nan\" % (key, n, p, nan)\n\n return s\n\n def get_tappairs3d(self, otap, dtap, dim3, key):\n s = self.tap_skim_stack.lookup(otap, dtap, dim3, key)\n return s\n\n def get_mazpairs(self, omaz, dmaz, attribute):\n\n # # this is slower\n # s = pd.merge(pd.DataFrame({'OMAZ': omaz, 'DMAZ': dmaz}),\n # self.maz2maz_df,\n # how=\"left\")[attribute]\n\n # synthetic index method i : omaz_dmaz\n i = np.asanyarray(omaz) * self.maz2maz_cardinality + np.asanyarray(dmaz)\n s = quick_loc_df(i, self.maz2maz_df, attribute)\n\n # FIXME - no point in returning series? unless maz and tap have same index?\n return np.asanyarray(s)\n\n def get_maztappairs(self, maz, tap, attribute):\n\n # synthetic i method : maz_tap\n i = np.asanyarray(maz) * self.maz2tap_cardinality + np.asanyarray(tap)\n s = quick_loc_df(i, self.maz2tap_df, attribute)\n\n # FIXME - no point in returning series? unless maz and tap have sme index?\n return np.asanyarray(s)\n\n def get_taps_mazs(self, maz, attribute=None, filter=None):\n\n # we return multiple tap rows for each maz, so we add an 'idx' row to tell caller\n # which maz-taz rows belong to which row in the original maz list\n # i.e. 
idx contains the index of the original maz series so we know which rows belong together\n # if maz is a series, then idx has the original maz series index values\n # otherwise it has the 0-based integer offset of the original maz\n\n if filter:\n maz2tap_df = self.maz2tap_df[ pd.notnull(self.maz2tap_df[filter]) ]\n else:\n maz2tap_df = self.maz2tap_df\n\n if attribute:\n # FIXME - not sure anyone needs this feature\n maz2tap_df = maz2tap_df[ ['MAZ', 'TAP', attribute]]\n # filter out null attribute rows\n maz2tap_df = maz2tap_df[ pd.notnull(self.maz2tap_df[attribute]) ]\n else:\n maz2tap_df = maz2tap_df[['MAZ', 'TAP']]\n\n if isinstance(maz, pd.Series):\n # idx based on index of original maz series\n maz_df = pd.DataFrame({'MAZ': maz, 'idx': maz.index})\n else:\n # 0-based index of original maz\n maz_df = pd.DataFrame({'MAZ': maz, 'idx': range(len(maz))})\n\n df = pd.merge(maz_df, maz2tap_df, how=\"inner\")\n\n return df\n\n\n def get_tappairs_mazpairs(network_los, omaz, dmaz, ofilter=None, dfilter=None):\n\n # get nearby boarding TAPs to origin\n omaz_btap_df = network_los.get_taps_mazs(omaz, ofilter)\n\n # get nearby alighting TAPs to destination\n dmaz_atap_df = network_los.get_taps_mazs(dmaz, dfilter)\n\n # expand to one row for every btab-atap pair\n atap_btap_df = pd.merge(omaz_btap_df, dmaz_atap_df, on='idx', how=\"inner\")\n atap_btap_df.rename(\n columns={'MAZ_x': 'omaz', 'TAP_x': 'btap', 'MAZ_y': 'dmaz', 'TAP_y': 'atap'},\n inplace=True)\n\n return atap_btap_df\n\n\n def __str__(self):\n\n return \"\\n\".join((\n \"taz (%s)\" % len(self.taz_df.index),\n \"maz (%s)\" % len(self.maz_df.index),\n \"tap (%s)\" % len(self.tap_df.index),\n \"maz2maz (%s)\" % len(self.maz2maz_df.index),\n \"maz2tap (%s)\" % len(self.maz2tap_df.index),\n \"taz_skim_dict (%s keys)\" % self.taz_skim_dict.key_count(),\n \"tap_skim_dict (%s keys)\" % self.tap_skim_dict.key_count(),\n \"taz_skim_stack (%s keys)\" % self.taz_skim_stack.key_count(),\n \"tap_skim_stack (%s keys)\" % self.tap_skim_stack.key_count(),\n ))\n\[email protected](cache=True)\ndef network_los(store, taz_skim_dict, tap_skim_dict):\n\n\n taz = store[\"TAZ\"]\n maz = store[\"MAZ\"]\n tap = store[\"TAP\"]\n maz2maz = store[\"MAZtoMAZ\"]\n maz2tap = store[\"MAZtoTAP\"]\n\n\n print \"taz index %s columns %s\" % (taz.index.name, taz.columns.values)\n print \"tap index %s columns %s\" % (tap.index.name, tap.columns.values)\n print \"maz index %s columns %s\" % (maz.index.name, maz.columns.values)\n\n print \"maz2maz index %s columns %s\" % (maz2maz.index.name, maz2maz.columns.values)\n print \"maz2tap index %s columns %s\" % (maz2tap.index.name, maz2tap.columns.values)\n\n # print \"tap index %s columns %s\" % (tap.index.name, tap.columns.values)\n # print \"tap_skim_offsets index %s columns %s\" % (tap_skim_offsets.index.name, tap_skim_offsets.columns.values)\n\n nlos = NetworkLOS(taz, maz, tap, maz2maz, maz2tap, taz_skim_dict, tap_skim_dict)\n\n return nlos\n\n", "id": "2427697", "language": "Python", "matching_score": 3.0885674953460693, "max_stars_count": 0, "path": "activitysim/example_multi/extensions/los.py" }, { "content": "\nimport logging\n\nimport orca\nfrom activitysim import abm\nfrom activitysim.core import tracing\nfrom activitysim.core import simulate as asim\nimport pandas as pd\nimport numpy as np\nimport os\nimport time\n\n\n# you will want to configure this with the locations of the canonical datasets\nDATA_REPO = \"C:/projects/sandag-asim/toRSG/output/\"\nDATA_REPO = \"E:/activitysim/project/output/\"\nDATA_REPO = 
\"/Users/jeff.doyle/work/activitysim-data/sandag_zone/output/\"\n\n\nCOMPARE_RESULTS = False\n\ntracing.config_logger()\nlogger = logging.getLogger('activitysim')\n\n\[email protected]()\ndef output_dir():\n if not os.path.exists('output'):\n os.makedirs('output') # make directory if needed\n return 'output'\n\n\[email protected]()\ndef data_dir():\n return os.path.join(DATA_REPO)\n\n\nimport extensions\n\n\ndef print_elapsed_time(msg=None, t0=None):\n # FIXME - development debugging code to be removed\n t1 = time.time()\n if msg:\n t = t1 - (t0 or t1)\n msg = \"Time to execute %s : %s seconds (%s seconds)\" % (msg, t, round(t, 3))\n logger.info(msg)\n return time.time()\n\ndef print_elapsed_time_per_unit(msg, t0, divisor):\n unit = 1000.\n t1 = time.time()\n if msg:\n t = t1 - (t0 or t1)\n per_unit = unit * t / divisor\n msg = \"Time to execute %s : %s seconds (%s per unit)\" % (msg, round(t, 3), round(per_unit, 4))\n logger.info(msg)\n return time.time()\n\n\ndef get_taz(VECTOR_TEST_SIZE):\n # select some random rows with attributes\n taz_df = network_los.taz_df[~np.isnan(network_los.taz_df.terminal_time)]\n random_taz = taz_df.sample(VECTOR_TEST_SIZE, replace=True)\n result = network_los.get_taz(random_taz.index.values, 'terminal_time')\n\n if COMPARE_RESULTS:\n\n # Int64Index\n result2 = network_los.get_taz(random_taz.index, 'terminal_time')\n assert list(result) == list(result2)\n\n # Series\n result2 = network_los.get_taz(pd.Series(data=random_taz.index.values), 'terminal_time')\n assert list(result) == list(result2)\n\n return result\n\n\ndef get_tap(VECTOR_TEST_SIZE):\n tap_df = network_los.tap_df\n random_tap = tap_df.sample(VECTOR_TEST_SIZE, replace=True)\n result = network_los.get_tap(random_tap.index.values, 'TAZ')\n\n if COMPARE_RESULTS:\n\n # Int64Index\n result2 = network_los.get_tap(random_tap.index, 'TAZ')\n assert list(result) == list(result2)\n\n # Series\n result2 = network_los.get_tap(pd.Series(data=random_tap.index.values), 'TAZ')\n assert list(result) == list(result2)\n\n return result\n\n\ndef get_maz(VECTOR_TEST_SIZE):\n maz_df = network_los.maz_df\n random_maz = maz_df.sample(VECTOR_TEST_SIZE, replace=True)\n result = network_los.get_maz(random_maz.index.values, 'milestocoast')\n\n if COMPARE_RESULTS:\n\n # Int64Index\n result2 = network_los.get_maz(random_maz.index, 'milestocoast')\n assert list(result) == list(result2)\n\n # Series\n result2 = network_los.get_maz(pd.Series(data=random_maz.index.values), 'milestocoast')\n assert list(result) == list(result2)\n\n return result\n\n\ndef taz_skims(VECTOR_TEST_SIZE):\n taz_df = network_los.taz_df\n\n otaz = taz_df.sample(VECTOR_TEST_SIZE, replace=True).index\n dtaz = taz_df.sample(VECTOR_TEST_SIZE, replace=True).index\n tod = np.random.choice(['AM', 'PM'], VECTOR_TEST_SIZE)\n sov_time = network_los.get_tazpairs3d(otaz, dtaz, tod, 'SOV_TIME')\n\n\ndef tap_skims(VECTOR_TEST_SIZE):\n tap_df = network_los.tap_df\n\n otap = tap_df.sample(VECTOR_TEST_SIZE, replace=True).index\n dtap = tap_df.sample(VECTOR_TEST_SIZE, replace=True).index\n tod = np.random.choice(['AM', 'PM'], VECTOR_TEST_SIZE)\n local_bus_fare = network_los.get_tappairs3d(otap, dtap, tod, 'LOCAL_BUS_FARE')\n\n\ndef get_maz_pairs(VECTOR_TEST_SIZE):\n maz2maz_df = network_los.maz2maz_df.sample(VECTOR_TEST_SIZE, replace=True)\n omaz = maz2maz_df.OMAZ\n dmaz = maz2maz_df.DMAZ\n walk_actual = network_los.get_mazpairs(omaz, dmaz, 'walk_actual')\n\n\ndef get_maz_tap_pairs(VECTOR_TEST_SIZE):\n maz2tap_df = network_los.maz2tap_df.sample(VECTOR_TEST_SIZE, 
replace=True)\n maz = maz2tap_df.MAZ\n tap = maz2tap_df.TAP\n drive_distance = network_los.get_maztappairs(maz, tap, \"drive_distance\")\n\n\ndef get_taps_mazs(VECTOR_TEST_SIZE):\n\n maz_df = network_los.maz_df.sample(VECTOR_TEST_SIZE, replace=True)\n omaz = maz_df.index\n maz_tap_distance = network_los.get_taps_mazs(omaz)\n # when called with attribute, only returns rows with non-null attributes\n attribute = 'drive_distance'\n maz_tap_distance = network_los.get_taps_mazs(omaz, attribute)\n\n\ndef set_random_seed():\n np.random.seed(0)\n\n\n# uncomment the line below to set random seed so that run results are reproducible\nset_random_seed()\norca.add_injectable(\"set_random_seed\", set_random_seed)\n\ntracing.config_logger()\n\nt0 = print_elapsed_time()\n\ntaz_skim_stack = orca.get_injectable('taz_skim_dict')\nt0 = print_elapsed_time(\"load taz_skim_dict\", t0)\n\ntap_skim_stack = orca.get_injectable('tap_skim_dict')\nt0 = print_elapsed_time(\"load tap_skim_dict\", t0)\n\nnetwork_los = orca.get_injectable('network_los')\nt0 = print_elapsed_time(\"load network_los\", t0)\n\n# test sizes for all implemented methods\nVECTOR_TEST_SIZEs = (10000, 100000, 1000000, 5000000, 10000000, 20000000)\n\n#VECTOR_TEST_SIZEs = []\n\nfor size in VECTOR_TEST_SIZEs:\n\n logger.info(\"VECTOR_TEST_SIZE %s\" % size)\n\n get_taz(size)\n t0 = print_elapsed_time_per_unit(\"get_taz\", t0, size)\n\n get_tap(size)\n t0 = print_elapsed_time_per_unit(\"get_tap\", t0, size)\n\n get_maz(size)\n t0 = print_elapsed_time_per_unit(\"get_maz\", t0, size)\n\n taz_skims(size)\n t0 = print_elapsed_time_per_unit(\"taz_skims\", t0, size)\n\n tap_skims(size)\n t0 = print_elapsed_time_per_unit(\"tap_skims\", t0, size)\n\n get_maz_pairs(size)\n t0 = print_elapsed_time_per_unit(\"get_maz_pairs\", t0, size)\n\n get_maz_tap_pairs(size)\n t0 = print_elapsed_time_per_unit(\"get_maz_tap_pairs\", t0, size)\n\n get_taps_mazs(size)\n t0 = print_elapsed_time_per_unit(\"get_taps_mazs\", t0, size)\n\n\n# # taz_skims() test sizes; comment out all other methods\n# VECTOR_TEST_SIZEs = (68374080, 568231216)\n# for size in VECTOR_TEST_SIZEs:\n# logger.info(\"VECTOR_TEST_SIZE %s\" % size)\n# taz_skims(size)\n# t0 = print_elapsed_time_per_unit(\"taz_skims\", t0, size)\n#\n# # get_maz_pairs() test sizes; comment out all other methods\n# VECTOR_TEST_SIZEs = (5073493, 10146986, 12176383, 15220479, 1522047900)\n# for size in VECTOR_TEST_SIZEs:\n# logger.info(\"VECTOR_TEST_SIZE %s\" % size)\n# get_maz_pairs(size)\n# t0 = print_elapsed_time_per_unit(\"get_maz_pairs\", t0, size)\n\n\nt0 = print_elapsed_time()\norca.run([\"best_transit_path\"])\nt0 = print_elapsed_time(\"best_transit_path\", t0)\n", "id": "4511304", "language": "Python", "matching_score": 2.951770067214966, "max_stars_count": 0, "path": "activitysim/example_multi/simulation.py" }, { "content": "# Convert SANDAG network los files to ActivitySim NetworkLOS format\n# <NAME>, <EMAIL>, 02/03/17\n\nimport sys, os.path, openmatrix\nimport pandas as pd, numpy as np\n\n############################################################\n# paramaters\n############################################################\n\n# settings\nfolder = \"/Users/jeff.doyle/work/activitysim-data/sandag_zone/\"\noutput_folder = \"/Users/jeff.doyle/work/activitysim-data/sandag_zone/output/\"\n# folder = \"C:/projects/sandag-asim/toRSG/\"\n# output_folder = \"C:/projects/sandag-asim/toRSG/output/\"\n\noutputDataStoreFileName = \"NetworkData.h5\"\noutputBikeLogsumMatrixFileName = \"bikelogsum.omx\"\n\n\"\"\"\n TAZ - there are 4996 
TAZs with ids 1..4996\n MAZ - there are 23002 MAZs with ids 1..23002\n TAP - there are 1754 TAPs with ids 1..2498\n\"\"\"\nif __name__ == \"__main__\":\n\n # for f in ['impprem_AM.omx', 'impprem_AMo.omx', 'implocl_AM.omx', 'implocl_AMo.omx']:\n # with openmatrix.open_file(folder + f) as input_skims:\n # print \"\\n%s shape %s mappings\" % (f, input_skims.shape()), input_skims.listMappings()\n #\n # for skimName in input_skims.listMatrices():\n #\n # s = np.asanyarray(input_skims[skimName])\n #\n # print \"%s: %s, %s, %s\" % (skimName, np.sum(s < 0), np.sum(s == 0), np.sum(s > 0))\n #\n # assert False\n\n #create output folder\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n\n # read CSVs and convert to NetworkLOS format\n # https://github.com/UDST/activitysim/wiki/Multiple-Zone-Systems-Design\n bikeMgraLogsum = pd.read_csv(folder + \"bikeMgraLogsum.csv\")\n bikeMgraLogsum.rename(\n columns={'i': 'OMAZ', 'j': 'DMAZ', 'logsum':'bike_logsum', 'time':'bike_time'},\n inplace=True)\n\n walkMgraTapEquivMinutes = pd.read_csv(folder + \"walkMgraTapEquivMinutes.csv\")\n walkMgraTapEquivMinutes.rename(columns={'mgra': 'MAZ', 'tap': 'TAP'}, inplace=True)\n\n walkMgraEquivMinutes = pd.read_csv(folder + \"walkMgraEquivMinutes.csv\")\n walkMgraEquivMinutes.rename(\n columns={'i': 'OMAZ', 'j': 'DMAZ', 'percieved':'walk_perceived', 'actual':'walk_actual', 'gain':'walk_gain'},\n inplace=True)\n\n mgra13_based_input2012 = pd.read_csv(folder + \"mgra13_based_input2012.csv\")\n mgra13_based_input2012.rename(columns={'mgra': 'MAZ', 'taz': 'TAZ'}, inplace=True)\n\n Accessam = pd.read_csv(folder + \"Accessam.csv\")\n Taps = pd.read_csv(folder + \"Taps.csv\")\n Tap_ptype = pd.read_csv(folder + \"Tap_ptype.csv\")\n Zone_term = pd.read_csv(folder + \"Zone_term.csv\")\n Zone_park = pd.read_csv(folder + \"Zone_park.csv\")\n\n # read taz and tap skim to get zone ids\n with openmatrix.open_file(folder + \"impdan_AM.omx\") as taz_skim:\n taz_numbers = taz_skim.mapentries(\"Origin\")\n\n with openmatrix.open_file(folder + \"implocl_AMo.omx\") as tap_skim:\n tap_numbers = tap_skim.mapentries(\"Rows\")\n\n # TAZ\n TAZ = pd.DataFrame({\"offset\": range(len(taz_numbers)), \"TAZ\": taz_numbers})\n assert len(np.intersect1d(TAZ.TAZ, Zone_term.TAZ)) == len(Zone_term.TAZ)\n TAZ = TAZ.merge(Zone_term, how=\"left\")\n assert len(np.intersect1d(TAZ.index, Zone_park.TAZ)) == len(Zone_park.TAZ)\n TAZ = TAZ.merge(Zone_park, how=\"left\")\n TAZ.set_index(\"TAZ\", drop=True, inplace=True, verify_integrity=True)\n\n # MAZ\n MAZ = mgra13_based_input2012\n MAZ.set_index(\"MAZ\", drop=True, inplace=True, verify_integrity=True)\n\n # TAP\n TAP = pd.DataFrame({\"offset\": range(len(tap_numbers)), 'TAP': tap_numbers})\n assert len(np.intersect1d(TAP.TAP, Tap_ptype.TAP)) == len(Tap_ptype.TAP)\n TAP = TAP.merge(Tap_ptype, how=\"outer\")\n TAP = TAP.merge(Taps, how=\"outer\")\n TAP.set_index(\"TAP\", drop=True, inplace=True, verify_integrity=True)\n\n # Set LOTTAZ and spatial join TAZ for each TAP\n TAP['LOTTAZ'] = TAP['TAZ']\n TAP['TAZ'] = MAZ.loc[TAP.MAZ].TAZ.values\n\n # MAZtoMAZ\n MAZtoMAZ = pd.merge(bikeMgraLogsum, walkMgraEquivMinutes, how=\"outer\", on=['OMAZ','DMAZ'])\n\n # MAZtoTAP\n\n # expand from TAZtoTAP to MAZtoTAP\n tapsPerTaz = Accessam.groupby('TAZ').count()['TAP']\n Accessam.set_index('TAZ', drop=False, inplace=True)\n Accessam = Accessam.loc[\n MAZ.TAZ] # explode - one row per (taz,tap) pair -> one row for each maz in taz of (taz,tap)\n MAZ['TAPS'] = tapsPerTaz.loc[MAZ.TAZ].tolist()\n Accessam['MAZ'] = 
np.repeat(MAZ.index.tolist(), MAZ.TAPS.tolist())\n Accessam.drop('TAZ', axis=1, inplace=True)\n\n # prefix column names\n Accessam.columns = \\\n [c if c in ['MAZ', 'TAP'] else 'drive_%s' % c for c in Accessam.columns.values]\n walkMgraTapEquivMinutes.columns = \\\n [c if c in ['MAZ', 'TAP'] else 'walk_%s' % c for c in walkMgraTapEquivMinutes.columns.values]\n\n MAZtoTAP = pd.merge(Accessam, walkMgraTapEquivMinutes, how=\"outer\", on=['MAZ', 'TAP'])\n\n print \"Accessam unique maz\", len(Accessam.MAZ.unique())\n # print \"Accessam null drive_time\", Accessam.drive_time.isnull().sum()\n # print \"Accessam null drive_distance\", Accessam.drive_distance.isnull().sum()\n print \"walkMgraTapEquivMinutes unique maz\", len(walkMgraTapEquivMinutes.MAZ.unique())\n print \"MAZtoTAP unique maz\", len(MAZtoTAP.MAZ.unique())\n\n print MAZtoTAP.head(10)\n\n\n # write tables\n TAP.to_hdf(output_folder + outputDataStoreFileName, \"TAP\", complib='zlib', complevel=7)\n TAZ.to_hdf(output_folder + outputDataStoreFileName, \"TAZ\")\n MAZ.to_hdf(output_folder + outputDataStoreFileName, \"MAZ\")\n MAZtoMAZ.to_hdf(output_folder + outputDataStoreFileName, \"MAZtoMAZ\")\n MAZtoTAP.to_hdf(output_folder + outputDataStoreFileName, \"MAZtoTAP\")\n\n print(\"created \" + output_folder + outputDataStoreFileName)\n\n ######### TAZ skim\n\n output_taz_skim_file = 'taz_skims.omx'\n output_taz_skims = openmatrix.open_file(output_folder + output_taz_skim_file, \"w\")\n\n taz_skim_manifest = {\n 'impdan_AM.omx': {'*SCST_AM': 'SOV_COST__AM', '*STM_AM (Skim)': 'SOV_TIME__AM'},\n 'impdan_PM.omx': {'*SCST_PM': 'SOV_COST__PM', '*STM_PM (Skim)': 'SOV_TIME__PM'},\n }\n for f, key_map in taz_skim_manifest.iteritems():\n with openmatrix.open_file(folder + f) as input_skims:\n print \"%s shape %s mappings\" % (f, input_skims.shape()), input_skims.listMappings()\n\n for m in input_skims.listMappings():\n assert input_skims.mapping(m).keys() == taz_numbers\n\n # for skimName in input_skims.listMatrices():\n # print skimName\n\n for in_key, out_key in key_map.iteritems():\n print \"copying %s %s to %s\" % (f, in_key, out_key)\n output_taz_skims[out_key] = input_skims[in_key]\n\n # read bikeTazLogsum as convert to OMX\n bikeTazLogsum = pd.read_csv(folder + \"bikeTazLogsum.csv\")\n bikeTazLogsum['index_i'] = TAZ.loc[bikeTazLogsum.i].offset.tolist()\n bikeTazLogsum['index_j'] = TAZ.loc[bikeTazLogsum.j].offset.tolist()\n\n # bike_logsum\n logsum = np.zeros([len(taz_numbers), len(taz_numbers)])\n logsum[bikeTazLogsum['index_i'], bikeTazLogsum['index_j']] = bikeTazLogsum.logsum\n\n print \"output_taz_skims shape %s skim shape %s\" % (output_taz_skims.shape(), logsum.shape)\n output_taz_skims['bike_logsum'] = logsum\n\n # bike_time\n time = np.zeros([len(taz_numbers), len(taz_numbers)])\n time[bikeTazLogsum['index_i'], bikeTazLogsum['index_j']] = bikeTazLogsum.time\n output_taz_skims['bike_time'] = time\n\n output_taz_skims.createMapping('default_mapping', entries=taz_numbers, overwrite=False)\n output_taz_skims.close()\n\n # print summary of what we built\n print \"\\n##### Summary of %s\" % output_taz_skim_file\n with openmatrix.open_file(output_folder + output_taz_skim_file) as skims:\n print skims\n print \"\\n#####\\n\"\n\n ######### TAP skims\n\n sets = [\"locl\",\"prem\"]\n for aSet in sets:\n \n output_tap_skim_file = 'tap_skims_' + aSet + '.omx'\n output_tap_skims = openmatrix.open_file(output_folder + output_tap_skim_file, \"w\")\n\n tap_skim_files = ['imp' + aSet + '_AMo.omx', 'imp' + aSet + '_PMo.omx' ]\n \n if aSet == \"locl\":\n \n 
tap_skim_manifest = {\n 'implocl_AMo.omx': {\n 'Fare': 'LOCAL_BUS_FARE__AM',\n 'Initial Wait Time': 'LOCAL_BUS_INITIAL_WAIT__AM',\n 'Number of Transfers': 'LOCAL_BUS_NUM_TRANSFERS__AM',\n 'Total IV Time': 'LOCAL_BUS_IVT__AM',\n 'Transfer Wait Time': 'LOCAL_BUS_TRANSFER_WAIT__AM',\n 'Walk Time': 'LOCAL_BUS_WALK_TIME__AM'\n },\n 'implocl_PMo.omx': {\n 'Fare': 'LOCAL_BUS_FARE__PM',\n 'Initial Wait Time': 'LOCAL_BUS_INITIAL_WAIT__PM',\n 'Number of Transfers': 'LOCAL_BUS_NUM_TRANSFERS__PM',\n 'Total IV Time': 'LOCAL_BUS_IVT__PM',\n 'Transfer Wait Time': 'LOCAL_BUS_TRANSFER_WAIT__PM',\n 'Walk Time': 'LOCAL_BUS_WALK_TIME__PM'\n }\n }\n \n elif aSet == \"prem\":\n \n tap_skim_manifest = {\n 'impprem_AMo.omx': {\n 'Fare': 'PREM_BUS_FARE__AM',\n 'IVT:BRT': 'PREM_BUS_IVT_BRT__AM',\n 'IVT:CR': 'PREM_BUS_IVT_CR__AM',\n 'IVT:EXP': 'PREM_BUS_IVT_EXP__AM',\n 'IVT:LB': 'PREM_BUS_IVT_LB__AM',\n 'IVT:LR': 'PREM_BUS_IVT_LR__AM',\n 'IVT:Sum': 'PREM_BUS_IVT_SUM__AM',\n 'Initial Wait Time': 'PREM_BUS_INITIAL_WAIT__AM',\n 'Length:BRT': 'PREM_BUS_LENGTH_BRT__AM',\n 'Length:CR': 'PREM_BUS_LENGTH_CR__AM',\n 'Length:EXP': 'PREM_BUS_LENGTH_EXP__AM',\n 'Length:LB': 'PREM_BUS_LENGTH_LB__AM',\n 'Length:LR': 'PREM_BUS_LENGTH_LR__AM',\n 'Main Mode': 'PREM_BUS_MAIN_MODE__AM',\n 'Number of Transfers': 'PREM_BUS_NUM_TRANSFERS__AM',\n 'Transfer Wait Time': 'PREM_BUS_TRANSFER_WAIT__AM',\n 'Walk Time': 'PREM_BUS_WALK_TIME__AM'\n },\n 'impprem_PMo.omx': {\n 'Fare': 'PREM_BUS_FARE__PM',\n 'IVT:BRT': 'PREM_BUS_IVT_BRT__PM',\n 'IVT:CR': 'PREM_BUS_IVT_CR__PM',\n 'IVT:EXP': 'PREM_BUS_IVT_EXP__PM',\n 'IVT:LB': 'PREM_BUS_IVT_LB__PM',\n 'IVT:LR': 'PREM_BUS_IVT_LR__PM',\n 'IVT:Sum': 'PREM_BUS_IVT_SUM__PM',\n 'Initial Wait Time': 'PREM_BUS_INITIAL_WAIT__PM',\n 'Length:BRT': 'PREM_BUS_LENGTH_BRT__PM',\n 'Length:CR': 'PREM_BUS_LENGTH_CR__PM',\n 'Length:EXP': 'PREM_BUS_LENGTH_EXP__PM',\n 'Length:LB': 'PREM_BUS_LENGTH_LB__PM',\n 'Length:LR': 'PREM_BUS_LENGTH_LR__PM',\n 'Main Mode': 'PREM_BUS_MAIN_MODE__PM',\n 'Number of Transfers': 'PREM_BUS_NUM_TRANSFERS__PM',\n 'Transfer Wait Time': 'PREM_BUS_TRANSFER_WAIT__PM',\n 'Walk Time': 'PREM_BUS_WALK_TIME__PM'\n }\n }\n \n for f, key_map in tap_skim_manifest.iteritems():\n with openmatrix.open_file(folder + f) as input_skims:\n print \"%s shape %s mappings\" % (f, input_skims.shape()), input_skims.listMappings()\n \n for m in input_skims.listMappings():\n assert input_skims.mapping(m).keys() == tap_numbers\n \n # for skimName in input_skims.listMatrices():\n # print skimName\n \n for in_key, out_key in key_map.iteritems():\n print \"copying %s %s to %s\" % (f, in_key, out_key)\n output_tap_skims[out_key] = input_skims[in_key]\n\n\n output_tap_skims.createMapping('default_mapping', entries=tap_numbers, overwrite=False)\n output_tap_skims.close()\n \n # print summary of what we just built\n print \"\\n##### Summary of %s\" % output_tap_skim_file\n with openmatrix.open_file(output_folder + output_tap_skim_file) as skims:\n print skims\n print \"\\n#####\\n\"\n\n", "id": "3350608", "language": "Python", "matching_score": 4.3974456787109375, "max_stars_count": 0, "path": "activitysim/example_multi/import_data.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport numpy as np\nimport pandas as pd\nimport openmatrix as omx\n\n\n\ninput_folder = \"/Users/jeff.doyle/work/activitysim-data/sandag_zone/output/\"\noutput_folder = \"./output/\"\n\ndata_file='NetworkData.h5'\nskim_files=['taz_skims.omx', 'tap_skims_locl.omx', 'tap_skims_prem.omx']\n\n\nif __name__ == \"__main__\":\n\n if 
data_file:\n with pd.HDFStore(input_folder+data_file, mode='r') as hdf:\n\n df = hdf['/TAZ']\n df.to_csv(output_folder+'taz.csv', index=True)\n\n df = hdf['/TAP']\n df.to_csv(output_folder+'tap.csv', index=True)\n\n for key in hdf.keys():\n print \"\\n========== %s\\n\" % key\n df = hdf[key]\n\n print \"len\", len(df.index)\n\n print df.columns.values\n\n for c in ['TAZ', 'TAP', 'MAZ', 'OMAZ', 'DMAZ']:\n if c in df.columns:\n print \"%s min: %s max: %s\" % (c, df[c].min(), df[c].max())\n\n if 'TAZ'in df.columns:\n print df.TAZ.value_counts().head(20)\n #print df\n\n\n # process all skims\n for skim_file in skim_files:\n with omx.open_file(input_folder+skim_file) as skims:\n #skims = omx.open_file(folder+skim_file)\n\n print \"\\n##### %s %s\" % (skim_file, skims.shape())\n\n print \"mappings:\", skims.listMappings()\n\n skimsToProcess = skims.listMatrices()\n for skimName in skimsToProcess:\n print skimName\n #skims.close()\n\n\n\n", "id": "2143325", "language": "Python", "matching_score": 1.615665316581726, "max_stars_count": 0, "path": "activitysim/example_multi/dump_data.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport numpy as np\nimport openmatrix as omx\nimport orca\n\nfrom activitysim.core import skim as askim\nfrom activitysim.core import tracing\n\nlogger = logging.getLogger('activitysim')\n\n\"\"\"\nRead in the omx files and create the skim objects\n\"\"\"\n\n\ndef add_to_skim_dict(skim_dict, omx_file, cache_skim_key_values, offset_int=None):\n\n if offset_int is None:\n\n if 'default_mapping' not in omx_file.listMappings():\n\n raise RuntimeError(\"Could not find 'default_mapping' in omx file.\"\n \"\\nYou might need to rerun import_data.py to rebuild the skims files.\")\n\n offset_map = omx_file.mapentries('default_mapping')\n skim_dict.offset_mapper.set_offset_list(offset_map)\n else:\n skim_dict.offset_mapper.set_offset_int(offset_int)\n\n skims_in_omx = omx_file.listMatrices()\n for skim_name in skims_in_omx:\n key, sep, key2 = skim_name.partition('__')\n skim_data = omx_file[skim_name]\n\n if not sep:\n # no separator - this is a simple 2d skim - we load them all\n skim_dict.set(key, skim_data)\n else:\n # there may be more time periods in the skim than are used by the model\n # cache_skim_key_values is a list of time periods (from settings) that are used\n # FIXME - assumes that the only types of key2 are time_periods\n if key2 in cache_skim_key_values:\n skim_dict.set((key, key2), skim_data)\n\n\n\[email protected](cache=True)\ndef taz_skim_dict(data_dir, settings):\n\n logger.info(\"loading taz_skim_dict\")\n\n skims_file = os.path.join(data_dir, settings[\"taz_skims_file\"])\n cache_skim_key_values = settings['time_periods']['labels']\n\n skim_dict = askim.SkimDict()\n\n with omx.open_file(skims_file) as omx_file:\n add_to_skim_dict(skim_dict, omx_file, cache_skim_key_values)\n\n return skim_dict\n\n\[email protected](cache=True)\ndef tap_skim_dict(data_dir, settings):\n\n logger.info(\"loading tap_skim_dict\")\n\n cache_skim_key_values = settings['time_periods']['labels']\n skim_dict = askim.SkimDict()\n\n for skims_file in settings[\"tap_skims_files\"]:\n skims_file_path = os.path.join(data_dir, skims_file)\n with omx.open_file(skims_file_path) as omx_file:\n add_to_skim_dict(skim_dict, omx_file, cache_skim_key_values)\n\n return skim_dict\n\n", "id": "5034363", "language": "Python", "matching_score": 4.463231086730957, "max_stars_count": 0, "path": "activitysim/example_multi/extensions/skims.py" }, { 
"content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport openmatrix as omx\nimport orca\n\nfrom activitysim.core import skim as askim\nfrom activitysim.core import tracing\nfrom activitysim.core import pipeline\n\nlogger = logging.getLogger(__name__)\n\n\"\"\"\nRead in the omx files and create the skim objects\n\"\"\"\n\n\n# cache this so we don't open it again and again - skim code is not closing it....\[email protected](cache=True)\ndef omx_file(data_dir, settings):\n logger.debug(\"opening omx file\")\n\n fname = os.path.join(data_dir, settings[\"skims_file\"])\n file = omx.open_file(fname)\n\n pipeline.close_on_exit(file, fname)\n\n return file\n\n\[email protected](cache=True)\ndef skim_dict(omx_file, cache_skim_key_values):\n\n logger.info(\"skims injectable loading skims\")\n\n skim_dict = askim.SkimDict()\n skim_dict.offset_mapper.set_offset_int(-1)\n\n skims_in_omx = omx_file.listMatrices()\n for skim_name in skims_in_omx:\n key, sep, key2 = skim_name.partition('__')\n skim_data = omx_file[skim_name]\n if not sep:\n # no separator - this is a simple 2d skim - we load them all\n skim_dict.set(key, skim_data)\n else:\n # there may be more time periods in the skim than are used by the model\n # cache_skim_key_values is a list of time periods (frem settings) that are used\n # FIXME - assumes that the only types of key2 are time_periods\n if key2 in cache_skim_key_values:\n skim_dict.set((key, key2), skim_data)\n\n return skim_dict\n\n\[email protected](cache=True)\ndef skim_stack(skim_dict):\n\n logger.debug(\"loading skim_stack\")\n return askim.SkimStack(skim_dict)\n", "id": "8758343", "language": "Python", "matching_score": 2.7726399898529053, "max_stars_count": 0, "path": "activitysim/activitysim/abm/tables/skims.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom activitysim.core.util import quick_loc_series\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass OffsetMapper(object):\n\n def __init__(self, offset_int=None):\n self.offset_series = None\n self.offset_int = offset_int\n\n def set_offset_list(self, offset_list):\n\n assert isinstance(offset_list, list)\n assert self.offset_int is None\n\n # for performance, check if this is a simple int-based series\n first_offset = offset_list[0]\n if (offset_list == range(first_offset, len(offset_list)+first_offset)):\n offset_int = -1 * first_offset\n # print \"set_offset_list substituting offset_int of %s\" % offset_int\n self.set_offset_int(offset_int)\n return\n\n if self.offset_series is None:\n self.offset_series = pd.Series(data=range(len(offset_list)), index=offset_list)\n else:\n # make sure it offsets are the same\n assert (offset_list == self.offset_series.index).all()\n\n def set_offset_int(self, offset_int):\n\n # should be some kind of integer\n assert long(offset_int) == offset_int\n assert self.offset_series is None\n\n if self.offset_int is None:\n self.offset_int = offset_int\n else:\n # make sure it is the same\n assert offset_int == self.offset_int\n\n def map(self, zone_ids):\n\n # print \"\\nmap_offsets zone_ids\", zone_ids\n\n if self.offset_series is not None:\n assert(self.offset_int is None)\n assert isinstance(self.offset_series, pd.Series)\n\n offsets = np.asanyarray(quick_loc_series(zone_ids, self.offset_series))\n\n elif self.offset_int:\n # should be some kind of integer\n assert long(self.offset_int) == self.offset_int\n assert (self.offset_series is None)\n offsets = 
zone_ids + self.offset_int\n else:\n offsets = zone_ids\n\n # print \"map_offsets offsets\", offsets\n\n return offsets\n\n\nclass SkimWrapper(object):\n \"\"\"\n Container for skim arrays.\n\n Parameters\n ----------\n data : 2D array\n offset : int, optional\n An optional offset that will be added to origin/destination\n values to turn them into array indices.\n For example, if zone IDs are 1-based, an offset of -1\n would turn them into 0-based array indices.\n\n \"\"\"\n def __init__(self, data, offset_mapper=None):\n\n self.data = data\n self.offset_mapper = offset_mapper if offset_mapper is not None else OffsetMapper()\n\n def get(self, orig, dest):\n \"\"\"\n Get impedence values for a set of origin, destination pairs.\n\n Parameters\n ----------\n orig : 1D array\n dest : 1D array\n\n Returns\n -------\n values : 1D array\n\n \"\"\"\n # only working with numpy in here\n orig = np.asanyarray(orig)\n dest = np.asanyarray(dest)\n out_shape = orig.shape\n\n # filter orig and dest to only the real-number pairs\n notnan = ~(np.isnan(orig) | np.isnan(dest))\n orig = orig[notnan].astype('int')\n dest = dest[notnan].astype('int')\n\n orig = self.offset_mapper.map(orig)\n dest = self.offset_mapper.map(dest)\n\n result = self.data[orig, dest]\n\n # add the nans back to the result\n out = np.empty(out_shape)\n out[notnan] = result\n out[~notnan] = np.nan\n\n return out\n\n\nclass SkimDict(object):\n \"\"\"\n A SkimDict object is a wrapper around a dict of multiple skim objects,\n where each object is identified by a key. It operates like a\n dictionary - i.e. use brackets to add and get skim objects.\n\n Note that keys are either strings or tuples of two strings (to support stacking of skims.)\n \"\"\"\n\n def __init__(self):\n self.skims = {}\n self.offset_mapper = OffsetMapper()\n\n def set(self, key, skim_data):\n \"\"\"\n Set skim data for key\n\n Parameters\n ----------\n key : hashable\n The key (identifier) for this skim object\n skim_data : Skim\n The skim object\n\n Returns\n -------\n Nothing\n \"\"\"\n\n if not isinstance(key, str):\n assert isinstance(key, tuple) and len(key) == 2\n assert isinstance(key[0], str) and isinstance(key[1], str)\n\n self.skims[key] = np.asanyarray(skim_data)\n\n # print \"\\n### %s\" % (key,)\n # print \"type(skim_data)\", type(skim_data)\n # print \"skim_data.shape\", skim_data.shape\n\n def get(self, key):\n \"\"\"\n Get an available skim object (not the lookup)\n\n Parameters\n ----------\n key : hashable\n The key (identifier) for this skim object\n\n Returns\n -------\n skim: Skim\n The skim object\n \"\"\"\n return SkimWrapper(self.skims[key], self.offset_mapper)\n\n def wrap(self, left_key, right_key):\n \"\"\"\n return a SkimDictWrapper for self\n \"\"\"\n return SkimDictWrapper(self, left_key, right_key)\n\n\nclass SkimDictWrapper(object):\n \"\"\"\n A SkimDictWrapper object is an access wrapper around a SkimDict of multiple skim objects,\n where each object is identified by a key. It operates like a\n dictionary - i.e. use brackets to add and get skim objects - but also\n has information on how to lookup against the skim objects.\n Specifically, this object has a dataframe, a left_key and right_key.\n It is assumed that left_key and right_key identify columns in df. 
The\n parameter df is usually set by the simulation itself as it's a result of\n interacting choosers and alternatives.\n\n When the user calls skims[key], key is an identifier for which skim\n to use, and the object automatically looks up impedances of that skim\n using the specified left_key column in df as the origin and\n the right_key column in df as the destination. In this way, the user\n does not do the O-D lookup by hand and only specifies which skim to use\n for this lookup. This is the only purpose of this object: to\n abstract away the O-D lookup and use skims by specifying which skim\n to use in the expressions.\n\n Note that keys are either strings or tuples of two strings (to support stacking of skims.)\n \"\"\"\n\n def __init__(self, skim_dict, left_key, right_key):\n self.skim_dict = skim_dict\n self.left_key = left_key\n self.right_key = right_key\n self.df = None\n\n def set_df(self, df):\n \"\"\"\n Set the dataframe\n\n Parameters\n ----------\n df : DataFrame\n The dataframe which contains the origin and destination ids\n\n Returns\n -------\n Nothing\n \"\"\"\n self.df = df\n\n def lookup(self, key):\n \"\"\"\n Generally not called by the user - use __getitem__ instead\n\n Parameters\n ----------\n key : hashable\n The key (identifier) for this skim object\n\n Returns\n -------\n impedances: pd.Series\n A Series of impedances which are elements of the Skim object and\n with the same index as df\n \"\"\"\n\n # The skim object to perform the lookup\n # using df[left_key] as the origin and df[right_key] as the destination\n skim = self.skim_dict.get(key)\n\n # assert self.df is not None, \"Call set_df first\"\n # origins = self.df[self.left_key].astype('int')\n # destinations = self.df[self.right_key].astype('int')\n # if self.offset:\n # origins = origins + self.offset\n # destinations = destinations + self.offset\n\n assert self.df is not None, \"Call set_df first\"\n s = skim.get(self.df[self.left_key],\n self.df[self.right_key])\n return pd.Series(s, index=self.df.index)\n\n def __getitem__(self, key):\n \"\"\"\n Get the (df implicit) lookup for an available skim object\n\n Parameters\n ----------\n key : hashable\n The key (identifier) for the skim object\n\n Returns\n -------\n impedances: pd.Series\n A Series of impedances which are elements of the Skim object and\n with the same index as df\n \"\"\"\n return self.lookup(key)\n\n\nclass SkimStack(object):\n\n def __init__(self, skim_dict):\n\n self.skims_data = {}\n self.skim_keys_to_indexes = {}\n self.offset_mapper = skim_dict.offset_mapper\n\n # pass to make dictionary of dictionaries where highest level is unique\n # first items of the tuples and the 2nd level is the second items of\n # the tuples\n for key, skim_data in skim_dict.skims.iteritems():\n if not isinstance(key, tuple) or not len(key) == 2:\n logger.debug(\"SkimStack __init__ skipping key: %s\" % key)\n continue\n logger.debug(\"SkimStack __init__ loading key: %s\" % (key,))\n skim_key1, skim_key2 = key\n # logger.debug(\"SkimStack init key: key1='%s' key2='%s'\" % (skim_key1, skim_key2))\n # FIXME - this copys object reference\n self.skims_data.setdefault(skim_key1, {})[skim_key2] = skim_data\n\n # print \"\\n### %s\" % (key,)\n # print \"type(skim_data)\", type(skim_data)\n # print \"skim_data.shape\", skim_data.shape\n\n # second pass to turn the each highest level value into a 3D array\n # with a dictionary to make second level keys to indexes\n for skim_key1, value in self.skims_data.iteritems():\n # FIXME - this actually copies/creates new 
stacked data\n self.skims_data[skim_key1] = np.dstack(value.values())\n self.skim_keys_to_indexes[skim_key1] = dict(zip(value.keys(), range(len(value))))\n\n logger.info(\"SkimStack.__init__ loaded %s keys with %s total skims\"\n % (len(self.skim_keys_to_indexes),\n sum([len(d) for d in self.skim_keys_to_indexes.values()])))\n\n def __str__(self):\n\n return \"\\n\".join(\n \"%s %s\" % (key1, sub_dict)\n for key1, sub_dict in self.skim_keys_to_indexes.iteritems())\n\n # def key_count(self):\n # return len(self.skim_keys_to_indexes.keys())\n #\n # def contains(self, key):\n # return key in self.skims_data\n\n def get(self, key):\n return self.skims_data[key], self.skim_keys_to_indexes[key]\n\n def lookup(self, orig, dest, dim3, key):\n\n orig = self.offset_mapper.map(orig)\n dest = self.offset_mapper.map(dest)\n\n assert key in self.skims_data, \"SkimStack key %s missing\" % key\n\n stacked_skim_data = self.skims_data[key]\n skim_keys_to_indexes = self.skim_keys_to_indexes[key]\n\n # skim_indexes = dim3.map(skim_keys_to_indexes).astype('int')\n # this should be faster than map\n skim_indexes = np.vectorize(skim_keys_to_indexes.get)(dim3)\n\n return stacked_skim_data[orig, dest, skim_indexes]\n\n def wrap(self, left_key, right_key, skim_key):\n \"\"\"\n return a SkimStackWrapper for self\n \"\"\"\n return SkimStackWrapper(stack=self,\n left_key=left_key, right_key=right_key, skim_key=skim_key)\n\n\nclass SkimStackWrapper(object):\n \"\"\"\n A SkimStackWrapper object wraps a skims object to add an additional wrinkle of\n lookup functionality. Upon init the separate skims objects are\n processed into a 3D matrix so that lookup of the different skims can\n be performed quickly for each row in the dataframe. In this very\n particular formulation, the keys are assumed to be tuples with two\n elements - the second element of which will be taken from the\n different rows in the dataframe. The first element can then be\n dereferenced like an array. This is useful, for instance, to have a\n certain skim vary by time of day - the skims are set with keys of\n ('SOV', 'AM\"), ('SOV', 'PM') etc. The time of day is then taken to\n be different for every row in the tours table, and the 'SOV' portion\n of the key can be used in __getitem__.\n\n To be more explicit, the input is a dictionary of Skims objects, each of\n which contains a 2D matrix. These are stacked into a 3D matrix with a\n mapping of keys to indexes which is applied using pandas .map to a third\n column in the object dataframe. The three columns - left_key and\n right_key from the Skims object and skim_key from this one, are then used to\n dereference the 3D matrix. The tricky part comes in defining the key which\n matches the 3rd dimension of the matrix, and the key which is passed into\n __getitem__ below (i.e. the one used in the specs). By convention,\n every key in the Skims object that is passed in MUST be a tuple with 2\n items. 
The second item in the tuple maps to the items in the dataframe\n referred to by the skim_key column and the first item in the tuple is\n then available to pass directly to __getitem__.\n\n The sum conclusion of this is that in the specs, you can say something\n like out_skim['SOV'] and it will automatically dereference the 3D matrix\n using origin, destination, and time of day.\n\n Parameters\n ----------\n skims: Skims\n This is the Skims object to wrap\n skim_key : str\n This identifies the column in the dataframe which is used to\n select among Skim object using the SECOND item in each tuple (see\n above for a more complete description)\n \"\"\"\n\n def __init__(self, stack, left_key, right_key, skim_key):\n\n self.stack = stack\n\n self.left_key = left_key\n self.right_key = right_key\n self.skim_key = skim_key\n self.df = None\n\n def set_df(self, df):\n \"\"\"\n Set the dataframe\n\n Parameters\n ----------\n df : DataFrame\n The dataframe which contains the origin and destination ids\n\n Returns\n -------\n Nothing\n \"\"\"\n self.df = df\n\n def __getitem__(self, key):\n \"\"\"\n Get an available skim object\n\n Parameters\n ----------\n key : hashable\n The key (identifier) for this skim object\n\n Returns\n -------\n skim: Skim\n The skim object\n \"\"\"\n\n assert self.df is not None, \"Call set_df first\"\n orig = self.df[self.left_key].astype('int')\n dest = self.df[self.right_key].astype('int')\n dim3 = self.df[self.skim_key]\n\n skim_values = self.stack.lookup(orig, dest, dim3, key)\n\n return pd.Series(skim_values, self.df.index)\n", "id": "6657997", "language": "Python", "matching_score": 3.8438069820404053, "max_stars_count": 0, "path": "activitysim/activitysim/core/skim.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport numpy as np\nimport pandas as pd\nimport numpy.testing as npt\nimport pandas.util.testing as pdt\nimport pytest\n\nfrom .. 
import skim\n\n\[email protected]\ndef data():\n return np.arange(100, dtype='int').reshape((10, 10))\n\n\ndef test_basic(data):\n sk = skim.SkimWrapper(data)\n\n orig = [5, 9, 1]\n dest = [2, 9, 6]\n\n npt.assert_array_equal(\n sk.get(orig, dest),\n [52, 99, 16])\n\n\ndef test_offset_int(data):\n sk = skim.SkimWrapper(data, skim.OffsetMapper(-1))\n\n orig = [6, 10, 2]\n dest = [3, 10, 7]\n\n npt.assert_array_equal(\n sk.get(orig, dest),\n [52, 99, 16])\n\n\ndef test_offset_list(data):\n\n offset_mapper = skim.OffsetMapper()\n offset_mapper.set_offset_list([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n # should have figured out it could use an int offset instead of list\n assert offset_mapper.offset_int == -1\n\n offset_mapper = skim.OffsetMapper()\n offset_mapper.set_offset_list([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n\n sk = skim.SkimWrapper(data, offset_mapper)\n\n orig = [60, 100, 20]\n dest = [30, 100, 70]\n\n npt.assert_array_equal(\n sk.get(orig, dest),\n [52, 99, 16])\n\n\ndef test_skim_nans(data):\n sk = skim.SkimWrapper(data)\n\n orig = [5, np.nan, 1, 2]\n dest = [np.nan, 9, 6, 4]\n\n npt.assert_array_equal(\n sk.get(orig, dest),\n [np.nan, np.nan, 16, 24])\n\n\ndef test_skims(data):\n\n skim_dict = skim.SkimDict()\n\n skim_dict.set('AM', data)\n skim_dict.set('PM', data*10)\n\n skims = skim_dict.wrap(\"taz_l\", \"taz_r\")\n\n df = pd.DataFrame({\n \"taz_l\": [1, 9, 4],\n \"taz_r\": [2, 3, 7],\n })\n\n skims.set_df(df)\n\n pdt.assert_series_equal(\n skims[\"AM\"],\n pd.Series(\n [12, 93, 47],\n index=[0, 1, 2]\n ).astype('float64')\n )\n\n pdt.assert_series_equal(\n skims[\"PM\"],\n pd.Series(\n [120, 930, 470],\n index=[0, 1, 2]\n ).astype('float64')\n )\n\n\ndef test_3dskims(data):\n\n skim_dict = skim.SkimDict()\n\n skim_dict.set((\"SOV\", \"AM\"), data)\n skim_dict.set((\"SOV\", \"PM\"), data*10)\n\n stack = skim.SkimStack(skim_dict)\n\n skims3d = stack.wrap(left_key=\"taz_l\", right_key=\"taz_r\", skim_key=\"period\")\n\n df = pd.DataFrame({\n \"taz_l\": [1, 9, 4],\n \"taz_r\": [2, 3, 7],\n \"period\": [\"AM\", \"PM\", \"AM\"]\n })\n\n skims3d.set_df(df)\n\n pdt.assert_series_equal(\n skims3d[\"SOV\"],\n pd.Series(\n [12, 930, 47],\n index=[0, 1, 2]\n ),\n check_dtype=False\n )\n", "id": "12742256", "language": "Python", "matching_score": 1.583565592765808, "max_stars_count": 0, "path": "activitysim/activitysim/core/test/test_skim.py" }, { "content": "import os\nimport psutil\nimport gc\n\nfrom operator import itemgetter\n\nimport numpy as np\nimport pandas as pd\n\nfrom zbox import toolz as tz\n\n\ndef memory_info():\n gc.collect()\n process = psutil.Process(os.getpid())\n bytes = process.memory_info().rss\n mb = (bytes / (1024 * 1024.0))\n gb = (bytes / (1024 * 1024 * 1024.0))\n return \"memory_info: %s MB (%s GB)\" % (int(mb), round(gb, 2))\n\n\ndef left_merge_on_index_and_col(left_df, right_df, join_col, target_col):\n \"\"\"\n like pandas left merge, but join on both index and a specified join_col\n\n FIXME - for now return a series of ov values from specified right_df target_col\n\n Parameters\n ----------\n left_df : pandas DataFrame\n index name assumed to be same as that of right_df\n right_df : pandas DataFrame\n index name assumed to be same as that of left_df\n join_col : str\n name of column to join on (in addition to index values)\n should have same name in both dataframes\n target_col : str\n name of column from right_df whose joined values should be returned as series\n\n Returns\n -------\n target_series : pandas Series\n series of target_col values with same 
index as left_df\n i.e. values joined to left_df from right_df with index of left_df\n \"\"\"\n assert left_df.index.name == right_df.index.name\n\n # want to know name previous index column will have after reset_index\n idx_col = right_df.index.name\n\n # SELECT target_col FROM full_sample LEFT JOIN unique_sample on idx_col, join_col\n merged = \\\n pd.merge(\n left_df[[join_col]].reset_index(),\n right_df[[join_col, target_col]].reset_index(),\n on=[idx_col, join_col],\n how=\"left\")\n\n merged.set_index(idx_col, inplace=True)\n\n return merged[target_col]\n\n\ndef reindex(series1, series2):\n \"\"\"\n This reindexes the first series by the second series. This is an extremely\n common operation that does not appear to be in Pandas at this time.\n If anyone knows of an easier way to do this in Pandas, please inform the\n UrbanSim developers.\n\n The canonical example would be a parcel series which has an index which is\n parcel_ids and a value which you want to fetch, let's say it's land_area.\n Another dataset, let's say of buildings has a series which indicate the\n parcel_ids that the buildings are located on, but which does not have\n land_area. If you pass parcels.land_area as the first series and\n buildings.parcel_id as the second series, this function returns a series\n which is indexed by buildings and has land_area as values and can be\n added to the buildings dataset.\n\n In short, this is a join on to a different table using a foreign key\n stored in the current table, but with only one attribute rather than\n for a full dataset.\n\n This is very similar to the pandas \"loc\" function or \"reindex\" function,\n but neither of those functions return the series indexed on the current\n table. In both of those cases, the series would be indexed on the foreign\n table and would require a second step to change the index.\n\n Parameters\n ----------\n series1, series2 : pandas.Series\n\n Returns\n -------\n reindexed : pandas.Series\n\n \"\"\"\n\n # turns out the merge is much faster than the .loc below\n df = pd.merge(series2.to_frame(name='left'),\n series1.to_frame(name='right'),\n left_on=\"left\",\n right_index=True,\n how=\"left\")\n return df.right\n\n # return pd.Series(series1.loc[series2.values].values, index=series2.index)\n\n\ndef other_than(groups, bools):\n \"\"\"\n Construct a Series that has booleans indicating the presence of\n something- or someone-else with a certain property within a group.\n\n Parameters\n ----------\n groups : pandas.Series\n A column with the same index as `bools` that defines the grouping\n of `bools`. 
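(In the accompanying tests, for instance, `groups` is a household id per person and `bools` flags a particular person type, so the result says whether someone else in the same household has that type.) 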
The `bools` Series will be used to index `groups` and\n then the grouped values will be counted.\n bools : pandas.Series\n A boolean Series indicating where the property of interest is present.\n Should have the same index as `groups`.\n\n Returns\n -------\n others : pandas.Series\n A boolean Series with the same index as `groups` and `bools`\n indicating whether there is something- or something-else within\n a group with some property (as indicated by `bools`).\n\n \"\"\"\n counts = groups[bools].value_counts()\n merge_col = groups.to_frame(name='right')\n pipeline = tz.compose(\n tz.curry(pd.Series.fillna, value=False),\n itemgetter('left'),\n tz.curry(\n pd.DataFrame.merge, right=merge_col, how='right', left_index=True,\n right_on='right'),\n tz.curry(pd.Series.to_frame, name='left'))\n gt0 = pipeline(counts > 0)\n gt1 = pipeline(counts > 1)\n\n return gt1.where(bools, other=gt0)\n\n\ndef quick_loc_df(loc_list, target_df, attribute):\n \"\"\"\n faster replacement for target_df.loc[loc_list][attribute]\n\n pandas DataFrame.loc[] indexing doesn't scale for large arrays (e.g. > 1,000,000 elements)\n\n Parameters\n ----------\n loc_list : list-like (numpy.ndarray, pandas.Int64Index, or pandas.Series)\n target_df : pandas.DataFrame containing column named attribute\n attribute : name of column from loc_list to return\n\n Returns\n -------\n pandas.Series\n \"\"\"\n\n left_on = \"left\"\n\n if isinstance(loc_list, pd.Int64Index):\n left_df = pd.DataFrame({left_on: loc_list.values})\n elif isinstance(loc_list, pd.Series):\n left_df = loc_list.to_frame(name=left_on)\n elif isinstance(loc_list, np.ndarray):\n left_df = pd.DataFrame({left_on: loc_list})\n else:\n raise RuntimeError(\"quick_loc_df loc_list of unexpected type %s\" % type(loc_list))\n\n df = pd.merge(left_df,\n target_df[[attribute]],\n left_on=left_on,\n right_index=True,\n how=\"left\")\n\n # regression test\n # assert list(df[attribute]) == list(target_df.loc[loc_list][attribute])\n\n return df[attribute]\n\n\ndef quick_loc_series(loc_list, target_series):\n \"\"\"\n faster replacement for target_series.loc[loc_list]\n\n pandas Series.loc[] indexing doesn't scale for large arrays (e.g. 
> 1,000,000 elements)\n\n Parameters\n ----------\n loc_list : list-like (numpy.ndarray, pandas.Int64Index, or pandas.Series)\n target_series : pandas.Series\n\n Returns\n -------\n pandas.Series\n \"\"\"\n\n left_on = \"left\"\n\n if isinstance(loc_list, pd.Int64Index):\n left_df = pd.DataFrame({left_on: loc_list.values})\n elif isinstance(loc_list, pd.Series):\n left_df = loc_list.to_frame(name=left_on)\n elif isinstance(loc_list, np.ndarray):\n left_df = pd.DataFrame({left_on: loc_list})\n else:\n raise RuntimeError(\"quick_loc_series loc_list of unexpected type %s\" % type(loc_list))\n\n df = pd.merge(left_df,\n target_series.to_frame(name='right'),\n left_on=left_on,\n right_index=True,\n how=\"left\")\n\n # regression test\n # assert list(df.right) == list(target_series.loc[loc_list])\n\n return df.right\n", "id": "11612383", "language": "Python", "matching_score": 3.041804552078247, "max_stars_count": 0, "path": "activitysim/activitysim/core/util.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport numpy as np\nimport pandas as pd\nimport pandas.util.testing as pdt\nimport pytest\n\nfrom ..util import reindex\nfrom ..util import other_than\nfrom ..util import quick_loc_series\nfrom ..util import quick_loc_df\n\n\[email protected](scope='module')\ndef people():\n return pd.DataFrame({\n 'household': [1, 2, 2, 3, 3, 3, 4, 4, 4, 4],\n 'ptype': [1, 2, 1, 3, 1, 2, 3, 2, 2, 1]},\n index=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'])\n\n\ndef test_other_than(people):\n expected = pd.Series(\n [False, False, True, True, True, False, True, True, True, True],\n index=people.index, name='left')\n\n bools = people['ptype'] == 2\n others = other_than(people['household'], bools)\n\n pdt.assert_series_equal(others, expected)\n\n\ndef test_reindex():\n s = pd.Series([.5, 1.0, 1.5], index=[2, 1, 3])\n s2 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])\n assert list(reindex(s, s2).values) == [1.0, .5, 1.5]\n\n\ndef test_quick_loc_df():\n\n df = pd.DataFrame({'attrib': ['1', '2', '3', '4', '5']}, index=[1, 2, 3, 4, 5])\n\n loc_list = np.asanyarray([2, 1, 3, 4, 4, 5, 1])\n attrib_list = [str(i) for i in loc_list]\n\n assert list(quick_loc_df(loc_list, df, 'attrib')) == attrib_list\n assert list(quick_loc_df(loc_list, df, 'attrib')) == list(df.loc[loc_list]['attrib'])\n\n\ndef test_quick_loc_series():\n\n series = pd.Series(['1', '2', '3', '4', '5'], index=[1, 2, 3, 4, 5])\n\n loc_list = np.asanyarray([2, 1, 3, 4, 4, 5, 1])\n attrib_list = [str(i) for i in loc_list]\n\n assert list(quick_loc_series(loc_list, series)) == attrib_list\n assert list(quick_loc_series(loc_list, series)) == list(series.loc[loc_list])\n", "id": "12306098", "language": "Python", "matching_score": 0.7412199974060059, "max_stars_count": 0, "path": "activitysim/activitysim/core/test/test_util.py" }, { "content": "import pytest\nimport orca\n\nfrom lcog import datasources\n\n\[email protected]\ndef expected_injectables():\n expected_injectables = ['store', 'aggregate_geos', 'year', 'dictionary']\n return expected_injectables\n\n\[email protected]\ndef expected_tables():\n expected_tables = ['parcels',\n 'buildings',\n 'jobs',\n 'households',\n 'households_pums',\n 'travel_data',\n 'nodes',\n 'edges',\n 'annual_employment_control_totals',\n 'annual_household_control_totals',\n 'zonings',\n 'locations',\n 'block_groups',\n 'blocks',\n 'zones',\n 'plans',\n 'zone_districts',\n 'zone_subdistricts',\n 'plan_types',\n 'zone_types',\n 'plan_compatible_zones',\n 'building_types',\n 
'allowable_building_types',\n 'building_sqft_per_job',\n 'site_proposals',\n 'target_vacancies']\n return expected_tables\n\n\ndef test_injectable_list(expected_injectables):\n print(orca.list_injectables())\n assert orca.list_injectables() == expected_injectables\n\n\ndef test_table_list(expected_tables):\n print(orca.list_tables())\n assert orca.list_tables() == expected_tables\n", "id": "9706239", "language": "Python", "matching_score": 1.4481573104858398, "max_stars_count": 2, "path": "bayarea/tests/test_datasources.py" }, { "content": "import pandas as pd\nfrom spandex import TableLoader\nimport pandas.io.sql as sql\n\nloader = TableLoader()\n\ndef db_to_df(query):\n \"\"\"Executes SQL query and returns DataFrame.\"\"\"\n conn = loader.database._connection\n return sql.read_frame(query, conn)\n\n## Export to HDF5- get path to output file\nh5_path = loader.get_path('out/regeneration/summaries/bayarea_v3.h5') ## Path to the output file\n\n#Buildings\nbuildings = db_to_df('select * from building').set_index('building_id')\nif 'id' in buildings.columns:\n del buildings['id']\nbuildings['building_type_id'] = 0\nbuildings.building_type_id[buildings.development_type_id == 1] = 1\nbuildings.building_type_id[buildings.development_type_id == 2] = 3\nbuildings.building_type_id[buildings.development_type_id == 5] = 12\nbuildings.building_type_id[buildings.development_type_id == 7] = 10\nbuildings.building_type_id[buildings.development_type_id == 9] = 5\nbuildings.building_type_id[buildings.development_type_id == 10] = 4\nbuildings.building_type_id[buildings.development_type_id == 13] = 8\nbuildings.building_type_id[buildings.development_type_id == 14] = 7\nbuildings.building_type_id[buildings.development_type_id == 15] = 9\nbuildings.building_type_id[buildings.development_type_id == 13] = 8\nbuildings.building_type_id[buildings.development_type_id == 17] = 6\nbuildings.building_type_id[buildings.development_type_id == 24] = 16\n\n#Parcels\nparcels = db_to_df('select * from parcel').set_index('parcel_id')\nparcels['shape_area'] = parcels.acres * 4046.86\nif 'id' in parcels.columns:\n del parcels['id']\nif 'geom' in parcels.columns:\n del parcels['geom']\nif 'centroid' in parcels.columns:\n del parcels['centroid']\n\n#Jobs\njobs = db_to_df('select * from jobs').set_index('job_id')\nif 'id' in jobs.columns:\n del jobs['id']\n\n#Households\nhh = db_to_df('select * from households').set_index('household_id')\nif 'id' in hh.columns:\n del hh['id']\nhh = hh.rename(columns = {'hinc':'income'})\nfor col in hh.columns:\n hh[col] = hh[col].astype('int32')\n \n#Zones\nzones_path = loader.get_path('juris/reg/zones/zones.csv')\nzones = pd.read_csv(zones_path).set_index('zone_id')\n\n#Putting tables in the HDF5 file\nstore = pd.HDFStore(h5_path)\nstore['parcels'] = parcels # http://urbansim.org/Documentation/Parcel/ParcelTable\nstore['buildings'] = buildings # http://urbansim.org/Documentation/Parcel/BuildingsTable\nstore['households'] = hh # http://urbansim.org/Documentation/Parcel/HouseholdsTable\nstore['jobs'] = jobs # http://urbansim.org/Documentation/Parcel/JobsTable\nstore['zones'] = zones # http://urbansim.org/Documentation/Parcel/ZonesTable\nstore.close()", "id": "12858773", "language": "Python", "matching_score": 1.6481971740722656, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/export_to_h5.py" }, { "content": "import pandas as pd\n\n# system vars\npath_to_usim = '/home/mgardner/src/bayarea_urbansim/'\nusim_data_dir = 'data/'\nusim_output_dir = 'output/'\nusim_h5_file = 
'2015_09_01_bayarea_v3.h5'\npath_to_asim = '/home/mgardner/src/activitysim/'\nasim_data_dir = 'example/data/'\nasim_h5_file = 'mtc_asim.h5'\n\n# load both data stores\nusim_store = pd.HDFStore(path_to_usim + usim_data_dir + usim_h5_file)\nasim_store = pd.HDFStore(path_to_asim + asim_data_dir + asim_h5_file)\n\n# replace asim households with usim households\nusim_households = usim_store['households'].copy()\nusim_store.close()\nasim_col_names = asim_store['households'].columns\nasim_index_name = asim_store['households'].index.name\nasim_households = usim_households\nasim_households.columns = asim_col_names.tolist() + \\\n usim_households.columns.tolist()[len(asim_col_names):]\nasim_households.index.name = asim_index_name\nasim_store.put('households', asim_households, format='table')\n\n# drop asim persons with no households in the updated households table\nasim_persons = asim_store['persons']\npersons_mask = asim_persons.household_id.isin(asim_store['households'].index)\nasim_persons = asim_persons[persons_mask]\nasim_store.put('persons', asim_persons, format='table')\n\n# replace asim land_use/taz_data with usim taz baseyear summaries\nusim_taz_filename = 'baseyear_taz_summaries_2010.csv'\nusim_taz_summaries = pd.read_csv(\n path_to_usim + usim_output_dir + usim_taz_filename)\nasim_taz_summaries = asim_store['land_use/taz_data']\nassert len(asim_taz_summaries) == len(usim_taz_summaries)\nasim_taz_persist = asim_taz_summaries[[ # these need to get updated somehow\n 'HSENROLL', 'COLLFTE', 'COLLPTE', 'TOPOLOGY', 'ZERO']] # persisting is a stop-gap!\nasim_index_name = asim_taz_summaries.index.name\nusim_taz_summaries.set_index('zone_id', inplace=True)\nusim_taz_summaries.index.name = asim_index_name\nusim_taz_summaries.rename(\n columns={'GQPOP': 'gqpop', 'AREA_TYPE': 'area_type'}, inplace=True)\nusim_taz_summaries.loc[:, 'hhlds'] = usim_taz_summaries['TOTHH']\nusim_taz_summaries.loc[:, 'sftaz'] = usim_taz_summaries.index.values\nusim_taz_summaries = pd.merge(\n usim_taz_summaries, asim_taz_persist, left_index=True, right_index=True)\nasim_store.put('land_use/taz_data', usim_taz_summaries, format='table')\n\n\n# close up shop\nasim_store.close()\n", "id": "12835230", "language": "Python", "matching_score": 1.2602256536483765, "max_stars_count": 0, "path": "activitysim/example/extensions/data_exchange.py" }, { "content": "import numpy as np\nimport pandas as pd\n\nimport orca\n\n\[email protected](\"land_use\")\ndef total_households(land_use):\n return land_use.local.TOTHH\n\n\[email protected](\"land_use\")\ndef total_employment(land_use):\n return land_use.local.TOTEMP\n\n\[email protected](\"land_use\")\ndef total_acres(land_use):\n return land_use.local.TOTACRE\n\n\[email protected](\"land_use\")\ndef county_id(land_use):\n return land_use.local.COUNTY\n\n\[email protected](\"land_use\")\ndef household_density(land_use):\n return land_use.total_households / land_use.total_acres\n\n\[email protected](\"land_use\")\ndef employment_density(land_use):\n return land_use.total_employment / land_use.total_acres\n\n\[email protected](\"land_use\")\ndef density_index(land_use):\n # FIXME - avoid div by 0\n return (land_use.household_density * land_use.employment_density) / \\\n (land_use.household_density + land_use.employment_density).clip(lower=1)\n\n\[email protected](\"land_use\")\ndef county_name(land_use, settings):\n assert \"county_map\" in settings\n inv_map = {v: k for k, v in settings[\"county_map\"].items()}\n return land_use.county_id.map(inv_map)\n", "id": "841385", "language": 
"Python", "matching_score": 1.002624273300171, "max_stars_count": 0, "path": "activitysim/activitysim/abm/test/extensions/landuse.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport orca\nimport pandas as pd\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef size_term(land_use, destination_choice_coeffs):\n \"\"\"\n This method takes the land use data and multiplies various columns of the\n land use data by coefficients from the spec table in order\n to yield a size term (a linear combination of land use variables).\n\n Parameters\n ----------\n land_use : DataFrame\n A dataframe of land use attributes - the column names should match\n the index of destination_choice_coeffs\n destination_choice_coeffs : Series\n A series of coefficients for the land use attributes - the index\n describes the link to the land use table, and the values are floating\n points numbers used to do the linear combination\n\n Returns\n -------\n values : Series\n The index will be the same as land use, and the values will the\n linear combination of the land use table columns specified by the\n coefficients series.\n \"\"\"\n coeffs = destination_choice_coeffs\n\n # first check for missing column in the land_use table\n missing = coeffs[~coeffs.index.isin(land_use.columns)]\n\n if len(missing) > 0:\n logger.warn(\"%s missing columns in land use\" % len(missing.index))\n for v in missing.index.values:\n logger.warn(\"missing: %s\" % v)\n\n return land_use[coeffs.index].dot(coeffs)\n\n\[email protected]()\ndef size_terms(configs_dir):\n f = os.path.join(configs_dir, 'destination_choice_size_terms.csv')\n return pd.read_csv(f, index_col='segment')\n\n\[email protected]()\ndef destination_size_terms(land_use, size_terms):\n land_use = land_use.to_frame()\n size_terms = size_terms.to_frame()\n df = pd.DataFrame({key: size_term(land_use, row) for key, row in size_terms.iterrows()},\n index=land_use.index)\n df.index.name = \"TAZ\"\n return df\n", "id": "1515265", "language": "Python", "matching_score": 2.4844746589660645, "max_stars_count": 0, "path": "activitysim/activitysim/abm/tables/size_terms.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\nimport orca\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]()\ndef land_use(store):\n\n df = store[\"land_use/taz_data\"]\n\n logger.info(\"loaded land_use %s\" % (df.shape,))\n\n # replace table function with dataframe\n orca.add_table('land_use', df)\n\n return df\n\n\norca.broadcast('land_use', 'households', cast_index=True, onto_on='TAZ')\n", "id": "4394416", "language": "Python", "matching_score": 2.756227493286133, "max_stars_count": 0, "path": "activitysim/activitysim/abm/tables/landuse.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport orca\nimport pandas as pd\n\n\[email protected]()\ndef accessibility(store):\n df = store[\"skims/accessibility\"]\n # FIXME - should eventually replace when activity model is stable\n # FIXME - but will break regression tests\n # df.columns = [\"%s_regress\" % c.upper() for c in df.columns]\n df.columns = [c.upper() for c in df.columns]\n\n # replace table function with dataframe\n orca.add_table('accessibility', df)\n return df\n\n\n# this would be accessibility around the household location - be careful with\n# this one as accessibility at some other location can also matter\norca.broadcast('accessibility', 'households', cast_index=True, onto_on='TAZ')\n", "id": "6188557", "language": "Python", 
"matching_score": 2.167058229446411, "max_stars_count": 0, "path": "activitysim/activitysim/abm/tables/accessibility.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport misc\nimport tables\nimport models\n", "id": "7499824", "language": "Python", "matching_score": 0.07331850379705429, "max_stars_count": 0, "path": "activitysim/activitysim/abm/__init__.py" }, { "content": "import numpy as np\nimport pandas as pd\nfrom spandex import TableLoader, TableFrame\nfrom spandex.io import df_to_db\nimport urbansim.sim.simulation as sim\n\nimport utils\n\n\nloader = TableLoader()\nstaging = loader.tables.staging\n\n\n## Assumptions.\n\n\n# Use codes were classified manually because the assessor classifications\n# are meant for property tax purposes. These classifications should be\n# reviewed and revised.\nres_codes = {'single': [str(i) for i in range(11, 17)] + ['19'],\n 'multi': ['10', '17', '18', '61', '88'] +\n [str(i) for i in range(20, 30)],\n 'mixed': ['48', '89']}\nexempt_codes = []\n\n\n## Register input tables.\n\n\ntf = TableFrame(staging.parcels_cnc_pt)\nsim.add_table('parcels_in', tf, copy_col=False)\n\n\n## Register intermediate table and columns.\n\n# The purpose of this intermediate table is to compute certain fields,\n# like non_residential_sqft and residential_units, before grouping together\n# records with the same parc_py_id. Thus, single-family condominium units\n# would each be assumed to have one residential unit, and this count would\n# be summed when grouping later.\n\n\[email protected]()\ndef parcels_in2(parcels_in):\n return pd.DataFrame(index=parcels_in.index)\n\n\nin2 = sim.column('parcels_in2', cache=True)\n\n\n@in2\ndef res_type2(land_use_type_id='parcels_in.use_code'):\n return utils.get_res_type(land_use_type_id, res_codes)\n\n@in2\ndef building_sqft2(bldg_sqft='parcels_in.bldg_sqft', tla='parcels_in.tla'):\n # Alternate inputs:\n # - \"NET_RNT_AR\"\n sqft = pd.concat([bldg_sqft, tla])\n return sqft.groupby(level=0).max()\n\n\n@in2\ndef non_residential_sqft2(building_sqft='parcels_in2.building_sqft2',\n res_type='parcels_in2.res_type2',\n residential_units='parcels_in2.residential_units2'):\n return utils.get_nonresidential_sqft(building_sqft, res_type,\n residential_units)\n \n\n@in2\ndef residential_units2(tot_units='parcels_in.units',\n res_type='parcels_in2.res_type2', land_use_type_id='parcels_in.use_code'):\n \n units = pd.Series(index=res_type.index)\n tot_units = tot_units.reindex(units.index, copy=False)\n land_use_type_id = land_use_type_id.reindex(units.index, copy=False)\n\n # If not residential, assume zero residential units.\n units[res_type.isnull()] = 0\n\n # If single family residential, assume one residential unit.\n units[res_type == 'single'] = 1\n\n # If non-single residential, assume all units are all residential,\n # even if mixed-use.\n units[res_type == 'multi'] = tot_units\n units[res_type == 'mixed'] = tot_units\n\n # Note in the points layer, certain condos and PUDs (as denoted by lutype 29)\n # are split up into one record per residential unit with the unit column left\n # unpopulated or as 0. In such cases, we assign a value of 1 so that when \n # we group by parc_py_id, the resulting MF residential unit counts make sense,\n # e.g. 
in Rossmoor.\n units[land_use_type_id.isin(['29',])*np.logical_or((tot_units==0), tot_units.isnull())] = 1\n\n return units\n\n\n## Register output table.\n\n\[email protected](cache=True)\ndef parcels_out(parcels_in):\n # Rename \"parc_py_id\" to \"apn\" to satisfy unique constraint.\n # There are duplicate APNs that should be merged together, but\n index = parcels_in.parc_py_id.dropna().unique()\n df = pd.DataFrame(index=index)\n df.index.name = 'apn'\n return df\n\n\n## Register output columns.\n\n\nregroup = lambda c: c.groupby(tf.parc_py_id)\nout = sim.column('parcels_out', cache=True)\n\n\n@out\ndef county_id():\n return '013'\n\n\n@out\ndef parcel_id_local(apn='parcels_in.apn'):\n return regroup(apn).first()\n\n\n@out\ndef land_use_type_id(code='parcels_in.use_code'):\n return regroup(code).first()\n\n\n@out\ndef res_type(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_res_type(land_use_type_id, res_codes)\n\n\n@out\ndef land_value(value='parcels_in.land_value', sign='parcels_in.lnd_val_sn'):\n sign = sign.replace({'-': -1, '+': +1}).astype(float)\n value = value * sign\n return regroup(value).sum()\n\n\n@out\ndef improvement_value(value='parcels_in.imp_val',\n sign='parcels_in.imp_val_sn'):\n sign = sign.replace({'-': -1, '+': +1}).astype(float)\n value = value * sign\n return regroup(value).sum()\n\n\n@out\ndef year_assessed(date='parcels_in.c_ded_dt'):\n # Assume that current deed date is date of assessment.\n year = date.astype(float).floordiv(10000)\n year.replace(0, np.nan, inplace=True)\n return regroup(year).median()\n\n\n@out\ndef year_built(yr='parcels_in.yr_built', yr_hs='parcels_in.yr_hs_blt'):\n # Alternate inputs:\n # - \"yr_blt_msc\"\n year = pd.concat([yr, yr_hs]).astype(float)\n year.replace(0, np.nan, inplace=True)\n return regroup(year).median()\n\n\n@out\ndef building_sqft(building_sqft='parcels_in2.building_sqft2'):\n return regroup(building_sqft).sum()\n\n\n@out\ndef non_residential_sqft(non_residential_sqft=\n 'parcels_in2.non_residential_sqft2'):\n return regroup(non_residential_sqft).sum()\n\n\n@out\ndef residential_units(parcels_in, residential_units='parcels_in2.residential_units2'):\n return regroup(residential_units).sum()\n \n@out\ndef condo_identifier(address='parcels_in.s_str_nbr', street='parcels_in.s_str_nm', zipcode='parcels_in.s_zip'):\n address = regroup(address).first()\n street = regroup(street).first()\n zipcode = regroup(zipcode).first()\n code = address + street + zipcode\n code[code.isnull()] = ''\n #code[code == ''] = None\n return code\n\n\n@out\ndef sqft_per_unit(building_sqft='parcels_out.building_sqft',\n non_residential_sqft='parcels_out.non_residential_sqft',\n residential_units='parcels_out.residential_units'):\n return utils.get_sqft_per_unit(building_sqft, non_residential_sqft,\n residential_units)\n\n\n@out\ndef stories(stories='parcels_in.stories'):\n return regroup(stories).median()\n\n\n@out\ndef tax_exempt(code='parcels_in.n_tax_code'):\n return regroup(code).first().notnull().astype(int)\n\n\n## Export back to database.\n\n\[email protected]()\ndef export_cnc(parcels_out):\n df = parcels_out.to_frame()\n assert df.index.is_unique\n assert not df.index.hasnans\n df.res_type[df.res_type.isnull()] = ''\n df_to_db(df, 'attributes_cnc', schema=staging)\n\nsim.run(['export_cnc'])\n", "id": "11682785", "language": "Python", "matching_score": 6.45671272277832, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/counties/cnc.py" }, { "content": "import numpy as np\nimport pandas as pd\nfrom spandex 
import TableLoader, TableFrame\nfrom spandex.io import df_to_db\nimport urbansim.sim.simulation as sim\n\nimport utils\n\n\nloader = TableLoader()\nstaging = loader.tables.staging\n\n\n## Assumptions.\n\n\n# Use codes were classified manually because the assessor classifications\n# are meant for property tax purposes. These classifications should be\n# reviewed and revised.\nres_codes = {'single': ['RSFR'],\n 'multi': ['RAPT', 'RCON', 'RDUP', 'RMFD', 'RMOB', 'RMSC',\n 'RCOO', 'RQUA', 'RTIM', 'RTRI', 'VRES'],\n 'mixed': []}\nexempt_codes = []\n\n\n## Register input tables.\n\n\ntf = TableFrame(staging.parcels_scl, index_col='parcel')\nsim.add_table('parcels_in', tf, copy_col=False)\n\n\[email protected](cache=True)\ndef scvta():\n # Will need to group by the site address fields later to aggregate\n # condos with multiple parcels but only a single polygon. Currently,\n # only one condo will join to the geometries in the shapefile.\n df = loader.get_attributes('built/parcel/2010/scl/Scvta031210.dbf')\n\n # Strip non-numeric characters in parcel numbers. Affects three records.\n df['PARCEL_NUM'] = df.PARCEL_NUM.str.replace('[^0-9]', '')\n\n # There are only 11 duplicated parcel numbers. From manual inspection,\n # it appears that the \"ASSESSED_V\" field is zero for one of the entries\n # in each pair of duplicates. Keep the entry with ASSESSED_V > 0.\n df = df[~df.PARCEL_NUM.isin(df.PARCEL_NUM[df.PARCEL_NUM.duplicated()]) |\n (df.ASSESSED_V > 0)]\n\n df.set_index('PARCEL_NUM', inplace=True)\n assert df.index.is_unique\n assert not df.index.hasnans()\n return df\n\n\n## Register output table.\n\n\[email protected](cache=True)\ndef parcels_out(scvta):\n index = pd.Series(scvta.index).dropna().unique()\n df = pd.DataFrame(index=index)\n df.index.name = 'apn'\n return df\n\n\n## Register output columns.\n\n\nout = sim.column('parcels_out', cache=True)\n\n\n@out\ndef county_id():\n return '085'\n\n\n@out\ndef parcel_id_local():\n pass\n\n\n@out\ndef land_use_type_id(code='scvta.STD_USE_CO'):\n code[code == ''] = None\n return code\n\n@out\ndef condo_identifier(address='scvta.SITE_HOUSE', street='scvta.SITE_STREE', zipcode='scvta.SITE_ZIP'):\n code = address + street + zipcode.astype('str')\n code[code == ''] = None\n return code\n \n@out\ndef res_type(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_res_type(land_use_type_id, res_codes)\n\n\n@out\ndef land_value(value='scvta.ASSESSED_V', percent_im='scvta.PERCENT_IM'):\n # Alternate inputs:\n # - \"SALE_AMOUN\"\n return 0.0001 * (10000 - percent_im) * value\n\n\n@out\ndef improvement_value(value='scvta.ASSESSED_V',\n percent_im='scvta.PERCENT_IM'):\n # Alternate inputs:\n # - \"SALE_AMOUN\"\n return 0.0001 * percent_im * value\n\n\n@out\ndef year_assessed(date='scvta.SALE_DATE'):\n # Alternate inputs:\n # - \"YEAR_SOLD_\": less data available and inconsistent with \"SALE_DATE\"\n #\n # A better approach may be needed. 
For example, could use the max of\n # \"SALE_DATE\" and \"YEAR_SOLD_\" when both are available.\n date.replace(0, np.nan, inplace=True)\n return date.floordiv(10000)\n\n\n@out\ndef year_built(year='scvta.YEAR_BUILT'):\n # Alternate inputs:\n # - \"EFF_YEAR_B\"\n year.replace(0, np.nan, inplace=True)\n return year\n\n\n@out\ndef building_sqft(sqft='scvta.SQ_FT'):\n return sqft\n\n\n@out\ndef non_residential_sqft(building_sqft='parcels_out.building_sqft',\n res_type='parcels_out.res_type',\n residential_units='parcels_out.residential_units'):\n return utils.get_nonresidential_sqft(building_sqft, res_type,\n residential_units)\n\n\n@out\ndef residential_units(tot_units='scvta.NUMBER_OF1',\n res_type='parcels_out.res_type', land_use_type_id='parcels_out.land_use_type_id'):\n # Alternate inputs:\n # - NUMBER_O_1\n # - NUMBER_O_2\n # - NUMBER_O_3\n # We previously assumed \"NUMBER_OF_\" is number of units, but are not certain.\n # Some values are unreasonably high (e.g., 9100).\n # Now we assume \"NUMBER_OF1\" is number of residential units. The values are much more reasonable.\n \n units = pd.Series(index=res_type.index)\n tot_units = tot_units.reindex(units.index, copy=False)\n land_use_type_id = land_use_type_id.reindex(units.index, copy=False)\n\n # If not residential, assume zero residential units.\n units[res_type.isnull()] = 0\n\n # If single family residential, assume one residential unit.\n units[res_type == 'single'] = 1\n\n # If non-single residential, assume all units are all residential,\n # even if mixed-use.\n units[res_type == 'multi'] = tot_units\n units[res_type == 'mixed'] = tot_units\n units[land_use_type_id.isin(['RCON',])*np.logical_or((tot_units==0), tot_units.isnull())] = 1\n\n return units\n\n\n@out\ndef sqft_per_unit(building_sqft='parcels_out.building_sqft',\n non_residential_sqft='parcels_out.non_residential_sqft',\n residential_units='parcels_out.residential_units'):\n return utils.get_sqft_per_unit(building_sqft, non_residential_sqft,\n residential_units)\n\n\n@out\ndef stories(stories='scvta.NUMBER_OF1'):\n # Field name confirmed by inspecting the Sobrato Office Tower\n # (APN 26428171), at 488 Almaden Blvd, San Jose, which is a\n # single parcel with a 17-story building.\n return stories\n\n\n@out\ndef tax_exempt(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_tax_exempt(land_use_type_id, exempt_codes)\n\n\n## Export back to database.\n\n\[email protected]()\ndef export_scl(parcels_out):\n df = parcels_out.to_frame()\n assert df.index.is_unique\n assert not df.index.hasnans()\n df_to_db(df, 'attributes_scl', schema=staging)\n\nsim.run(['export_scl'])\n", "id": "3434845", "language": "Python", "matching_score": 5.026947975158691, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/counties/scl.py" }, { "content": "import os\nimport string\nimport subprocess\n\nimport numpy as np\nimport pandas as pd\nfrom spandex import TableLoader, TableFrame\nfrom spandex.io import df_to_db\nimport urbansim.sim.simulation as sim\n\nimport utils\n\n\nloader = TableLoader()\nstaging = loader.tables.staging\n\n\n## Assumptions.\n\n\n# Use codes were classified manually because the assessor classifications\n# are meant for property tax purposes. 
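(Under this hand-made mapping, for example, code '01' is read as single-family and codes such as '02'-'05' as multi-family.) 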
These classifications should be\n# reviewed and revised.\nres_codes = {'single': ['01', '51', '52', '53'],\n 'multi': [string.zfill(i, 2) for i in\n range(2, 6) + range(7, 10) + range(89, 99)],\n 'mixed': []}\nexempt_codes = []\n\n\n## Register input tables.\n\n\ntf = TableFrame(staging.parcels_smt, index_col='apn')\nsim.add_table('parcels_in', tf, copy_col=False)\n\n\[email protected](cache=True)\ndef roll():\n mdb = loader.get_path(\n 'built/parcel/2010/smt/Property Characteristics/ASSESSOR_ROLL.mdb'\n )\n csv = os.path.splitext(mdb)[0] + '.csv'\n\n if not os.path.exists(csv):\n with open(csv, 'w') as f:\n # Export of MS Access database requires mdbtools.\n subprocess.check_call(['mdb-export', mdb, '2009_ROLL'], stdout=f)\n\n df = pd.read_csv(csv, dtype={'APN': str}, low_memory=False)\n\n # Deduplicate by taking the last record, sorted by sequence ID (\"SEQ\").\n # Alternatively, could sort by date (\"DOR\") for similar results.\n df.sort('SEQ', ascending=True, inplace=True)\n df.drop_duplicates('APN', take_last=True, inplace=True)\n\n df.set_index('APN', inplace=True)\n assert df.index.is_unique\n assert not df.index.hasnans()\n return df\n\n\[email protected](cache=True)\ndef situs():\n csv = loader.get_path(\n 'built/parcel/2010/smt/Property Characteristics/SITUS_SNPSHT.csv'\n )\n df = pd.read_csv(csv, dtype={'APN': str}, low_memory=False)\n df = df[df.APN.notnull()]\n df.set_index('APN', inplace=True)\n assert df.index.is_unique\n assert not df.index.hasnans()\n return df\n\n\n## Register output table.\n\n\[email protected](cache=True)\ndef parcels_out(parcels_in):\n index = pd.Series(parcels_in.index).dropna().unique()\n df = pd.DataFrame(index=index)\n df.index.name = 'apn'\n return df\n\n\n## Register output columns.\n\n\nout = sim.column('parcels_out', cache=True)\n\n\n@out\ndef county_id():\n return '081'\n\n\n@out\ndef parcel_id_local():\n pass\n\n\n@out\ndef land_use_type_id(code='situs.USE_CODE'):\n return code\n\n\n@out\ndef res_type(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_res_type(land_use_type_id, res_codes)\n\n\n@out\ndef land_value(value='roll.ASSLAND'):\n # Alternate inputs:\n # - \"TEMPLAND\"\n return value\n\n\n@out\ndef improvement_value(value='roll.ASSIMPS'):\n # Alternate inputs:\n # - \"TEMPIMPS\"\n return value\n\n\n@out\ndef year_assessed(date='roll.DOR'):\n # Assumed that \"DOR\" represents date of record or similar.\n date.replace(0, np.nan, inplace=True)\n return date.floordiv(10000)\n\n\n@out\ndef year_built():\n return np.nan\n\n\n@out\ndef building_sqft():\n # Workaround since np.nan would raise an exception when injecting below.\n return pd.Series()\n\n\n@out\ndef non_residential_sqft(building_sqft='parcels_out.building_sqft',\n res_type='parcels_out.res_type',\n residential_units='parcels_out.residential_units'):\n return utils.get_nonresidential_sqft(building_sqft, res_type,\n residential_units)\n\n\n@out\ndef residential_units(res_type='parcels_out.res_type'):\n return utils.get_residential_units(tot_units=pd.Series(),\n res_type=res_type)\n \n@out\ndef condo_identifier():\n code = ' '\n return code\n\n\n@out\ndef sqft_per_unit(building_sqft='parcels_out.building_sqft',\n non_residential_sqft='parcels_out.non_residential_sqft',\n residential_units='parcels_out.residential_units'):\n return utils.get_sqft_per_unit(building_sqft, non_residential_sqft,\n residential_units)\n\n\n@out\ndef stories():\n return np.nan\n\n\n@out\ndef tax_exempt(land_use_type_id='parcels_out.land_use_type_id'):\n return 
utils.get_tax_exempt(land_use_type_id, exempt_codes)\n\n\n## Export back to database.\n\n\[email protected]()\ndef export_smt(parcels_out):\n df = parcels_out.to_frame()\n assert df.index.is_unique\n assert not df.index.hasnans()\n df.res_type[df.res_type.isnull()] = ''\n df_to_db(df, 'attributes_smt', schema=staging)\n\nsim.run(['export_smt'])\n", "id": "6989595", "language": "Python", "matching_score": 5.639614582061768, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/counties/smt.py" }, { "content": "import string\n\nimport numpy as np\nimport pandas as pd\nfrom spandex import TableLoader, TableFrame\nfrom spandex.io import df_to_db\nimport urbansim.sim.simulation as sim\n\nimport utils\n\n\nloader = TableLoader()\nstaging = loader.tables.staging\n\n\n## Assumptions.\n\n\n# Use codes were classified manually because the assessor classifications\n# are meant for property tax purposes. These classifications should be\n# reviewed and revised.\n# Residential status was classified based on \"LandUseDesc\" column.\nres_codes = {'single': ['0010', '0051'],\n 'multi': [string.zfill(num, 4) for num in\n [0, 1, 5, 11, 12] + range(15, 51) + range(52, 58)],\n 'mixed': ['0013', '0014', '0177', '0201', '0392']}\nexempt_codes = []\n\n\n## Register input tables.\n\n\ntf = TableFrame(staging.parcels_son, index_col='apn')\n\nsim.add_table('parcels_in', tf, copy_col=False)\n\n\[email protected](cache=True)\ndef abag():\n def format_apn(num):\n s = string.zfill(num, 9)\n assert len(s) <= 9\n return \"{}-{}-{}\".format(s[:3], s[3:6], s[6:])\n\n csv = loader.get_path('built/parcel/2010/son/ABAG_SonomaCounty.txt')\n df = pd.read_csv(csv, low_memory=False, parse_dates=[17])\n\n # Rename \".Units\" to \"Units\".\n df.rename(columns={'.Units': 'Units'}, inplace=True)\n\n # Create apn index based on FeeParcel column.\n df['apn'] = df.FeeParcel.floordiv(1000).apply(format_apn)\n df.set_index('apn', inplace=True)\n\n assert df.index.is_unique\n assert not df.index.hasnans()\n\n return df\n\n\n## Register output table.\n\n\[email protected](cache=True)\ndef parcels_out(parcels_in):\n index = pd.Series(parcels_in.index).dropna().unique()\n df = pd.DataFrame(index=index)\n df.index.name = 'apn'\n return df\n\n\n## Register output columns.\n\n\nout = sim.column('parcels_out', cache=True)\n\n\n@out\ndef county_id():\n return '097'\n\n\n@out\ndef parcel_id_local():\n pass\n\n\n@out\ndef land_use_type_id(code='abag.LandUse'):\n return code\n\n\n@out\ndef res_type(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_res_type(land_use_type_id, res_codes)\n\n\n@out\ndef land_value(value='abag.LandValue'):\n return value\n\n\n@out\ndef improvement_value(value='abag.StructureValue'):\n return value\n\n\n@out\ndef year_assessed(date='abag.LastSaleDate'):\n year = date.apply(lambda d: d.year if d else np.nan)\n year.replace([-1, 0], np.nan, inplace=True)\n return year\n\n\n@out\ndef year_built(year='abag.YearBuilt'):\n year.replace(0, np.nan, inplace=True)\n return year\n\n\n@out\ndef building_sqft(sqft='abag.BuildingSizePRIMARY'):\n return sqft\n\n\n@out\ndef non_residential_sqft(building_sqft='parcels_out.building_sqft',\n res_type='parcels_out.res_type',\n residential_units='parcels_out.residential_units'):\n return utils.get_nonresidential_sqft(building_sqft, res_type,\n residential_units)\n\n\n@out\ndef residential_units(tot_units='abag.Units',\n res_type='parcels_out.res_type'):\n return utils.get_residential_units(tot_units, res_type)\n\n@out\ndef condo_identifier(merged_situs = 
'abag.MergedSitus', year_built = 'abag.YearBuilt', city_code = 'abag.SitusCityCode', lu_code = 'abag.LandUse'):\n \n merged_situs[merged_situs.str.len() < 8] = ' '\n merged_situs[merged_situs.isnull()] = ' '\n merged_situs = merged_situs.str.slice(0,8)\n lu_code[lu_code.str.len() != 4] = '9999'\n lu_code[lu_code.isnull()] = '9999'\n year_built[year_built.isnull()] = 9999\n year_built[year_built == 0] = 8888\n year_built = year_built.astype('str')\n city_code[city_code.isnull()] = ' '\n code = merged_situs + lu_code + year_built + city_code\n code[code.isnull()] = ''\n return code\n\n@out\ndef sqft_per_unit(building_sqft='parcels_out.building_sqft',\n non_residential_sqft='parcels_out.non_residential_sqft',\n residential_units='parcels_out.residential_units'):\n return utils.get_sqft_per_unit(building_sqft, non_residential_sqft,\n residential_units)\n\n\n@out\ndef stories():\n return np.nan\n\n\n@out\ndef tax_exempt(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_tax_exempt(land_use_type_id, exempt_codes)\n\n\n## Export back to database.\n\n\[email protected]()\ndef export_son(parcels_out):\n df = parcels_out.to_frame()\n assert df.index.is_unique\n assert not df.index.hasnans()\n df_to_db(df, 'attributes_son', schema=staging)\n\nsim.run(['export_son'])\n", "id": "9539786", "language": "Python", "matching_score": 5.550317287445068, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/counties/son.py" }, { "content": "import numpy as np\nimport pandas as pd\nfrom spandex import TableLoader, TableFrame\nfrom spandex.io import df_to_db\nimport urbansim.sim.simulation as sim\n\nimport utils\n\n\nloader = TableLoader()\nstaging = loader.tables.staging\n\n\n## Assumptions.\n\n\n# Use codes were classified manually because the assessor classifications\n# are meant for property tax purposes. 
These classifications should be\n# reviewed and revised.\nres_codes = {'single': ([1100] + range(1120, 1151) + range(1200, 1501) +\n range(1900, 2000)),\n 'multi': (range(600, 1100) + [1700] + range(2000, 3000) +\n range(5000, 5300) + range(7000, 7701) + [7800]),\n 'mixed': (range(3900, 4000) + [4101] + [4191] + [4240] +\n [9401] + [9491])}\nexempt_codes = range(1, 1000)\n\n\n## Register input tables.\n\n\ntf = TableFrame(staging.parcels_ala, index_col='apn_sort')\nsim.add_table('parcels_in', tf, copy_col=False)\n\n\[email protected](cache=True)\ndef ie670():\n filepath = \\\n loader.get_path('built/parcel/2010/ala/assessor_nov10/IE670c.txt')\n df = pd.read_table(filepath, sep='\\t', index_col=False, low_memory=False)\n df.set_index(\"Assessor's Parcel Number (APN) sort format\", inplace=True)\n assert df.index.is_unique\n assert not df.index.hasnans()\n return df\n\n\[email protected](cache=True)\ndef ie673():\n filepath = \\\n loader.get_path('built/parcel/2010/ala/assessor_nov10/IE673c.txt')\n df = pd.read_table(filepath, sep='\\t', index_col=False)\n df.set_index('APNsort', inplace=True)\n assert df.index.is_unique\n assert not df.index.hasnans()\n return df\n\n\n## Register output table.\n\n\[email protected](cache=True)\ndef parcels_out(parcels_in):\n index = pd.Series(parcels_in.index).dropna().unique()\n df = pd.DataFrame(index=index)\n df.index.name = 'apn'\n return df\n\n\n## Register output columns.\n\n\nout = sim.column('parcels_out', cache=True)\n\n\n@out\ndef county_id():\n return '001'\n\n\n@out\ndef parcel_id_local():\n pass\n\n\n@out\ndef land_use_type_id(parcels_out, code='ie673.UseCode'):\n # Alternate inputs:\n # - \"Use Code\": from IE670, values are identical\n return code.reindex(parcels_out.index, copy=False).fillna(0).astype(int)\n\n\n@out\ndef res_type(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_res_type(land_use_type_id, res_codes)\n\n\n@out\ndef land_value(value='ie670.Land value'):\n # Alternate inputs:\n # - \"CLCA land value\": less data available\n return value\n\n\n@out\ndef improvement_value(value='ie670.Improvements value'):\n # Alternate inputs:\n # - \"CLCA improvements value\": less data available\n return value\n\n\n@out\ndef year_assessed(date='ie670.Last document date (CCYYMMDD)'):\n # Alternate inputs:\n # - \"Last document prefix\": not always numeric\n # - \"Last document input date (CCYYMMDD)\"\n # - \"Property characteristic change date (CCYYMMDD)\": from IE673\n date.replace(0, np.nan, inplace=True)\n return date.floordiv(10000)\n\n\n@out\ndef year_built(year='ie673.YearBuilt'):\n return year.str.strip().replace('', np.nan).astype(float)\n\n\n@out\ndef building_sqft(sqft='ie673.BldgArea'):\n return sqft\n\n\n@out\ndef non_residential_sqft(building_sqft='parcels_out.building_sqft',\n res_type='parcels_out.res_type',\n residential_units='parcels_out.residential_units'):\n return utils.get_nonresidential_sqft(building_sqft, res_type,\n residential_units)\n\n\n@out\ndef residential_units(tot_units='ie673.Units',\n res_type='parcels_out.res_type'):\n return utils.get_residential_units(tot_units, res_type)\n \n@out\ndef condo_identifier():\n code = ' '\n return code\n\n\n@out\ndef sqft_per_unit(building_sqft='parcels_out.building_sqft',\n non_residential_sqft='parcels_out.non_residential_sqft',\n residential_units='parcels_out.residential_units'):\n return utils.get_sqft_per_unit(building_sqft, non_residential_sqft,\n residential_units)\n\n\n@out\ndef stories(stories='ie673.Stories'):\n # 1 story = 10 Alameda County 
stories.\n return 0.1 * stories\n\n\n@out\ndef tax_exempt(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_tax_exempt(land_use_type_id, exempt_codes)\n\n\n## Export back to database.\n\n\[email protected]()\ndef export_ala(parcels_out):\n df = parcels_out.to_frame()\n\n # Cast \"land_use_type_id\" to string for compatibility with other counties.\n df['land_use_type_id'] = df.land_use_type_id.astype(str)\n\n assert df.index.is_unique\n assert not df.index.hasnans()\n df_to_db(df, 'attributes_ala', schema=staging)\n\nsim.run(['export_ala'])\n", "id": "76344", "language": "Python", "matching_score": 5.994341850280762, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/counties/ala.py" }, { "content": "import numpy as np\nimport pandas as pd\nfrom spandex import TableLoader, TableFrame\nfrom spandex.io import df_to_db\nimport urbansim.sim.simulation as sim\n\nimport utils\n\n\nloader = TableLoader()\nstaging = loader.tables.staging\n\n\n## Assumptions.\n\n\n# Use codes were classified manually because the assessor classifications\n# are meant for property tax purposes. These classifications should be\n# reviewed and revised.\n# Residential status was classified using SELECT DISTINCT usecode, remark1.\nres_codes = {'single': ['1000', '1500'],\n 'multi': ['1100', '2000', '2100', '2700'],\n 'mixed': []}\nexempt_codes = []\n\n\n## Register input tables.\n\n\ntf = TableFrame(staging.parcels_sol)\n\n\[email protected](cache=True)\ndef parcels_in():\n # Build DataFrame from subset of TableFrame columns.\n column_names = ['apn', 'usecode', 'valland', 'valimp', 'pdyrblt',\n 'pdareafst', 'pdareasnd']\n df = tf[column_names]\n\n # Deduplicate DataFrame by taking the last record, but this might\n # not be the right approach.\n df.drop_duplicates('apn', take_last=True, inplace=True)\n\n df.set_index('apn', inplace=True)\n assert df.index.is_unique\n assert not df.index.hasnans()\n return df\n\n\n## Register output table.\n\n\[email protected](cache=True)\ndef parcels_out(parcels_in):\n index = pd.Series(parcels_in.index).dropna().unique()\n df = pd.DataFrame(index=index)\n df.index.name = 'apn'\n return df\n\n\n## Register output columns.\n\n\nout = sim.column('parcels_out', cache=True)\n\n\n@out\ndef county_id():\n return '095'\n\n\n@out\ndef parcel_id_local():\n pass\n\n\n@out\ndef land_use_type_id(code='parcels_in.usecode'):\n return code.astype(str)\n\n\n@out\ndef res_type(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_res_type(land_use_type_id, res_codes)\n\n\n@out\ndef land_value(value='parcels_in.valland'):\n return value\n\n\n@out\ndef improvement_value(value='parcels_in.valimp'):\n return value\n\n\n@out\ndef year_assessed():\n return np.nan\n\n\n@out\ndef year_built(year='parcels_in.pdyrblt'):\n year.replace(0, np.nan, inplace=True)\n return year\n\n\n@out\ndef building_sqft(sqft_fst='parcels_in.pdareafst',\n sqft_snd='parcels_in.pdareasnd'):\n return sqft_fst + sqft_snd\n\n\n@out\ndef non_residential_sqft(building_sqft='parcels_out.building_sqft',\n res_type='parcels_out.res_type',\n residential_units='parcels_out.residential_units'):\n return utils.get_nonresidential_sqft(building_sqft, res_type,\n residential_units)\n\n\n@out\ndef residential_units(res_type='parcels_out.res_type'):\n units = pd.Series(index=res_type.index)\n\n # If single family residential, assume one residential unit.\n units[res_type == 'single'] = 1\n \n # If multi family residential, assume one residential unit for now too (leave further assumed additions to the 
imputation section).\n units[res_type == 'multi'] = 1\n\n return units\n \n@out\ndef condo_identifier():\n code = ' '\n return code\n\n\n@out\ndef sqft_per_unit(building_sqft='parcels_out.building_sqft',\n non_residential_sqft='parcels_out.non_residential_sqft',\n residential_units='parcels_out.residential_units'):\n return utils.get_sqft_per_unit(building_sqft, non_residential_sqft,\n residential_units)\n\n\n@out\ndef stories():\n return np.nan\n\n\n@out\ndef tax_exempt(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_tax_exempt(land_use_type_id, exempt_codes)\n\n\n## Export back to database.\n\n\[email protected]()\ndef export_sol(parcels_out):\n df = parcels_out.to_frame()\n assert df.index.is_unique\n assert not df.index.hasnans()\n df_to_db(df, 'attributes_sol', schema=staging)\n\nsim.run(['export_sol'])\n", "id": "2943620", "language": "Python", "matching_score": 5.754430294036865, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/counties/sol.py" }, { "content": "import numpy as np\nimport pandas as pd\nfrom spandex import TableLoader, TableFrame\nfrom spandex.io import df_to_db\nimport urbansim.sim.simulation as sim\n\nimport utils\n\n\nloader = TableLoader()\nstaging = loader.tables.staging\n\n\n## Assumptions.\n\n\n# Use codes were classified manually because the assessor classifications\n# are meant for property tax purposes. These classifications should be\n# reviewed and revised.\nres_codes = {'single': ['11', '12', '13', '14'],\n 'multi': ['10', '15', '20', '21', '57'],\n 'mixed': []}\nexempt_codes = ['60', '61', '80']\n\n\n## Register input tables.\n\n\ntf = TableFrame(staging.parcels_mar, index_col='parcel')\nsim.add_table('parcels_in', tf, copy_col=False)\n\n\n## Register output table.\n\n\[email protected](cache=True)\ndef parcels_out(parcels_in):\n index = pd.Series(parcels_in.index).dropna().unique()\n df = pd.DataFrame(index=index)\n df.index.name = 'apn'\n return df\n\n\n## Register output columns.\n\n\nout = sim.column('parcels_out', cache=True)\n\n\n@out\ndef county_id():\n return '041'\n\n\n@out\ndef parcel_id_local():\n pass\n\n\n@out\ndef land_use_type_id(code='parcels_in.usecode'):\n # Alternate inputs:\n # - \"landuse\"\n # - \"county_lu\"\n # - \"existlu\"\n # - \"potlu\"\n return code\n\n\n@out\ndef res_type(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_res_type(land_use_type_id, res_codes)\n\n\n@out\ndef land_value(value='parcels_in.colndval'):\n # Alternate inputs:\n # - \"metlndval\"\n # - \"mettotval\"\n # - \"saleprice\"\n # - \"est05landv\"\n return value\n\n\n@out\ndef improvement_value(value='parcels_in.coimpval'):\n # Alternate inputs:\n # - \"metimpval\"\n # - \"mettotval\"\n # - \"saleprice\"\n # - \"est05imprv\"\n return value\n\n\n@out\ndef year_assessed(year='parcels_in.saledate'):\n # Alternate inputs:\n # - \"deeddate\": often same as saledate\n # - \"lastdate\": often during year preceding baseyear\n # - \"baseyear\": less data available\n year = year.str.slice(0, 4).astype(float)\n year.replace(0, np.nan, inplace=True)\n return year\n\n\n@out\ndef year_built(year='parcels_in.yearbuilt'):\n year = year.astype(float)\n year.replace(0, np.nan, inplace=True)\n return year\n\n\n@out\ndef building_sqft(sqft='parcels_in.existsqft'):\n # Alternate inputs:\n # - \"potsqft\"\n # - \"buildingsq\"\n # - \"e05tpbsqft\"\n return sqft\n\n\n@out\ndef non_residential_sqft(building_sqft='parcels_out.building_sqft',\n res_type='parcels_out.res_type',\n 
residential_units='parcels_out.residential_units'):\n return utils.get_nonresidential_sqft(building_sqft, res_type,\n residential_units)\n\n\n@out\ndef residential_units(tot_units='parcels_in.exunits',\n res_type='parcels_out.res_type'):\n # Alternate inputs:\n # - \"liv_units\": appears to include commercial units\n # - \"potunits\"\n # - \"unitsnumbe\"\n # - \"b_units\"\n return utils.get_residential_units(tot_units, res_type)\n \n@out\ndef condo_identifier():\n code = ' '\n return code\n\n\n@out\ndef sqft_per_unit(building_sqft='parcels_out.building_sqft',\n non_residential_sqft='parcels_out.non_residential_sqft',\n residential_units='parcels_out.residential_units'):\n return utils.get_sqft_per_unit(building_sqft, non_residential_sqft,\n residential_units)\n\n\n@out\ndef stories(stories='parcels_in.storiesnum'):\n return stories.astype(float)\n\n\n@out\ndef tax_exempt(land_use_type_id='parcels_out.land_use_type_id'):\n # \"exemption\" column is value of exemptions, which includes welfare.\n return utils.get_tax_exempt(land_use_type_id, exempt_codes)\n\n\n## Export back to database.\n\n\[email protected]()\ndef export_mar(parcels_out):\n df = parcels_out.to_frame()\n assert df.index.is_unique\n assert not df.index.hasnans()\n df.res_type[df.res_type.isnull()] = ''\n df_to_db(df, 'attributes_mar', schema=staging)\n\nsim.run(['export_mar'])\n", "id": "7376199", "language": "Python", "matching_score": 6.45706844329834, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/counties/mar.py" }, { "content": "import numpy as np\nimport pandas as pd\nfrom spandex import TableLoader, TableFrame\nfrom spandex.io import df_to_db\nimport urbansim.sim.simulation as sim\n\nimport utils\n\n\nloader = TableLoader()\nstaging = loader.tables.staging\n\n\n## Assumptions.\n\n\n# Use codes were classified manually because the assessor classifications\n# are meant for property tax purposes. 
These classifications should be\n# reviewed and revised.\nres_codes = {'single': [],\n 'multi': [],\n 'mixed': []}\nexempt_codes = []\n\n\n## Register input tables.\n\n\ntf = TableFrame(staging.FIXME, index_col='FIXME')\nsim.add_table('parcels_in', tf, copy_col=False)\n\n\n## Register output table.\n\n\[email protected](cache=True)\ndef parcels_out(parcels_in):\n index = pd.Series(parcels_in.index).dropna().unique()\n df = pd.DataFrame(index=index)\n df.index.name = 'apn'\n return df\n\n\n## Register output columns.\n\n\nout = sim.column('parcels_out', cache=True)\n\n\n@out\ndef county_id():\n pass\n\n\n@out\ndef parcel_id_local():\n pass\n\n\n@out\ndef land_use_type_id(code='FIXME'):\n return code\n\n\n@out\ndef res_type(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_res_type(land_use_type_id, res_codes)\n\n\n@out\ndef land_value(value='FIXME'):\n return value\n\n\n@out\ndef improvement_value(value='FIXME'):\n return value\n\n\n@out\ndef year_assessed(year='FIXME'):\n year.replace(0, np.nan, inplace=True)\n return year\n\n\n@out\ndef year_built(year='FIXME'):\n year.replace(0, np.nan, inplace=True)\n return year\n\n\n@out\ndef building_sqft(sqft='FIXME'):\n return sqft\n\n\n@out\ndef non_residential_sqft(building_sqft='parcels_out.building_sqft',\n res_type='parcels_out.res_type',\n residential_units='parcels_out.residential_units'):\n return utils.get_nonresidential_sqft(building_sqft, res_type,\n residential_units)\n\n\n@out\ndef residential_units(tot_units='ie673.Units',\n res_type='parcels_out.res_type'):\n return utils.get_residential_units(tot_units, res_type)\n\n\n@out\ndef sqft_per_unit(building_sqft='parcels_out.building_sqft',\n non_residential_sqft='parcels_out.non_residential_sqft',\n residential_units='parcels_out.residential_units'):\n return utils.get_sqft_per_unit(building_sqft, non_residential_sqft,\n residential_units)\n\n\n@out\ndef stories(stories='FIXME'):\n return stories\n\n\n@out\ndef tax_exempt(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_tax_exempt(land_use_type_id, exempt_codes)\n\n\n## Export back to database.\n\n\[email protected]()\ndef export_FIXME(parcels_out):\n df = parcels_out.to_frame()\n assert df.index.is_unique\n assert not df.index.hasnans()\n df_to_db(df, 'attributes_FIXME', schema=staging)\n\nsim.run(['export_FIXME'])\n", "id": "10914317", "language": "Python", "matching_score": 3.6497931480407715, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/counties/template.py" }, { "content": "import numpy as np\nimport pandas as pd\n\n\n# Assume that each residential unit in a mixed-used parcel occupies\n# 1500 sqft, since residential vs. 
non-residential sqft is not known.\nsqft_per_res_unit = 1500.\n\n\ndef get_res_type(land_use_type_id, res_codes):\n lu = pd.Series(index=land_use_type_id.index, dtype=object)\n for name, codes in res_codes.items():\n lu[land_use_type_id.isin(codes)] = name\n return lu\n\n\ndef get_nonresidential_sqft(building_sqft, res_type, residential_units):\n sqft = pd.Series(index=res_type.index)\n building_sqft = building_sqft.reindex(sqft.index, copy=False)\n\n # If not residential, assume all area is non-residential.\n sqft[res_type.isnull()] = building_sqft\n\n # If residential, assume zero non-residential area.\n sqft[(res_type == 'single') | (res_type == 'multi')] = 0\n\n # If mixed-use, assume residential units occupy some area.\n sqft[res_type == 'mixed'] = (building_sqft -\n sqft_per_res_unit * residential_units)\n\n # Non-residential area must not be negative.\n sqft[(sqft.notnull()) & (sqft < 0)] = 0\n\n return sqft\n\n\ndef get_residential_units(tot_units, res_type):\n units = pd.Series(index=res_type.index)\n tot_units = tot_units.reindex(units.index, copy=False)\n\n # If not residential, assume zero residential units.\n units[res_type.isnull()] = 0\n\n # If single family residential, assume one residential unit.\n units[res_type == 'single'] = 1\n\n # If non-single residential, assume all units are all residential,\n # even if mixed-use.\n units[res_type == 'multi'] = tot_units\n units[res_type == 'mixed'] = tot_units\n \n # If multi-family and zero units, assume one residential unit for now (leave further imputation to the imputation section).\n units[(res_type == 'multi')& np.logical_or((units == 0), (units.isnull()))] = 1\n\n return units\n\n\ndef get_sqft_per_unit(building_sqft, non_residential_sqft, residential_units):\n per_unit = (1. * building_sqft - non_residential_sqft) / residential_units\n per_unit.replace(np.inf, np.nan, inplace=True)\n return per_unit\n\n\ndef get_tax_exempt(land_use_type_id, exempt_codes):\n exempt = pd.Series(index=land_use_type_id.index, dtype=int)\n exempt[land_use_type_id.isin(exempt_codes)] = 1\n exempt[~land_use_type_id.isin(exempt_codes)] = 0\n return exempt\n", "id": "5365984", "language": "Python", "matching_score": 2.9978086948394775, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/counties/utils.py" }, { "content": "import numpy as np\nimport pandas as pd\nfrom spandex import TableLoader, TableFrame\nfrom spandex.io import df_to_db\nimport urbansim.sim.simulation as sim\n\nimport utils\n\n\nloader = TableLoader()\nstaging = loader.tables.staging\n\n\n## Assumptions.\n\n\nexempt_codes = []\n\n\n## Register input tables.\n\n\ntf = TableFrame(staging.parcels_nap, index_col='asmt')\nsim.add_table('parcels_in', tf, copy_col=False)\n\n\[email protected](cache=True)\ndef buildings():\n df = loader.get_attributes('built/parcel/2010/nap/Napa_buildings.dbf')\n\n # Usually duplicate records are similar, but sometimes the last record\n # appears to have more information.\n df.drop_duplicates('FeeParcel', take_last=True, inplace=True)\n\n df.set_index('FeeParcel', inplace=True)\n assert df.index.is_unique\n assert not df.index.hasnans()\n return df\n\n\[email protected](cache=True)\ndef taxroll():\n df = loader.get_attributes('built/parcel/2010/nap/Napa_taxroll.dbf')\n\n # Take the last of duplicate records for consistency with buildings\n # table, but this might not be the right approach.\n df.drop_duplicates('Asmt', take_last=True, inplace=True)\n\n df.set_index('Asmt', inplace=True)\n assert df.index.is_unique\n assert not 
df.index.hasnans()\n return df\n\n\n## Register output table.\n\n\[email protected](cache=True)\ndef parcels_out(parcels_in):\n index = pd.Series(parcels_in.index).dropna().unique()\n df = pd.DataFrame(index=index)\n df.index.name = 'apn'\n return df\n\n\n## Register output columns.\n\n\nout = sim.column('parcels_out', cache=True)\n\n\n@out\ndef county_id():\n return '055'\n\n\n@out\ndef parcel_id_local():\n pass\n\n\n@out\ndef land_use_type_id(code='parcels_in.landuse1'):\n # Take last land use code when deduplicating.\n return code.groupby(level=0).last()\n\n\n@out\ndef land_value(value='taxroll.CurrentMar'):\n # Alternate inputs:\n # - \"CurrentNet\"\n # Need to separate land value and improvement value.\n # Assumed crude 50-50 split.\n return 0.5 * value\n\n\n@out\ndef improvement_value(value='taxroll.CurrentMar'):\n # Alternate inputs:\n # - \"CurrentNet\"\n # Need to separate land value and improvement value.\n # Assumed crude 50-50 split.\n return 0.5 * value\n\n\n@out\ndef year_assessed(year='taxroll.TaxYear'):\n # It is not clear what year \"CurrentMar\" refers to, but assume it\n # refers to \"TaxYear\".\n year.replace(0, np.nan, inplace=True)\n return year\n\n\n@out\ndef year_built(year='buildings.YearBuilt'):\n year.replace(0, np.nan, inplace=True)\n return year\n\n\n@out\ndef building_sqft(sqft='buildings.BuildingSi'):\n return sqft\n\n\n@out\ndef non_residential_sqft(sqft='buildings.SqFtOffice'):\n return sqft\n\n\n# @out\n# def residential_units(units='buildings.NumUnitsRe'):\n # return units\n# @out\n# def residential_units(tot_units = 'buildings.NumUnitsRe', res_type = 'parcels_in.landuse1'):\n\n # units = pd.Series(index=res_type.index)\n # tot_units = tot_units.reindex(units.index, copy=False)\n # units[tot_units > 0] = tot_units\n \n ## If single family residential and no current units, assume one residential unit.\n # units[res_type.isin(['11','111'])*np.logical_or((tot_units==0), tot_units.isnull())] = 1\n \n # return units\n \n@out\ndef residential_units(tot_units = 'buildings.NumUnitsRe', res_type = 'parcels_in.landuse1'):\n\n units = pd.Series(index=tot_units.index)\n res_type = res_type.groupby(level=0).last()\n res_type = res_type.reindex(tot_units.index, copy = False)\n units[tot_units > 0] = tot_units\n \n # If single family residential and no current units, assume one residential unit.\n units[res_type.isin(['11','111','05','23','31','32','39','312','313','314','315','322','323','324','325','392','393','394','395','3101','3201','3901','12'])*np.logical_or((tot_units==0), tot_units.isnull())] = 1\n \n # If land use implies 2 units and no current units, assume 2 residential units.\n units[res_type.isin(['212', '3921', '2122'])*np.logical_or((tot_units==0), tot_units.isnull())] = 2\n \n # If land use implies 3 units and no current units, assume 3 residential units.\n units[res_type.isin(['21','213','3931','2133'])*np.logical_or((tot_units==0), tot_units.isnull())] = 3\n \n # If land use implies 4 units and no current units, assume 4 residential units.\n units[res_type.isin(['214', '3941', '2144'])*np.logical_or((tot_units==0), tot_units.isnull())] = 4\n \n # If land use implies 7 units and no current units, assume 7 residential units.\n units[res_type.isin(['215'])*np.logical_or((tot_units==0), tot_units.isnull())] = 7\n \n # If land use implies 14 units and no current units, assume 14 residential units.\n units[res_type.isin(['216'])*np.logical_or((tot_units==0), tot_units.isnull())] = 14\n \n # If land use implies 30 units and no current units, assume 30 
residential units.\n units[res_type.isin(['217'])*np.logical_or((tot_units==0), tot_units.isnull())] = 30\n\n # If land use implies 50 units and no current units, assume 50 residential units.\n units[res_type.isin(['218'])*np.logical_or((tot_units==0), tot_units.isnull())] = 50\n \n return units\n \n@out\ndef res_type(tot_units = 'buildings.NumUnitsRe', res_type = 'parcels_in.landuse1'):\n\n units = pd.Series(index=tot_units.index)\n res_type = res_type.groupby(level=0).last()\n res_type = res_type.reindex(tot_units.index, copy = False)\n \n # If single family residential\n res_type[res_type.isin(['11','111','05','23','31','32','39','312','313','314','315','322','323','324','325','392','393','394','395','3101','3201','3901','12'])] = 'single'\n \n # If multifamily\n res_type[res_type.isin(['212', '3921', '2122','21','213','3931','2133','214', '3941', '2144','215','216','217','218'])] = 'multi'\n\n res_type[~res_type.isin(['single','multi'])] = 'other'\n \n return res_type\n \n@out\ndef condo_identifier():\n code = ' '\n return code\n\n\n@out\ndef sqft_per_unit(building_sqft='parcels_out.building_sqft',\n non_residential_sqft='parcels_out.non_residential_sqft',\n residential_units='parcels_out.residential_units'):\n return utils.get_sqft_per_unit(building_sqft, non_residential_sqft,\n residential_units)\n\n\n@out\ndef stories(stories='parcels_in.floor'):\n # Take greatest number of stories when deduplicating.\n return stories.groupby(level=0).max().astype(float)\n\n\n@out\ndef tax_exempt(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_tax_exempt(land_use_type_id, exempt_codes)\n\n\n## Export back to database.\n\n\[email protected]()\ndef export_nap(parcels_out):\n df = parcels_out.to_frame()\n assert df.index.is_unique\n assert not df.index.hasnans()\n df_to_db(df, 'attributes_nap', schema=staging)\n\nsim.run(['export_nap'])\n", "id": "2877312", "language": "Python", "matching_score": 5.2896833419799805, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/counties/nap.py" }, { "content": "import numpy as np\nimport pandas as pd\nfrom spandex import TableLoader, TableFrame\nfrom spandex.io import df_to_db\nimport urbansim.sim.simulation as sim\n\nimport utils\n\n\nloader = TableLoader()\nstaging = loader.tables.staging\n\n\n## Assumptions.\n\n\nexempt_codes = []\n\n\n## Register input tables.\n\n\n# Alternate index column is \"mapblklot\", but it was not unique for\n# parcels in Treasure Island.\ntf = TableFrame(staging.parcels_sfr, index_col='blklot')\nsim.add_table('parcels_in', tf, copy_col=False)\n\n\n## Register output table.\n\n\[email protected](cache=True)\ndef parcels_out(parcels_in):\n index = pd.Series(parcels_in.index).dropna().unique()\n df = pd.DataFrame(index=index)\n df.index.name = 'apn'\n return df\n\n\n## Register output columns.\n\n\nout = sim.column('parcels_out', cache=True)\n\n\n@out\ndef county_id():\n return '075'\n\n\n@out\ndef parcel_id_local():\n pass\n\n\n@out\ndef land_use_type_id(code='parcels_in.landuse'):\n # Alternate inputs:\n # - \"usetype\"\n # - \"mixeduse\"\n code[(code == 'MISSING DATA') | (code == 'Missing Data')] = None\n return code\n\n\n@out\ndef res_type(restype='parcels_in.restype'):\n return restype\n\n\n@out\ndef land_value(value='parcels_in.landval'):\n # Alternate inputs:\n # - \"lastsalepr\"\n # - \"assessedva\"\n # - \"estimatedv\"\n return value.astype(float)\n\n\n@out\ndef improvement_value(value='parcels_in.strucval'):\n # Alternate inputs:\n # - \"lastsalepr\"\n # - \"assessedva\"\n # - 
\"estimatedv\"\n return value.astype(float)\n\n\n@out\ndef year_assessed(date='parcels_in.lastsale'):\n year = date.apply(lambda d: d.year if d else np.nan)\n year.replace([-1, 0], np.nan, inplace=True)\n return year\n\n\n@out\ndef year_built(year='parcels_in.yrbuilt'):\n year.replace(0, np.nan, inplace=True)\n return year\n\n\n@out\ndef building_sqft(sqft='parcels_in.bldgsqft'):\n # Alternate inputs:\n # - \"lidarsqft\"\n # - \"high_sqft\"\n # - \"total_uses\"\n # - \"costarrba\"\n # - \"totalsqft\"\n # - \"netsqft\"\n # May want to aggregate multiple inputs to avoid zeros that represent\n # missing data.\n return sqft.astype(float)\n\n\n@out\ndef non_residential_sqft(sqft='parcels_in.commlsqft_'):\n # Alternate inputs:\n # - \"commsqft\"\n return sqft.astype(float)\n\n\n@out\ndef residential_units(units='parcels_in.resunits'):\n return units\n \n@out\ndef condo_identifier():\n code = ' '\n return code\n\n\n@out\ndef sqft_per_unit(ressqft='parcels_in.ressqft',\n residential_units='parcels_out.residential_units'):\n # Alternate inputs:\n # - \"ressqft_ex\"\n per_unit = 1. * ressqft / residential_units\n per_unit.replace(np.inf, np.nan, inplace=True)\n return per_unit\n\n\n@out\ndef stories(stories='parcels_in.stories'):\n # Alternate inputs:\n # - \"lidarstori\": always zero\n return stories\n\n\n@out\ndef tax_exempt(land_use_type_id='parcels_out.land_use_type_id'):\n return utils.get_tax_exempt(land_use_type_id, exempt_codes)\n\n\n## Export back to database.\n\n\[email protected]()\ndef export_sfr(parcels_out):\n df = parcels_out.to_frame()\n assert df.index.is_unique\n assert not df.index.hasnans()\n df_to_db(df, 'attributes_sfr', schema=staging)\n\nsim.run(['export_sfr'])\n", "id": "6382186", "language": "Python", "matching_score": 3.10918927192688, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/counties/sfr.py" }, { "content": "from spandex import TableLoader\nfrom spandex.spatialtoolz import geom_unfilled\nfrom spandex.io import exec_sql\n\nloader = TableLoader()\n\n################\n#### Approach 1: Merge geometries (and aggregate attributes) based on a common identifier\n################\nprint 'PARCEL AGGREGATION: Merge geometries (and aggregate attributes) based on a common identifier'\n\nexec_sql(\"\"\"\n-- SCL\ndrop table if exists condos_scl;\n\nSELECT \nmax(county_id) as county_id,\nmax(apn) as apn,\nmax(parcel_id_local) as parcel_id_local,\nmax(land_use_type_id) as land_use_type_id,\nmax(res_type) as res_type,\nsum(land_value) as land_value,\nsum(improvement_value) as improvement_value,\nmax(year_assessed) as year_assessed,\nmax(year_built) as year_built,\nsum(building_sqft) as building_sqft,\nsum(non_residential_sqft) as non_residential_sqft,\nsum(residential_units) as residential_units,\nmax(sqft_per_unit) as sqft_per_unit,\nmax(stories) as stories,\nmax(tax_exempt) as tax_exempt,\ncondo_identifier,\nST_CollectionExtract(ST_Multi(ST_Union(geom)), 3) AS geom,\nmax(gid) as gid,\n'merged' as imputation_flag,\nmax(development_type_id) as development_type_id\ninto condos_scl\nFROM parcels\nwhere county_id = '085' AND land_use_type_id = 'RCON' AND length(condo_identifier)>3\nGROUP BY condo_identifier;\n\ndelete from parcels where county_id = '085' AND land_use_type_id = 'RCON' AND length(condo_identifier)>3 ;\n\ninsert into parcels select * from condos_scl;\n\n-- CNC\ndrop table if exists condos_cnc;\n\nSELECT \nmax(county_id) as county_id,\nmax(apn) as apn,\nmax(parcel_id_local) as parcel_id_local,\nmax(land_use_type_id) as land_use_type_id,\nmax(res_type) as 
res_type,\nsum(land_value) as land_value,\nsum(improvement_value) as improvement_value,\nmax(year_assessed) as year_assessed,\nmax(year_built) as year_built,\nsum(building_sqft) as building_sqft,\nsum(non_residential_sqft) as non_residential_sqft,\nsum(residential_units) as residential_units,\nmax(sqft_per_unit) as sqft_per_unit,\nmax(stories) as stories,\nmax(tax_exempt) as tax_exempt,\ncondo_identifier,\nST_CollectionExtract(ST_Multi(ST_Union(geom)), 3) AS geom,\nmax(gid) as gid,\n'merged' as imputation_flag,\nmax(development_type_id) as development_type_id\ninto condos_cnc\nFROM parcels\nwhere county_id = '013' AND land_use_type_id = '29' AND length(condo_identifier)>3\nGROUP BY condo_identifier;\n\ndelete from parcels where county_id = '013' AND land_use_type_id = '29' AND length(condo_identifier)>3 ; \n\ninsert into parcels select * from condos_cnc;\n\n-- SON\n\ndrop table if exists condos_son;\n\nSELECT \nmax(county_id) as county_id,\nmax(apn) as apn,\nmax(parcel_id_local) as parcel_id_local,\nmax(land_use_type_id) as land_use_type_id,\nmax(res_type) as res_type,\nsum(land_value) as land_value,\nsum(improvement_value) as improvement_value,\nmax(year_assessed) as year_assessed,\nmax(year_built) as year_built,\nsum(building_sqft) as building_sqft,\nsum(non_residential_sqft) as non_residential_sqft,\nsum(residential_units) as residential_units,\nmax(sqft_per_unit) as sqft_per_unit,\nmax(stories) as stories,\nmax(tax_exempt) as tax_exempt,\ncondo_identifier,\nST_CollectionExtract(ST_Multi(ST_Union(geom)), 3) AS geom,\nmax(gid) as gid,\n'merged' as imputation_flag,\nmax(development_type_id) as development_type_id\ninto condos_son\nFROM parcels\nwhere county_id = '097' AND length(condo_identifier)>3 and res_type = 'multi'\nGROUP BY condo_identifier;\n\ndelete from parcels where county_id = '097' AND length(condo_identifier)>3 and res_type = 'multi';\n\ninsert into parcels select * from condos_son;\n\"\"\")\n\n\n################\n#### Approach 2: Merge geometries (and aggregate attributes) based on within-interior-ring status\n################\nprint 'PARCEL AGGREGATION: Merge geometries (and aggregate attributes) based on within-interior-ring status'\n\nexec_sql(\"\"\"\ndrop table if exists unfilled;\ndrop table if exists unfilled_exterior;\ndrop table if exists aggregation_candidates;\ndrop table if exists parcels_small;\n\"\"\")\n\nloader.database.refresh()\nt = loader.tables\n\n##Identify parcels with interior rings. 
This is an indication of encircling common space that is typical in condo projects.\ndf = geom_unfilled(t.public.parcels, 'unfilled')\n\nexec_sql(\"\"\"\nALTER TABLE unfilled\n ALTER COLUMN geom TYPE geometry(MultiPolygon) USING ST_Multi(geom);\nSELECT UpdateGeometrySRID('unfilled', 'geom', 2768);\n\"\"\")\n\n#Calculate area and delete exterior polygons below certain threshold size \nexec_sql(\"\"\"\nALTER TABLE parcels ADD COLUMN calc_area numeric;\nUPDATE parcels SET calc_area = ST_Area(geom);\nselect * into parcels_small from parcels where (calc_area < 550000) and res_type='multi';\nALTER TABLE parcels_small ADD PRIMARY KEY (gid);\nCREATE INDEX small_parcel_gidx on parcels_small using gist (geom);\n\"\"\")\n\nexec_sql(\"\"\"\nALTER TABLE unfilled ADD COLUMN calc_area numeric;\nUPDATE unfilled SET calc_area = ST_Area(geom);\ndelete from unfilled where calc_area > 550000;\n\"\"\")\n\nexec_sql(\"\"\"\nSELECT gid, ST_Collect(ST_MakePolygon(geom)) As geom\ninto unfilled_exterior\nFROM (\n SELECT gid, ST_ExteriorRing((ST_Dump(geom)).geom) As geom\n FROM unfilled\n ) s\nGROUP BY gid;\nALTER TABLE unfilled_exterior ADD PRIMARY KEY (gid);\nCREATE INDEX exterior_gidx on unfilled_exterior using gist (geom);\n\"\"\")\n\nexec_sql(\"\"\"\nwith a as(\nSELECT a.*, b.gid as parent_gid FROM parcels_small a, unfilled_exterior b WHERE a.geom && b.geom AND ST_Contains(b.geom, a.geom)\n)\nselect distinct * into aggregation_candidates from a;\n\"\"\")\n\nexec_sql(\"\"\"\n\ndelete from parcels where gid in (select distinct gid from aggregation_candidates);\n\nwith a as(\nSELECT \nmax(county_id) as county_id,\nmax(apn) as apn,\nmax(parcel_id_local) as parcel_id_local,\nmax(land_use_type_id) as land_use_type_id,\nmax(res_type) as res_type,\nsum(land_value) as land_value,\nsum(improvement_value) as improvement_value,\nmax(year_assessed) as year_assessed,\nmax(year_built) as year_built,\nsum(building_sqft) as building_sqft,\nsum(non_residential_sqft) as non_residential_sqft,\nsum(residential_units) as residential_units,\nmax(sqft_per_unit) as sqft_per_unit,\nmax(stories) as stories,\nmax(tax_exempt) as tax_exempt,\nmax(condo_identifier) as condo_identifier,\nST_CollectionExtract(ST_Multi(ST_Union(geom)), 3) AS geom,\ngid,\nmax(development_type_id) as development_type_id,\nmax(parent_gid) as parent_gid\nFROM aggregation_candidates\nGROUP BY gid\n), b as(\nSELECT \nmax(county_id) as county_id,\nmax(apn) as apn,\nmax(parcel_id_local) as parcel_id_local,\nmax(land_use_type_id) as land_use_type_id,\nmax(res_type) as res_type,\nsum(land_value) as land_value,\nsum(improvement_value) as improvement_value,\nmax(year_assessed) as year_assessed,\nmax(year_built) as year_built,\nsum(building_sqft) as building_sqft,\nsum(non_residential_sqft) as non_residential_sqft,\nsum(residential_units) as residential_units,\nmax(sqft_per_unit) as sqft_per_unit,\nmax(stories) as stories,\nmax(tax_exempt) as tax_exempt,\nmax(condo_identifier) as condo_identifier,\nST_CollectionExtract(ST_Multi(ST_Union(geom)), 3) AS geom,\nmax(gid) as gid,\n'merged' as imputation_flag,\nmax(development_type_id) as development_type_id\nFROM a\nGROUP BY parent_gid\n)\ninsert into parcels\nselect * from b;\n\"\"\")\n\n\n################\n#### Approach 3: Merge geometries (and aggregate attributes) if duplicate stacked parcel geometry\n################\nprint 'PARCEL AGGREGATION: Merge geometries (and aggregate attributes) if duplicate stacked parcel geometry'\n\nprint 'Collapsing and aggregating stacked parcels'\nexec_sql(\"\"\"\ndrop table if exists 
stacked;\ndrop table if exists stacked_merged;\n\nSELECT * into stacked FROM parcels\nwhere gid in (SELECT distinct p2.gid FROM parcels p1, parcels p2\nWHERE p1.geom && p2.geom AND p1.geom=p2.geom AND p1.gid <> p2.gid);\n\nSELECT \nmax(county_id) as county_id,\nmax(apn) as apn,\nmax(parcel_id_local) as parcel_id_local,\nmax(land_use_type_id) as land_use_type_id,\nmax(res_type) as res_type,\nsum(land_value) as land_value,\nsum(improvement_value) as improvement_value,\nmax(year_assessed) as year_assessed,\nmax(year_built) as year_built,\nsum(building_sqft) as building_sqft,\nsum(non_residential_sqft) as non_residential_sqft,\nsum(residential_units) as residential_units,\nmax(sqft_per_unit) as sqft_per_unit,\nmax(stories) as stories,\nmax(tax_exempt) as tax_exempt,\nmax(condo_identifier) as condo_identifier,\ngeom,\nmax(gid) as gid,\n'merged' as imputation_flag,\nmax(development_type_id) as development_type_id\ninto stacked_merged\nFROM stacked\nGROUP BY geom;\n\ndelete from parcels where gid in (select distinct gid from stacked);\n\ninsert into parcels\nselect * from stacked_merged;\n\"\"\")\n\n## Update parcel area post-aggregation\nexec_sql(\"\"\"\nUPDATE parcels SET calc_area = ST_Area(geom);\n\"\"\")", "id": "7313444", "language": "Python", "matching_score": 2.386556625366211, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/geom_aggregation.py" }, { "content": "#!/usr/bin/env python\n\nimport os\nimport subprocess\nimport sys\n\nfrom spandex import TableLoader\n\n\npython = sys.executable\nroot_path = os.path.dirname(__file__)\n\n\ndef run(filename):\n \"\"\"\"Run Python file relative to script without blocking.\"\"\"\n path = os.path.join(root_path, filename)\n return subprocess.Popen([python, path])\n\n\ndef check_run(filename):\n \"\"\"Run Python file relative to script, block, assert exit code is zero.\"\"\"\n path = os.path.join(root_path, filename)\n return subprocess.check_call([python, path])\n\n\nprint(\"PREPROCESSING: Loading shapefiles by county.\")\n\n# Load shapefile data inputs, fix invalid geometries, and reproject.\n# aka load_county_parcel_data_to_staging\ncheck_run('load.py')\n\n\nprint(\"PROCESSING: Loading parcel attributes by county.\")\n\n# Run county attribute processing scripts.\n# aka homogenize_and_clean_county_parcel_attributes\n# a lot of these scripts drop duplicate APN parcels\ncounty_names = ['ala', 'cnc', 'mar', 'nap', 'scl', 'sfr', 'smt', 'sol', 'son']\nfor name in county_names:\n filename = os.path.join('counties', name + '.py')\n check_run(filename)\n\n\nprint(\"PROCESSING: Combining to create regional parcels table.\")\n\n# Join the county attributes and geometries and union the counties together.\n# aka merge_county_parcel_tables_into_one_spatial_table\nloader = TableLoader()\nsql_path = os.path.join(root_path, 'join_counties.sql')\nwith open(sql_path) as sql:\n with loader.database.cursor() as cur:\n cur.execute(sql.read())\n \n \nprint(\"PROCESSING: Aggregating condos and stacked parcels.\")\n \n# Aggregate parcels for multi-geometry representation of buildings\n# aka spatially_combine_those_parcels\n# this attempts to combine parcel geometries in ways\n# that might relate to the way a developer would think about them\n# filling in common spaces around buildings, combining condos together\ncheck_run('geom_aggregation.py')\n\n\nprint(\"PROCESSING: Imputing attributed based on POINT data sources.\")\n \n# Applying information based on Redfin, Costar, employment points etc.\n# aka 
assign_historic_sales_data_to_parcels_by_spatial_proximity\n# assigns redfin and costar data to parcels\n# this also does various checks for the reasonableness of parcel attributes\n# (for example, year built within a reasonable year, price reasonable, etc)\n# and then it also seems to make some decisions about when to assign\n# the redfin sale price to a parcel\ncheck_run('point_based_imputation.py')\n\n\nprint(\"POSTPROCESSING: Applying spatial operations: tag with location identifiers.\")\n\n# Apply spatial operations to append location identifiers.\n# aka tag parcels with administrative names (e.g. TAZ number)\ncheck_run('spatialops.py')\n\n\nprint(\"PROCESSING: Synthesizing/scaling to match aggregate zonal totals.\")\n\n# Matching aggregate zonal totals\n# aka move_jobs_and_houses_to_match_aggregate_taz_2010_totals_csv\ncheck_run('match_aggregate.py')\n\n\nprint(\"PROCESSING: Adding/populating price fields to the building table.\")\n\n# Imputing base-year residential and non-residential prices\n# aka predict_prices_of_buildings_and_parcels_with_regressions\ncheck_run('price_imputation.py')\n\n\nprint(\"PROCESSING: Allocating households and jobs to buildings.\")\n\n# Allocating households and jobs to buildings\n# aka put_people_jobs_on_parcels\ncheck_run('demand_agent_allocation.py')\n\n\nprint(\"SUMMARIZING: Generating data summaries.\")\n\n# Output summary CSV files by county and TAZ.\n# aka summarize_by_taz_and_county\ncheck_run('summaries.py')\n\n# Output core tables to HDF5 for UrbanSim.\n# aka export_database_to_h5\ncheck_run('export_to_h5.py')\n", "id": "9638708", "language": "Python", "matching_score": 3.2736430168151855, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/run.py" }, { "content": "import logging\n\nimport pandas as pd\nfrom spandex import TableLoader\nfrom spandex.io import df_to_db, exec_sql, logger\nfrom spandex.spatialtoolz import conform_srids\n\n\nlogger.setLevel(logging.INFO)\n\n\nshapefiles = {\n #'staging.controls_blocks':\n #'hh/control_sm/block10_gba.shp',\n\n #'staging.controls_blockgroups':\n #'hh/control_sm/blockgroup10_gba.shp',\n\n #'staging.nat_farms':\n #'nat/farm/williamson_act.shp',\n\n #'staging.nat_slopes_gt6':\n #'nat/slope/gt6pctslope_1km.shp',\n\n #'staging.nat_slopes_gt12':\n #'nat/slope/gt12pctslope_1km',\n\n #'staging.nat_water':\n #'nat/water/bayarea_allwater.shp',\n\n #'staging.nat_water_wetlands':\n #'nat/wetlands/wetlands.shp',\n\n 'staging.parcels_ala':\n 'built/parcel/2010/ala/parcelsAlaCo2010/asr_parcel.shp',\n\n 'staging.parcels_cnc_poly':\n 'built/parcel/2010/cnc/raw10/CAD_AO_ParcelPoly_0410.shp',\n\n 'staging.parcels_cnc_pt':\n 'built/parcel/2010/cnc/raw10/CAD_AO_ParcelPoints_int0410.shp',\n\n 'staging.parcels_nap':\n 'built/parcel/2010/nap/Napa_Parcels.shp',\n\n 'staging.parcels_nap_tract':\n 'built/parcel/2010/nap/Napa_Census_tract.shp',\n\n 'staging.parcels_mar':\n 'built/parcel/2005/parcels2005_mar.shp',\n\n 'staging.parcels_scl':\n 'built/parcel/2010/scl/parcels2010_scl.shp',\n\n 'staging.parcels_sfr':\n 'built/parcel/2010/sfr/parcels2010_sfr.shp',\n\n 'staging.parcels_smt':\n 'built/parcel/2010/smt/shapefiles/ACTIVE_PARCELS_APN.shp',\n\n 'staging.parcels_sol':\n 'built/parcel/2010/sol/Parcels.shp',\n\n 'staging.parcels_sol_zoning':\n 'built/parcel/2010/sol/zoning.shp',\n\n 'staging.parcels_son':\n 'built/parcel/2010/son/PAR_PARCELS.shp',\n\n # Geometry type is MultiPolygonZM.\n #'staging.parcels_son_exlu':\n #'built/parcel/2010/son/parcels2010_son/Final2010exlu.shp',\n\n 'staging.taz':\n 
'juris/reg/zones/taz1454.shp',\n \n 'staging.establishment_points':\n 'emp/micro/est10_gt1/est10_esri_gt1.shp',\n}\n\n\n# Install PostGIS and create staging schema.\nloader = TableLoader()\nwith loader.database.cursor() as cur:\n cur.execute(\"\"\"\n CREATE EXTENSION IF NOT EXISTS postgis;\n CREATE SCHEMA IF NOT EXISTS staging;\n \"\"\")\nloader.database.refresh()\n\n# Load shapefiles specified above to the project database.\nloader.load_shp_map(shapefiles)\n\n# Fix invalid geometries and reproject.\nstaging = loader.tables.staging\nconform_srids(loader.srid, schema=staging, fix=True)\n\n# Load county land use code mapping.\ncsv = loader.get_path('built/parcel/2010/rtp13_processing_notes/lucodes.csv')\ndf = pd.read_csv(csv, dtype=str)\ndf.dropna(how='any', inplace=True,\n subset=['county_id', 'land_use_type_id', 'development_type_id'])\ndf.index.name = 'index'\ndf_to_db(df, 'lucodes', schema=staging)\n\n# Add county land use code mapping unique constraint.\nexec_sql(\"\"\"\nALTER TABLE staging.lucodes ADD CONSTRAINT lucodes_unique\nUNIQUE (county_id, land_use_type_id);\n\"\"\")", "id": "10362380", "language": "Python", "matching_score": 1.1674100160598755, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/load.py" }, { "content": "from pymongo import MongoClient\nfrom bson.objectid import ObjectId\nimport pandas as pd\nimport geopandas\nimport json\nimport time\nimport fiona\nfrom string import join\nfrom shapely.geometry import shape\n\nMONGO = True\nJURIS = None\nFEASIBILITY = True\n\ncid = \"ZC7yyAyA8jkDFnRtf\" # parcels\ncsvname = \"output/parcels.csv\"\n\nif FEASIBILITY:\n cid = \"hMm5FqbDCPa4ube6Y\" # feasibility\n csvname = \"output/feasibility.csv\"\n\nif MONGO:\n client = MongoClient()\n #client.drop_database(\"baus\")\n db = client.togethermap\nelse:\n outf = open(\"parcels.json\", \"w\")\n\ndf = pd.read_csv(csvname, index_col=\"geom_id\")\n\ncnt = 0\nfeatures = []\n\nprint time.ctime()\n\n\ndef add_bbox(p):\n bounds = shape(p['geometry']).bounds\n minx, miny, maxx, maxy = bounds\n poly = {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [ [minx, miny], [minx, maxy], [maxx, maxy],\n [maxx, miny], [minx, miny] ]\n ]\n }\n p['bbox'] = poly\n return p\n\n\ndef export_features(features):\n global MONGO, db, outf\n if MONGO:\n db.places.insert_many(features)\n else:\n outf.write(join([json.dumps(f) for f in features], \"\\n\"))\n\n\nwith fiona.drivers():\n with fiona.open('/home/ubuntu/data/parcels4326.shp') as shp:\n \n for f in shp:\n\n cnt += 1\n if cnt % 10000 == 0:\n print \"Done reading rec %d\" % cnt\n\n if len(features) == 10000:\n\n print \"Exporting 10k recs\"\n export_features(features)\n print \"Done exporting 10k recs\"\n\n features = []\n\n geom_id = int(f[\"properties\"][\"GEOM_ID\"])\n try:\n rec = df.loc[geom_id]\n except:\n # don't need to keep it, it's not in parcels.csv\n continue\n \n if JURIS and rec[\"juris\"] != JURIS:\n continue\n\n f[\"properties\"] = rec.to_dict()\n f[\"properties\"][\"geom_id\"] = geom_id\n del f[\"id\"]\n\n f[\"creatorUID\"] = \"ceTir2NKMN87Gq7wj\"\n f[\"creator\"] = \"<NAME>\"\n f[\"createDate\"] = \"2015-08-29T05:10:00.446Z\"\n f[\"updateDate\"] = \"2015-08-29T05:10:00.446Z\"\n f[\"collectionId\"] = cid\n f['_id'] = str(ObjectId())\n f[\"post_count\"] = 0\n\n f = add_bbox(f)\n\n features.append(f)\n \nif len(features):\n export_features(features)\n\nprint time.ctime()\n", "id": "12132133", "language": "Python", "matching_score": 1.9803245067596436, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/export2.py" }, { 
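# A minimal, hedged sketch of the bounding-box step used by add_bbox() in
# export2.py above: the feature dict and its coordinates here are illustrative
# assumptions; only shapely's shape(...).bounds usage mirrors the source.
from shapely.geometry import shape

feature = {
    "geometry": {
        "type": "Polygon",
        "coordinates": [[[0.0, 0.0], [2.0, 0.0], [2.0, 1.0],
                         [0.0, 1.0], [0.0, 0.0]]],
    }
}

# Same idea as add_bbox(): take the geometry's bounds and wrap them in a
# closed polygon stored on the feature, so its extent travels with it.
minx, miny, maxx, maxy = shape(feature["geometry"]).bounds
feature["bbox"] = {
    "type": "Polygon",
    "coordinates": [[[minx, miny], [minx, maxy], [maxx, maxy],
                     [maxx, miny], [minx, miny]]],
}
print feature["bbox"]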
"content": "from monary import Monary\nimport numpy as np\nimport pandas as pd\nimport time\n\nmon = Monary()\n\ncolumns = [\n 'properties.total_residential_units',\n 'properties.total_job_spaces',\n 'properties.parcel_id',\n 'properties.max_dua',\n 'properties.max_far'\n]\n\nt1 = time.time()\n\nnumpy_arrays = mon.query(\n 'togethermap',\n 'places',\n {'collectionId': 'ZC7yyAyA8jkDFnRtf'},\n columns,\n ['float32']*len(columns)\n)\n\ndf = np.matrix(numpy_arrays).transpose() \ndf = pd.DataFrame(df, columns=columns)\n\nprint time.time()-t1\nprint df.describe()\n", "id": "3516171", "language": "Python", "matching_score": 1.5232731103897095, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/import.py" }, { "content": "import sys\nimport json\nfrom flask import Flask, jsonify\nfrom flask.ext.cors import CORS\nimport pandas as pd\nimport orca\n\nsys.path.append(\".\")\nimport models\n\napp = Flask(__name__)\n# app.debug = True\ncors = CORS(app)\n\nMAX_PARCELS_RETURNED = 5000\n\nprint \"Loading\"\n\nstore = pd.HDFStore('data/bayarea_v3.h5')\n\nflds = ['parcel_id', 'parcel_acres', 'total_residential_units',\n 'total_job_spaces', 'x', 'y', 'pda']\nparcels = orca.get_table('parcels').to_frame(flds)\nflds = ['building_id', 'residential_units', 'job_spaces',\n 'building_type_id', 'parcel_id', 'building_sqft']\nbuildings = orca.get_table('buildings').to_frame(flds)\n# households = orca.get_table('households').to_frame()\n# jobs = orca.get_table('jobs').to_frame()\nflds = ['parcel_id', 'max_dua', 'max_far', 'max_height', 'type1',\n 'type2', 'type3', 'type4']\nzoning = orca.get_table('zoning_baseline').to_frame(flds)\n\nprint \"Ready\"\n\n\[email protected]('/extract/<query>')\ndef get_data(query):\n\n print \"Got query:\", query\n\n # global parcels, buildings, households, jobs, zoning_baseline\n global parcels, buildings, zoning_baseline\n\n p = parcels.reset_index().query(query).set_index('parcel_id')\n\n print \"Len parcels:\", len(p)\n\n if len(p) > MAX_PARCELS_RETURNED:\n return jsonify(**{\n \"status\": \"Error: can only ask for %d parcels\" %\n MAX_PARCELS_RETURNED\n })\n\n d = {}\n d['parcels'] = json.loads(p.to_json(orient='index'))\n\n z = zoning[zoning.index.isin(p.index)]\n d['zoning'] = json.loads(z.to_json(orient='index'))\n\n b = buildings[buildings.parcel_id.isin(p.index)]\n d['buildings'] = json.loads(b.to_json(orient='index'))\n\n # h = households[households.building_id.isin(b.index)]\n # d['households'] = json.loads(h.to_json(orient='index'))\n\n # j = jobs[jobs.building_id.isin(b.index)]\n # d['jobs'] = json.loads(j.to_json(orient='index'))\n\n return jsonify(**{\n \"status\": \"Success\",\n \"data\": d\n })\n\n\nif __name__ == '__main__':\n\n # from tornado.wsgi import WSGIContainer\n # from tornado.httpserver import HTTPServer\n # from tornado.ioloop import IOLoop\n\n # http_server = HTTPServer(WSGIContainer(app))\n # http_server.listen(5000)\n # IOLoop.instance().start()\n app.run('0.0.0.0', 1984)\n", "id": "10357743", "language": "Python", "matching_score": 2.771074056625366, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/serve_json.py" }, { "content": "import models\nimport pandas as pd\nimport urbansim.sim.simulation as sim\n\nparcels = sim.get_table(\"parcels\")\nbuildings = sim.get_table(\"buildings\")\nhouseholds = sim.get_table(\"households\")\njobs = sim.get_table(\"jobs\")\n\ns = buildings.parcel_id.isin(parcels.index)\n\nprint \"Building's parcel id in parcel index\\n\", s.value_counts()\n\ns = households.building_id.isin(buildings.index)\n\nprint 
\"Households with no building assigned: \\n\", \\\n (households.building_id == -1).value_counts()\nprint \"Household's building id in building index\\n\", s.value_counts()\n\ns = jobs.building_id.isin(buildings.index)\n\nprint \"Jobs with no building assigned: \\n\", \\\n (jobs.building_id == -1).value_counts()\nprint \"Job's building id in building index\\n\", s.value_counts()\n\nprint \"Len jobs\"\n\nprint len(jobs)\n\nprint \"Num job spaces\"\n\nprint buildings.job_spaces.sum()\n", "id": "11731454", "language": "Python", "matching_score": 1.3645964860916138, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/check_data.py" }, { "content": "import pandas as pd, numpy as np\nimport pandas.io.sql as sql\nfrom spandex import TableLoader\nfrom spandex.io import exec_sql, df_to_db\n\n#Connect to the database\nloader = TableLoader()\n\ndef db_to_df(query):\n \"\"\"Executes SQL query and returns DataFrame.\"\"\"\n conn = loader.database._connection\n return sql.read_frame(query, conn)\n\ndef unit_choice(chooser_ids, alternative_ids, probabilities):\n \"\"\"\n Have a set of choosers choose from among alternatives according\n to a probability distribution. Choice is binary: each\n alternative can only be chosen once.\n\n Parameters\n ----------\n chooser_ids : 1d array_like\n Array of IDs of the agents that are making choices.\n alternative_ids : 1d array_like\n Array of IDs of alternatives among which agents are making choices.\n probabilities : 1d array_like\n The probability that an agent will choose an alternative.\n Must be the same shape as `alternative_ids`. Unavailable\n alternatives should have a probability of 0.\n\n Returns\n -------\n choices : pandas.Series\n Mapping of chooser ID to alternative ID. Some choosers\n will map to a nan value when there are not enough alternatives\n for all the choosers.\n\n \"\"\"\n chooser_ids = np.asanyarray(chooser_ids)\n alternative_ids = np.asanyarray(alternative_ids)\n probabilities = np.asanyarray(probabilities)\n\n choices = pd.Series([np.nan] * len(chooser_ids), index=chooser_ids)\n\n if probabilities.sum() == 0:\n # return all nan if there are no available units\n return choices\n\n # probabilities need to sum to 1 for np.random.choice\n probabilities = probabilities / probabilities.sum()\n\n # need to see if there are as many available alternatives as choosers\n n_available = np.count_nonzero(probabilities)\n n_choosers = len(chooser_ids)\n n_to_choose = n_choosers if n_choosers < n_available else n_available\n\n chosen = np.random.choice(\n alternative_ids, size=n_to_choose, replace=False, p=probabilities)\n\n # if there are fewer available units than choosers we need to pick\n # which choosers get a unit\n if n_to_choose == n_available:\n chooser_ids = np.random.choice(\n chooser_ids, size=n_to_choose, replace=False)\n\n choices[chooser_ids] = chosen\n\n return choices\n\n# Load TAZ-level synthetic population\nhh_path = loader.get_path('hh/synth/hhFile.p2011s3a1.2010.csv')\nhh = pd.read_csv(hh_path)\nhh = hh[hh['HHT'] > 0] #Filter out GQ households\nhh = hh.set_index('HHID')\nhh.index.name = 'household_id'\nhh = hh.rename(columns = {'TAZ':'taz'})\nhh['building_id'] = -1\n\n# Get the taz-level dwelling unit controls just for reference. 
This file also contains the employment totals by sector/zone.\ntaz_controls_csv = loader.get_path('hh/taz2010_imputation.csv')\ntargetunits = pd.read_csv(taz_controls_csv, index_col='taz1454')\n\ntargetunits['hh'] = hh.groupby('taz').size()\n\ndf = targetunits[['targetunits', 'hh']]\n\ndf['occupancy'] = df.hh*1.0/df.targetunits\n\nprint 'Number of zones over-occupied with households will be: %s' % (df.occupancy>1).sum()\n\n##Get the buildings, the alternatives we will be allocating to\nbuildings = db_to_df('select * from buildings;')\nbuildings = buildings.set_index('building_id')\nempty_units = buildings[buildings.residential_units>0].residential_units.order(ascending=False)\nalternatives = buildings[['development_type_id','parcel_id','taz']]\nalternatives = alternatives.ix[np.repeat(empty_units.index.values,empty_units.values.astype('int'))]\n\ntaz_hh_counts = hh.groupby('taz').size()\n\nfor taz in np.unique(hh.taz):\n num_hh = taz_hh_counts[taz_hh_counts.index.values==taz].values[0]\n chooser_ids = hh.index[hh.taz==taz].values\n print 'There are %s households in TAZ %s' % (num_hh, taz)\n alts = alternatives[alternatives.taz==taz]\n alternative_ids = alts.index.values\n probabilities = np.ones(len(alternative_ids))\n num_resunits = len(alts)\n print 'There are %s residential units in TAZ %s' % (num_resunits, taz)\n choices = unit_choice(chooser_ids,alternative_ids,probabilities)\n #households_urbansim.building_id[np.in1d(households_urbansim.index.values,chooser_ids)] = choices.values\n hh.loc[chooser_ids,'building_id'] = choices\n if num_hh > num_resunits:\n print 'Warning: number of households exceeds number of resunits in TAZ %s' % taz\ntargetunits['hh_allocated'] = pd.merge(hh, buildings, left_on = 'building_id', right_index = True).groupby('taz_x').size()\n\ndf = targetunits[['targetunits', 'hh', 'hh_allocated']]\ndf['occupancy'] = df.hh*1.0/df.targetunits\n\nprint df.head()\n#summary_output_path = loader.get_path('out/regeneration/summaries/hh_summary.csv')\ndf_to_db(df, 'summary_hh', schema=loader.tables.public)\n\n\n\n################\n#####JOBS#######\n################\n\nsector_columns = []\nfor col in targetunits.columns:\n if col.startswith('e'):\n if col.endswith('_10'):\n sector_columns.append(col)\n\nemp_targets = targetunits[sector_columns]\n\n\ntotal_jobs = int(emp_targets.etot_10.sum())\n\njob_id = np.int32(np.arange(total_jobs) + 1)\n\ntaz_id = np.int64(np.zeros(total_jobs))\n\nsector_id = np.int32(np.zeros(total_jobs))\n\n## Prepare jobs table\ni = 0\n#regional_data = regional_data[regional_data.block11.notnull()].fillna(0)\nif 'etot_10' in sector_columns: sector_columns.remove('etot_10')\nfor taz in emp_targets.index.values:\n for sector in sector_columns:\n num_jobs = int(emp_targets.loc[taz, sector])\n if num_jobs > 0:\n j = i + num_jobs\n taz_id[i:j]=taz\n sector_num = int(sector.split('_')[0].split('e')[1]) \n sector_id[i:j]=sector_num\n i = j\n \njobs_table = {'job_id':job_id,'taz':taz_id,'sector_id':sector_id}\n\njobs_table = pd.DataFrame(jobs_table)\njobs_table = jobs_table.set_index('job_id')\njobs_table['building_id'] = -1\n\ntaz_job_counts = jobs_table.groupby('taz').size()\n\n#building_sqft_per_job assumptions for the initial allocation\nbuilding_sqft_per_job = {'BR':355,\n 'GV':355,\n 'HO':1161,\n 'HP':355,\n 'IH':661,\n 'IL':661,\n 'IW':661,\n 'MF':400,\n 'MR':383,\n 'OF':355,\n 'RT':445,\n 'SC':470,\n 'SF':400,\n 'LD':1000,\n 'VP':1000,\n 'other':1000}\n\n##Calculate job spaces per building\nbuildings['sqft_per_job'] = 
buildings.development_type_id.map(building_sqft_per_job)\nbuildings['job_spaces'] = (buildings.non_residential_sqft / buildings.sqft_per_job).fillna(0).astype('int')\n\n##Universe of job space alternatives\nempty_units = buildings[buildings.job_spaces > 0].job_spaces.order(ascending=False)\nalternatives = buildings[['development_type_id','parcel_id','taz']]\nalternatives = alternatives.ix[np.repeat(empty_units.index.values,empty_units.values.astype('int'))]\n\njobs = jobs_table\n\n##Allocate jobs from TAZ to building\nfor taz in np.unique(jobs.taz):\n num_jobs = taz_job_counts[taz_job_counts.index.values==taz].values[0]\n chooser_ids = jobs.index[jobs.taz==taz].values\n print 'There are %s jobs in TAZ %s' % (num_jobs, taz)\n alts = alternatives[alternatives.taz==taz]\n alternative_ids = alts.index.values\n probabilities = np.ones(len(alternative_ids))\n num_jobspaces = len(alts)\n print 'There are %s job spaces in TAZ %s' % (num_jobspaces, taz)\n choices = unit_choice(chooser_ids,alternative_ids,probabilities)\n jobs.loc[chooser_ids,'building_id'] = choices\n if num_jobs > num_jobspaces:\n print 'Warning: number of jobs exceeds number of job spaces in TAZ %s' % taz\n\ntargetunits['jobs_allocated'] = pd.merge(jobs, buildings, left_on = 'building_id', right_index = True).groupby('taz_x').size()\ntargetunits['jobs'] = jobs.groupby('taz').size()\ntargetunits['job_spaces'] = buildings.groupby('taz').job_spaces.sum()\n\ndf = targetunits[['job_spaces', 'jobs', 'jobs_allocated']]\ndf['occupancy'] = df.jobs_allocated*1.0/df.job_spaces\ndf['diff'] = df.jobs - df.job_spaces\n# summary_output_path = loader.get_path('out/regeneration/summaries/emp_summary.csv')\ndf_to_db(df, 'summary_emp', schema=loader.tables.public)\n\nprint df.head(50)\n\nprint jobs.building_id.isnull().sum()\n\nprint hh.building_id.isnull().sum()\n\ntargetunits['sqft_per_job'] = targetunits.targetnonressqft/targetunits.etot_10\n\nprint targetunits['sqft_per_job'].describe()\n\njobs.building_id[jobs.building_id.isnull()] = -1\nhh.building_id[hh.building_id.isnull()] = -1\n\n#EXPORT DEMAND AGENTS TO DB\nprint 'Loading jobs and households back to database'\ndf_to_db(jobs, 'jobs', schema=loader.tables.public)\ndf_to_db(hh, 'households', schema=loader.tables.public)\n\n#EXPORT BUILDING TABLE BACK TO DB\nbuildings['residential_sqft'] = buildings.residential_units * buildings.sqft_per_unit\nbuildings2 = buildings[['parcel_id', 'development_type_id', 'improvement_value', 'residential_units', \n 'residential_sqft', 'sqft_per_unit', 'non_residential_sqft', 'building_sqft', 'nonres_rent_per_sqft', 'res_price_per_sqft', 'stories', 'year_built',\n 'redfin_sale_price', 'redfin_sale_year', 'redfin_home_type', 'costar_property_type', 'costar_rent']]\ndevtype_devid_xref = {'SF':1, 'MF':2, 'MFS':3, 'MH':4, 'MR':5, 'GQ':6, 'RT':7, 'BR':8, 'HO':9, 'OF':10, 'OR':11, 'HP':12, 'IW':13, \n 'IL':14, 'IH':15, 'VY':16, 'SC':17, 'SH':18, 'GV':19, 'VP':20, 'PG':21, 'PL':22, 'AP':23, 'LD':24, 'other':-1}\nfor dev in devtype_devid_xref.keys():\n buildings2.development_type_id[buildings2.development_type_id == dev] = devtype_devid_xref[dev]\nbuildings2.development_type_id = buildings2.development_type_id.astype('int')\nbuildings2.residential_units = buildings2.residential_units.astype('int')\nbuildings2.residential_sqft = buildings2.residential_sqft.astype('int')\nbuildings2.non_residential_sqft = np.round(buildings2.non_residential_sqft).astype('int')\nbuildings2.stories = np.ceil(buildings2.stories).astype('int')\nbuildings2.year_built = 
np.round(buildings2.year_built).astype('int')\ndf_to_db(buildings2, 'building', schema=loader.tables.public)", "id": "10931071", "language": "Python", "matching_score": 4.855395793914795, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/demand_agent_allocation.py" }, { "content": "import pandas as pd, numpy as np\nimport pandas.io.sql as sql\nfrom spandex import TableLoader\n\n#Connect to the database\nloader = TableLoader()\n\ndef db_to_df(query):\n \"\"\"Executes SQL query and returns DataFrame.\"\"\"\n conn = loader.database._connection\n return sql.read_frame(query, conn)\n\ndef unit_choice(chooser_ids, alternative_ids, probabilities):\n \"\"\"\n Have a set of choosers choose from among alternatives according\n to a probability distribution. Choice is binary: each\n alternative can only be chosen once.\n\n Parameters\n ----------\n chooser_ids : 1d array_like\n Array of IDs of the agents that are making choices.\n alternative_ids : 1d array_like\n Array of IDs of alternatives among which agents are making choices.\n probabilities : 1d array_like\n The probability that an agent will choose an alternative.\n Must be the same shape as `alternative_ids`. Unavailable\n alternatives should have a probability of 0.\n\n Returns\n -------\n choices : pandas.Series\n Mapping of chooser ID to alternative ID. Some choosers\n will map to a nan value when there are not enough alternatives\n for all the choosers.\n\n \"\"\"\n chooser_ids = np.asanyarray(chooser_ids)\n alternative_ids = np.asanyarray(alternative_ids)\n probabilities = np.asanyarray(probabilities)\n\n choices = pd.Series([np.nan] * len(chooser_ids), index=chooser_ids)\n\n if probabilities.sum() == 0:\n # return all nan if there are no available units\n return choices\n\n # probabilities need to sum to 1 for np.random.choice\n probabilities = probabilities / probabilities.sum()\n\n # need to see if there are as many available alternatives as choosers\n n_available = np.count_nonzero(probabilities)\n n_choosers = len(chooser_ids)\n n_to_choose = n_choosers if n_choosers < n_available else n_available\n\n chosen = np.random.choice(\n alternative_ids, size=n_to_choose, replace=False, p=probabilities)\n\n # if there are fewer available units than choosers we need to pick\n # which choosers get a unit\n if n_to_choose == n_available:\n chooser_ids = np.random.choice(\n chooser_ids, size=n_to_choose, replace=False)\n\n choices[chooser_ids] = chosen\n\n return choices", "id": "10744015", "language": "Python", "matching_score": 0.041979603469371796, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/utils.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os.path\nimport logging\n\nimport pytest\n\nimport orca\nimport pandas as pd\n\nfrom .. 
import tracing as tracing\n\n\ndef close_handlers():\n for logger_name in ['activitysim', 'orca']:\n logger = logging.getLogger(logger_name)\n logger.handlers = []\n logger.propagate = True\n logger.setLevel(logging.NOTSET)\n\n\ndef add_canonical_dirs():\n\n configs_dir = os.path.join(os.path.dirname(__file__), 'configs')\n orca.add_injectable(\"configs_dir\", configs_dir)\n\n output_dir = os.path.join(os.path.dirname(__file__), 'output')\n orca.add_injectable(\"output_dir\", output_dir)\n\n\ndef test_bad_custom_config_file(capsys):\n\n add_canonical_dirs()\n\n custom_config_file = os.path.join(os.path.dirname(__file__), 'configs', 'xlogging.yaml')\n tracing.config_logger(custom_config_file=custom_config_file)\n\n logger = logging.getLogger('activitysim')\n\n file_handlers = [h for h in logger.handlers if type(h) is logging.FileHandler]\n assert len(file_handlers) == 1\n asim_logger_baseFilename = file_handlers[0].baseFilename\n\n logger = logging.getLogger(__name__)\n logger.info('test_bad_custom_config_file')\n logger.info('log_info')\n logger.warn('log_warn1')\n\n out, err = capsys.readouterr()\n\n # don't consume output\n print out\n\n assert \"could not find conf file\" in out\n assert 'log_warn1' in out\n assert 'log_info' not in out\n\n close_handlers()\n\n logger.warn('log_warn2')\n\n with open(asim_logger_baseFilename, 'r') as content_file:\n content = content_file.read()\n assert 'log_warn1' in content\n assert 'log_warn2' not in content\n\n\ndef test_config_logger(capsys):\n\n add_canonical_dirs()\n\n tracing.config_logger()\n\n logger = logging.getLogger('activitysim')\n\n file_handlers = [h for h in logger.handlers if type(h) is logging.FileHandler]\n assert len(file_handlers) == 1\n asim_logger_baseFilename = file_handlers[0].baseFilename\n\n print \"handlers:\", logger.handlers\n\n logger.info('test_config_logger')\n logger.info('log_info')\n logger.warn('log_warn1')\n\n out, err = capsys.readouterr()\n\n # don't consume output\n print out\n\n assert \"could not find conf file\" not in out\n assert 'log_warn1' in out\n assert 'log_info' not in out\n\n close_handlers()\n\n logger = logging.getLogger(__name__)\n logger.warn('log_warn2')\n\n with open(asim_logger_baseFilename, 'r') as content_file:\n content = content_file.read()\n print content\n assert 'log_warn1' in content\n assert 'log_warn2' not in content\n\n\ndef test_custom_config_logger(capsys):\n\n add_canonical_dirs()\n\n custom_config_file = os.path.join(os.path.dirname(__file__), 'configs', 'custom_logging.yaml')\n tracing.config_logger(custom_config_file)\n\n logger = logging.getLogger('activitysim')\n\n logger.warn('custom_log_warn')\n\n asim_logger_filename = os.path.join(os.path.dirname(__file__), 'output', 'xasim.log')\n\n with open(asim_logger_filename, 'r') as content_file:\n content = content_file.read()\n assert 'custom_log_warn' in content\n\n out, err = capsys.readouterr()\n\n # don't consume output\n print out\n\n assert 'custom_log_warn' in out\n\n\ndef test_basic(capsys):\n\n configs_dir = os.path.join(os.path.dirname(__file__), 'configs')\n orca.add_injectable(\"configs_dir\", configs_dir)\n\n output_dir = os.path.join(os.path.dirname(__file__), 'output')\n orca.add_injectable(\"output_dir\", output_dir)\n\n # remove existing handlers or basicConfig is a NOP\n logging.getLogger().handlers = []\n\n tracing.config_logger(basic=True)\n\n logger = logging.getLogger()\n file_handlers = [h for h in logger.handlers if type(h) is logging.FileHandler]\n assert len(file_handlers) == 0\n\n logger = 
logging.getLogger('activitysim')\n\n logger.info('test_basic')\n logger.debug('log_debug')\n logger.info('log_info')\n logger.warn('log_warn')\n\n out, err = capsys.readouterr()\n\n # don't consume output\n print out\n\n assert 'log_warn' in out\n assert 'log_info' in out\n assert 'log_debug' not in out\n\n close_handlers()\n\n\ndef test_print_summary(capsys):\n\n add_canonical_dirs()\n\n tracing.config_logger()\n\n tracing.print_summary('label', df=None, describe=False, value_counts=False)\n\n out, err = capsys.readouterr()\n\n # don't consume output\n print out\n\n assert 'print_summary neither value_counts nor describe' in out\n\n close_handlers()\n\n\ndef test_register_households(capsys):\n\n add_canonical_dirs()\n\n tracing.config_logger()\n\n df = pd.DataFrame({'zort': ['a', 'b', 'c']}, index=[1, 2, 3])\n\n tracing.register_households(df, 5)\n\n out, err = capsys.readouterr()\n\n # don't consume output\n print out\n\n # should warn that household id not in index\n assert 'trace_hh_id 5 not in dataframe' in out\n\n # should warn and rename index if index name is None\n assert \"households table index had no name. renamed index 'household_id'\" in out\n\n close_handlers()\n\n\ndef test_register_tours(capsys):\n\n add_canonical_dirs()\n\n tracing.config_logger()\n\n df = pd.DataFrame({'zort': ['a', 'b', 'c']}, index=[1, 2, 3])\n\n tracing.register_tours(df, 5)\n\n out, err = capsys.readouterr()\n\n # don't consume output\n print out\n\n assert \"no person ids registered for trace_hh_id 5\" in out\n\n close_handlers()\n\n\ndef test_register_persons(capsys):\n\n add_canonical_dirs()\n\n tracing.config_logger()\n\n df = pd.DataFrame({'household_id': [1, 2, 3]}, index=[11, 12, 13])\n\n tracing.register_persons(df, 5)\n\n out, err = capsys.readouterr()\n\n # don't consume output\n print out\n\n # should warn that household id not in index\n assert 'trace_hh_id 5 not found' in out\n\n # should warn and rename index if index name is None\n assert \"persons table index had no name. 
renamed index 'person_id'\" in out\n\n close_handlers()\n\n\ndef test_write_csv(capsys):\n\n add_canonical_dirs()\n\n tracing.config_logger()\n\n # should complain if df not a DataFrame or Series\n tracing.write_csv(df='not a df or series', file_name='baddie')\n\n out, err = capsys.readouterr()\n\n # don't consume output\n print out\n\n assert \"write_df_csv object 'baddie' of unexpected type\" in out\n\n close_handlers()\n\n\ndef test_slice_ids():\n\n df = pd.DataFrame({'household_id': [1, 2, 3]}, index=[11, 12, 13])\n\n # slice by named column\n sliced_df = tracing.slice_ids(df, [1, 3, 6], column='household_id')\n assert len(sliced_df.index) == 2\n\n # slice by index\n sliced_df = tracing.slice_ids(df, [6, 12], column=None)\n assert len(sliced_df.index) == 1\n\n # attempt to slice by non-existent column\n with pytest.raises(RuntimeError) as excinfo:\n sliced_df = tracing.slice_ids(df, [5, 6], column='baddie')\n assert \"slice_ids slicer column 'baddie' not in dataframe\" in str(excinfo.value)\n", "id": "12275808", "language": "Python", "matching_score": 2.7469589710235596, "max_stars_count": 0, "path": "activitysim/activitysim/core/test/test_tracing.py" }, { "content": "import os\nimport sys\nimport time\nimport yaml\nimport numpy as np\nimport pandas as pd\nimport orca\n\n\n# Configurations\nASIM_LOGGER = 'simca'\nCSV_FILE_TYPE = 'csv'\n\n\ndef get_injectable(name, default=None):\n\n if orca.is_injectable(name):\n return orca.get_injectable(name)\n else:\n return default\n\n\ndef check_for_variability():\n return get_injectable('check_for_variability', False)\n\n\ndef extend_trace_label(trace_label, extension):\n if trace_label:\n trace_label = \"%s.%s\" % (trace_label, extension)\n return trace_label\n\n\ndef print_elapsed_time(msg=None, t0=None, debug=False):\n t1 = time.time()\n if msg:\n t = t1 - (t0 or t1)\n msg = \"Time to execute %s : %s seconds (%s minutes)\" % (\n msg, round(t, 3), round(t / 60.0))\n print(msg)\n return t1\n\n\ndef delete_csv_files(output_dir):\n \"\"\"\n Delete CSV files\n\n Parameters\n ----------\n output_dir: str\n Directory of trace output CSVs\n\n Returns\n -------\n Nothing\n \"\"\"\n for the_file in os.listdir(output_dir):\n if the_file.endswith(CSV_FILE_TYPE):\n file_path = os.path.join(output_dir, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)\n\n\ndef log_file_path(name):\n \"\"\"\n For use in logging.yaml tag to inject log file path\n\n Parameters\n ----------\n name: str\n output folder name\n\n Returns\n -------\n f: str\n output folder name\n \"\"\"\n output_dir = get_injectable('output_dir')\n f = os.path.join(output_dir, name)\n return f\n\n\ndef print_summary(label, df, describe=False, value_counts=False):\n \"\"\"\n Print summary\n\n Parameters\n ----------\n label: str\n tracer name\n df: pandas.DataFrame\n traced dataframe\n describe: boolean\n print describe?\n value_counts: boolean\n print value counts?\n\n Returns\n -------\n Nothing\n \"\"\"\n\n if not (value_counts or describe):\n print(\"print_summary neither value_counts nor describe\")\n\n if value_counts:\n print \"\\n%s value counts:\\n%s\\n\" % (label, df.value_counts())\n\n if describe:\n print \"\\n%s summary:\\n%s\\n\" % (label, df.describe())\n\n\ndef register_households(df, trace_hh_id):\n \"\"\"\n Register with orca households for tracing\n\n Parameters\n ----------\n df: pandas.DataFrame\n traced dataframe\n\n trace_hh_id: int\n household id we are tracing\n\n Returns\n -------\n Nothing\n \"\"\"\n\n 
print(\"tracing household id %s in %s households\" % (\n trace_hh_id, len(df.index)))\n\n if trace_hh_id not in df.index:\n print(\"trace_hh_id %s not in dataframe\" % trace_hh_id)\n\n # inject persons_index name of person dataframe index\n if df.index.name is None:\n df.index.names = ['household_id']\n print(\n \"households table index had no\"\n \" name. renamed index '%s'\" % df.index.name)\n orca.add_injectable(\"hh_index_name\", df.index.name)\n\n print(\"register_households injected hh_index_name '%s'\" % df.index.name)\n\n\ndef register_persons(df, trace_hh_id):\n \"\"\"\n Register with orca persons for tracing\n\n Parameters\n ----------\n df: pandas.DataFrame\n traced dataframe\n\n trace_hh_id: int\n household id we are tracing\n\n Returns\n -------\n Nothing\n \"\"\"\n\n # inject persons_index name of person dataframe index\n if df.index.name is None:\n df.index.names = ['person_id']\n print(\n \"persons table index had no name.\"\n \" renamed index '%s'\" % df.index.name)\n orca.add_injectable(\"persons_index_name\", df.index.name)\n\n print(\"register_persons injected persons_index_name '%s'\" % df.index.name)\n\n # inject list of person_ids in household we are tracing\n # this allows us to slice by person_id without requiring\n # presence of household_id column\n traced_persons_df = df[df['household_id'] == trace_hh_id]\n trace_person_ids = traced_persons_df.index.tolist()\n if len(trace_person_ids) == 0:\n print(\"register_persons: trace_hh_id %s not found.\" % trace_hh_id)\n\n orca.add_injectable(\"trace_person_ids\", trace_person_ids)\n print(\"register_persons injected trace_person_ids %s\" % trace_person_ids)\n\n print(\"tracing person_ids %s in %s persons\" % (\n trace_person_ids, len(df.index)))\n\n\ndef register_tours(df, trace_hh_id):\n \"\"\"\n Register with orca persons for tracing\n\n create an orca injectable 'trace_tour_ids' with a\n list of tour_ids in household we are tracing.\n This allows us to slice by tour_id without\n requiring presence of person_id column\n\n Parameters\n ----------\n df: pandas.DataFrame\n traced dataframe\n\n trace_hh_id: int\n household id we are tracing\n\n Returns\n -------\n Nothing\n \"\"\"\n\n # get list of persons in traced household\n # (should already have been registered)\n person_ids = get_injectable(\"trace_person_ids\", [])\n\n if len(person_ids) == 0:\n # trace_hh_id not in households table or register_persons\n # was not not called\n print(\"no person ids registered for trace_hh_id %s\" % trace_hh_id)\n return\n\n # but if household_id is in households, then we may have some tours\n traced_tours_df = slice_ids(df, person_ids, column='person_id')\n trace_tour_ids = traced_tours_df.index.tolist()\n if len(trace_tour_ids) == 0:\n print(\"register_tours: no tours found for person_ids %s.\" % person_ids)\n else:\n print(\"tracing tour_ids %s in %s tours\" % (\n trace_tour_ids, len(df.index)))\n\n # register_tours is called for both mandatory and non_mandatory tours\n # so there may already be some tours registered - add the\n # new tours to the existing list\n trace_tour_ids = get_injectable(\"trace_tour_ids\", []) + trace_tour_ids\n\n orca.add_injectable(\"trace_tour_ids\", trace_tour_ids)\n print(\"register_tours injected trace_tour_ids %s\" % trace_tour_ids)\n\n\ndef register_trips(df, trace_hh_id):\n \"\"\"\n Register with orca persons for tracing\n\n create an orca injectable 'trace_tour_ids' with a list of tour_ids\n in household we are tracing. 
This allows us to slice by tour_id without\n requiring presence of person_id column\n\n Parameters\n ----------\n df: pandas.DataFrame\n traced dataframe\n\n trace_hh_id: int\n household id we are tracin\n\n Returns\n -------\n Nothing\n \"\"\"\n\n # get list of persons in traced household (should already have\n # been registered)\n tour_ids = get_injectable(\"trace_tour_ids\", [])\n\n if len(tour_ids) == 0:\n # register_persons was not not called\n print(\"no tour ids registered for trace_hh_id %s\" % trace_hh_id)\n return\n\n # but if household_id is in households, then we may have some trips\n traced_trips_df = slice_ids(df, tour_ids, column='tour_id')\n trace_trip_ids = traced_trips_df.index.tolist()\n if len(traced_trips_df) == 0:\n print(\"register_trips: no trips found for tour_ids %s.\" % tour_ids)\n else:\n print(\"tracing trip_ids %s in %s trips\" % (\n trace_trip_ids, len(df.index)))\n\n orca.add_injectable(\"trace_trip_ids\", trace_trip_ids)\n print(\"register_trips injected trace_tour_ids %s\" % trace_trip_ids)\n\n\ndef register_traceable_table(table_name, df):\n \"\"\"\n Register traceable table\n\n Parameters\n ----------\n df: pandas.DataFrame\n traced dataframe\n\n Returns\n -------\n Nothing\n \"\"\"\n\n trace_hh_id = get_injectable(\"trace_hh_id\", None)\n\n if trace_hh_id is None:\n return\n\n if table_name == 'households':\n register_households(df, trace_hh_id)\n elif table_name == 'persons':\n register_persons(df, trace_hh_id)\n elif table_name == 'trips':\n register_trips(df, trace_hh_id)\n elif table_name in [\"non_mandatory_tours\", \"mandatory_tours\"]:\n register_tours(df, trace_hh_id)\n\n\ndef sort_for_registration(table_names):\n\n # names of all traceable tables ordered by dependency on household_id\n # e.g. 'persons' has to be registered AFTER 'households'\n preferred_order = [\n 'households', 'persons', 'non_mandatory_tours',\n 'mandatory_tours', 'trips']\n\n table_names = list(table_names)\n\n for table_name in reversed(preferred_order):\n if table_name in table_names:\n # move it to the end of the list\n table_names.remove(table_name)\n table_names.append(table_name)\n\n return reversed(table_names)\n\n\ndef write_df_csv(df, file_path, index_label=None,\n columns=None, column_labels=None, transpose=True):\n\n mode = 'a' if os.path.isfile(file_path) else 'w'\n\n if columns:\n df = df[columns]\n\n if not transpose:\n df.to_csv(file_path, mode=\"a\", index=True, header=True)\n return\n\n df_t = df.transpose()\n if df.index.name is not None:\n df_t.index.name = df.index.name\n elif index_label:\n df_t.index.name = index_label\n\n with open(file_path, mode=mode) as f:\n if column_labels is None:\n column_labels = [None, None]\n if column_labels[0] is None:\n column_labels[0] = 'label'\n if column_labels[1] is None:\n column_labels[1] = 'value'\n\n if len(df_t.columns) == len(column_labels) - 1:\n column_label_row = ','.join(column_labels)\n else:\n column_label_row = \\\n column_labels[0] + ',' \\\n + ','.join([\n column_labels[1] + '_' + str(i + 1)\n for i in range(len(df_t.columns))])\n\n if mode == 'a':\n column_label_row = '# ' + column_label_row\n f.write(column_label_row + '\\n')\n df_t.to_csv(file_path, mode='a', index=True, header=True)\n\n\ndef write_series_csv(series, file_path, index_label=None,\n columns=None, column_labels=None):\n\n if isinstance(columns, str):\n series = series.rename(columns)\n elif isinstance(columns, list):\n if columns[0]:\n series.index.name = columns[0]\n series = series.rename(columns[1])\n if index_label and 
series.index.name is None:\n series.index.name = index_label\n series.to_csv(file_path, mode='a', index=True, header=True)\n\n\ndef write_csv(df, file_name, index_label=None, columns=None,\n column_labels=None, transpose=True):\n \"\"\"\n Print write_csv\n\n Parameters\n ----------\n df: pandas.DataFrame or pandas.Series\n traced dataframe\n file_name: str\n output file name\n index_label: str\n index name\n columns: list\n columns to write\n transpose: bool\n whether to transpose dataframe (ignored for series)\n Returns\n -------\n Nothing\n \"\"\"\n\n file_path = log_file_path('%s.%s' % (file_name, CSV_FILE_TYPE))\n\n if os.path.isfile(file_path):\n print(\"write_csv file exists %s %s\" % (type(df).__name__, file_name))\n\n if isinstance(df, pd.DataFrame):\n print(\"dumping %s dataframe to %s\" % (df.shape, file_name))\n write_df_csv(\n df, file_path, index_label, columns, column_labels,\n transpose=transpose)\n elif isinstance(df, pd.Series):\n print(\"dumping %s element series to %s\" % (len(df.index), file_name))\n write_series_csv(df, file_path, index_label, columns, column_labels)\n elif isinstance(df, dict):\n df = pd.Series(data=df)\n print(\"dumping %s element dict to %s\" % (len(df.index), file_name))\n write_series_csv(df, file_path, index_label, columns, column_labels)\n else:\n print(\n \"write_df_csv object\"\n \" '%s' of unexpected type: %s\" % (file_name, type(df)))\n\n\ndef slice_ids(df, ids, column=None):\n \"\"\"\n slice a dataframe to select only records with the specified ids\n\n Parameters\n ----------\n df: pandas.DataFrame\n traced dataframe\n ids: int or list of ints\n slice ids\n column: str\n column to slice (slice using index if None)\n\n Returns\n -------\n df: pandas.DataFrame\n sliced dataframe\n \"\"\"\n\n if not isinstance(ids, (list, tuple)):\n ids = [ids]\n try:\n if column is None:\n df = df[df.index.isin(ids)]\n else:\n df = df[df[column].isin(ids)]\n except KeyError:\n # this happens if specified slicer column is not in df\n # df = df[0:0]\n raise RuntimeError(\n \"slice_ids slicer column '%s' not in dataframe\" % column)\n\n return df\n\n\ndef get_trace_target(df, slicer):\n \"\"\"\n get target ids and column or index to identify target trace rows in df\n\n Parameters\n ----------\n df: pandas.DataFrame\n dataframe to slice\n slicer: str\n name of column or index to use for slicing\n\n Returns\n -------\n (target, column) tuple\n\n target : int or list of ints\n id or ids that identify tracer target rows\n column : str\n name of column to search for targets or None to search index\n \"\"\"\n\n if slicer is None:\n slicer = df.index.name\n\n target_ids = None # id or ids to slice by (e.g. 
hh_id/person_ids/tour_ids)\n column = None # column name to slice on or None to slice on index\n\n if len(df.index) == 0:\n target_ids = None\n elif slicer == 'PERID' or slicer == get_injectable('persons_index_name'):\n target_ids = get_injectable('trace_person_ids', [])\n elif slicer == 'HHID' or slicer == orca.get_injectable('hh_index_name'):\n target_ids = get_injectable('trace_hh_id', [])\n elif slicer == 'person_id':\n target_ids = get_injectable('trace_person_ids', [])\n column = slicer\n elif slicer == 'hh_id':\n target_ids = get_injectable('trace_hh_id', [])\n column = slicer\n elif slicer == 'tour_id':\n if isinstance(df, pd.DataFrame) and ('person_id' in df.columns):\n target_ids = get_injectable('trace_person_ids', [])\n column = 'person_id'\n else:\n target_ids = get_injectable('trace_tour_ids', [])\n elif slicer == 'trip_id': # FIX ME\n if isinstance(df, pd.DataFrame) and ('person_id' in df.columns):\n target_ids = get_injectable('trace_person_ids', [])\n column = 'person_id'\n else:\n target_ids = get_injectable('trace_trip_ids', [])\n elif slicer == 'TAZ' or slicer == 'ZONE':\n target_ids = get_injectable('trace_od', [])\n elif slicer == 'NONE':\n target_ids = None\n else:\n raise RuntimeError(\"slice_canonically: bad slicer '%s'\" % (slicer, ))\n\n if target_ids and not isinstance(target_ids, (list, tuple)):\n target_ids = [target_ids]\n\n return target_ids, column\n\n\ndef slice_canonically(df, slicer, label, warn_if_empty=False):\n \"\"\"\n Slice dataframe by traced household or person id dataframe and write to CSV\n\n Parameters\n ----------\n df: pandas.DataFrame\n dataframe to slice\n slicer: str\n name of column or index to use for slicing\n label: str\n tracer name - only used to report bad slicer\n\n Returns\n -------\n sliced subset of dataframe\n \"\"\"\n\n target_ids, column = get_trace_target(df, slicer)\n\n if target_ids is not None:\n df = slice_ids(df, target_ids, column)\n\n if warn_if_empty and len(df.index) == 0:\n column_name = column or slicer\n print(\n \"slice_canonically: no rows in %s with %s == %s\"\n % (label, column_name, target_ids))\n\n return df\n\n\ndef has_trace_targets(df, slicer=None):\n\n target_ids, column = get_trace_target(df, slicer)\n\n if target_ids is None:\n found = False\n else:\n\n if column is None:\n found = df.index.isin(target_ids).any()\n else:\n found = df[column].isin(target_ids).any()\n\n return found\n\n\ndef hh_id_for_chooser(id, choosers):\n\n if choosers.index.name == 'HHID' or \\\n choosers.index.name == get_injectable('hh_index_name'):\n hh_id = id\n elif 'household_id' in choosers.columns:\n hh_id = choosers.loc[id]['household_id']\n else:\n raise RuntimeError(\n \"don't grok chooser with index %s\" % choosers.index.name)\n\n return hh_id\n\n\ndef dump_df(dump_switch, df, trace_label, fname):\n if dump_switch:\n trace_label = extend_trace_label(trace_label, 'DUMP.%s' % fname)\n trace_df(df, trace_label, slicer='NONE', transpose=False)\n\n\ndef trace_df(df, label, slicer=None, columns=None,\n index_label=None, column_labels=None,\n transpose=True, warn_if_empty=False):\n \"\"\"\n Slice dataframe by traced household or person id dataframe and write to CSV\n\n Parameters\n ----------\n df: pandas.DataFrame\n traced dataframe\n label: str\n tracer name\n slicer: Object\n slicer for subsetting\n columns: list\n columns to write\n index_label: str\n index name\n column_labels: [str, str]\n labels for columns in csv\n transpose: boolean\n whether to transpose file for legibility\n warn_if_empty: boolean\n write warning if 
sliced df is empty\n\n Returns\n -------\n Nothing\n \"\"\"\n\n df = slice_canonically(df, slicer, label, warn_if_empty)\n\n if len(df.index) > 0:\n write_csv(df, file_name=label, index_label=(index_label or slicer),\n columns=columns, column_labels=column_labels,\n transpose=transpose)\n\n\ndef interaction_trace_rows(interaction_df, choosers, sample_size=None):\n \"\"\"\n Trace model design for interaction_simulate\n\n Parameters\n ----------\n interaction_df: pandas.DataFrame\n traced model_design dataframe\n choosers: pandas.DataFrame\n interaction_simulate choosers\n (needed to filter the model_design dataframe by traced hh or person id)\n sample_size int or None\n int for constant sample size, or None if choosers have\n different numbers of alternatives\n Returns\n -------\n trace_rows : numpy.ndarray\n array of booleans to flag which rows in interaction_df to trace\n\n trace_ids : tuple (str, numpy.ndarray)\n column name and array of trace_ids mapping trace_rows to their\n target_id for use by trace_interaction_eval_results which needs to know\n target_id so it can create separate tables for each distinct target for\n readability\n \"\"\"\n\n # slicer column name and id targets to use for chooser id added to\n # model_design dataframe currently we only ever slice by person_id,\n # but that could change, so we check here...\n\n if choosers.index.name == 'PERID' \\\n or choosers.index.name == get_injectable('persons_index_name'):\n slicer_column_name = choosers.index.name\n targets = get_injectable('trace_person_ids', [])\n elif (\n choosers.index.name == 'tour_id' and\n 'person_id' in choosers.columns):\n slicer_column_name = 'person_id'\n targets = get_injectable('trace_person_ids', [])\n else:\n raise RuntimeError(\n \"interaction_trace_rows don't know how to slice index '%s'\"\n % choosers.index.name)\n\n if sample_size is None:\n # if sample size not constant, we count on index of\n # interaction_df being same as choosers\n assert interaction_df.index.name == choosers.index.name\n trace_rows = np.in1d(interaction_df.index, targets)\n trace_ids = interaction_df[trace_rows].index.values\n else:\n\n if slicer_column_name == choosers.index.name:\n trace_rows = np.in1d(choosers.index, targets)\n trace_ids = np.asanyarray(choosers[trace_rows].index)\n else:\n trace_rows = np.in1d(choosers['person_id'], targets)\n trace_ids = np.asanyarray(choosers[trace_rows].person_id)\n\n # simply repeat if sample size is constant across choosers\n assert sample_size == len(interaction_df.index) / len(choosers.index)\n trace_rows = np.repeat(trace_rows, sample_size)\n trace_ids = np.repeat(trace_ids, sample_size)\n\n assert type(trace_rows) == np.ndarray\n assert type(trace_ids) == np.ndarray\n\n trace_ids = (slicer_column_name, trace_ids)\n\n return trace_rows, trace_ids\n\n\ndef trace_interaction_eval_results(trace_results, trace_ids, label):\n \"\"\"\n Trace model design eval results for interaction_simulate\n\n Parameters\n ----------\n trace_results: pandas.DataFrame\n traced model_design dataframe\n trace_ids : tuple (str, numpy.ndarray)\n column name and array of trace_ids from interaction_trace_rows()\n used to filter the trace_results dataframe by traced hh or person id\n label: str\n tracer name\n\n Returns\n -------\n Nothing\n \"\"\"\n\n assert type(trace_ids[1]) == np.ndarray\n\n slicer_column_name = trace_ids[0]\n trace_results[slicer_column_name] = trace_ids[1]\n\n targets = np.unique(trace_ids[1])\n\n if len(trace_results.index) == 0:\n return\n\n # write out the raw dataframe\n 
file_path = log_file_path('%s.raw.csv' % label)\n trace_results.to_csv(file_path, mode=\"a\", index=True, header=True)\n\n # if there are multiple targets, we want them in separate tables\n # for readability\n for target in targets:\n\n df_target = trace_results[trace_results[slicer_column_name] == target]\n\n # we want the transposed columns in predictable order\n df_target.sort_index(inplace=True)\n\n # # remove the slicer (person_id or hh_id) column?\n # del df_target[slicer_column_name]\n\n target_label = '%s.%s.%s' % (label, slicer_column_name, target)\n trace_df(df_target, target_label,\n slicer=\"NONE\",\n transpose=True,\n column_labels=['expression', None],\n warn_if_empty=False)\n", "id": "12003797", "language": "Python", "matching_score": 3.344496011734009, "max_stars_count": 0, "path": "bayarea_urbansim/baus/tracing.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport warnings\nimport logging\n\nimport numpy as np\nimport orca\nimport pandas as pd\nimport yaml\n\nfrom activitysim.core import pipeline\n\nwarnings.filterwarnings('ignore', category=pd.io.pytables.PerformanceWarning)\npd.options.mode.chained_assignment = None\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](cache=True)\ndef store(data_dir, settings):\n if 'store' not in settings:\n logger.error(\"store file name not specified in settings\")\n raise RuntimeError(\"store file name not specified in settings\")\n fname = os.path.join(data_dir, settings[\"store\"])\n if not os.path.exists(fname):\n logger.error(\"store file not found: %s\" % fname)\n raise RuntimeError(\"store file not found: %s\" % fname)\n\n file = pd.HDFStore(fname, mode='r')\n pipeline.close_on_exit(file, fname)\n\n return file\n\n\[email protected](cache=True)\ndef cache_skim_key_values(settings):\n return settings['time_periods']['labels']\n\n\[email protected](cache=True)\ndef households_sample_size(settings):\n return settings.get('households_sample_size', 0)\n\n\[email protected](cache=True)\ndef chunk_size(settings):\n return int(settings.get('chunk_size', 0))\n\n\[email protected](cache=True)\ndef check_for_variability(settings):\n return bool(settings.get('check_for_variability', False))\n\n\[email protected](cache=True)\ndef trace_hh_id(settings):\n\n id = settings.get('trace_hh_id', None)\n\n if id and not isinstance(id, int):\n logger.warn(\"setting trace_hh_id is wrong type, should be an int, but was %s\" % type(id))\n id = None\n\n return id\n\n\[email protected]()\ndef trace_person_ids():\n # overridden by register_persons if trace_hh_id is defined\n return []\n\n\[email protected]()\ndef trace_tour_ids():\n # overridden by register_tours if trace_hh_id is defined\n return []\n\n\[email protected](cache=True)\ndef hh_index_name(settings):\n # overridden by register_households if trace_hh_id is defined\n return None\n\n\[email protected](cache=True)\ndef persons_index_name(settings):\n # overridden by register_persons if trace_hh_id is defined\n return None\n\n\[email protected](cache=True)\ndef trace_od(settings):\n\n od = settings.get('trace_od', None)\n\n if od and not (isinstance(od, list) and len(od) == 2 and all(isinstance(x, int) for x in od)):\n logger.warn(\"setting trace_od is wrong type, should be a list of length 2, but was %s\" % od)\n od = None\n\n return od\n\n\[email protected](cache=True)\ndef enable_trace_log(trace_hh_id, trace_od):\n return (trace_hh_id or trace_od)\n", "id": "2980014", "language": "Python", "matching_score": 1.7534945011138916, "max_stars_count": 0, 
"path": "activitysim/activitysim/abm/misc.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\nimport os\n\nimport orca\nimport pandas as pd\nimport numpy as np\n\nfrom activitysim.core import assign\nfrom activitysim.core import tracing\nfrom activitysim.core import config\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass AccessibilitySkims(object):\n \"\"\"\n Wrapper for skim arrays to facilitate use of skims by accessibility model\n\n Parameters\n ----------\n skims : 2D array\n omx: open omx file object\n this is only used to load skims on demand that were not preloaded\n length: int\n number of zones in skim to return in skim matrix\n in case the skims contain additional external zones that should be trimmed out so skim\n array is correct shape to match (flattened) O-D tiled columns in the od dataframe\n transpose: bool\n whether to transpose the matrix before flattening. (i.e. act as a D-O instead of O-D skim)\n \"\"\"\n\n def __init__(self, skim_dict, omx, length, transpose=False):\n self.skim_dict = skim_dict\n self.omx = omx\n self.length = length\n self.transpose = transpose\n\n def __getitem__(self, key):\n \"\"\"\n accessor to return flattened skim array with specified key\n flattened array will have length length*length and will match tiled OD df used by assign\n\n this allows the skim array to be accessed from expressions as\n skim['DISTANCE'] or skim[('SOVTOLL_TIME', 'MD')]\n \"\"\"\n try:\n data = self.skim_dict.get(key).data\n except KeyError:\n omx_key = '__'.join(key)\n logger.info(\"AccessibilitySkims loading %s from omx as %s\" % (key, omx_key,))\n data = self.omx[omx_key]\n\n data = data[:self.length, :self.length]\n\n if self.transpose:\n return data.transpose().flatten()\n else:\n return data.flatten()\n\n\[email protected]()\ndef accessibility_spec(configs_dir):\n f = os.path.join(configs_dir, 'accessibility.csv')\n return assign.read_assignment_spec(f)\n\n\[email protected]()\ndef accessibility_settings(configs_dir):\n return config.read_model_settings(configs_dir, 'accessibility.yaml')\n\n\[email protected]()\ndef compute_accessibility(settings, accessibility_spec,\n accessibility_settings,\n skim_dict, omx_file, land_use, trace_od):\n\n \"\"\"\n Compute accessibility for each zone in land use file using expressions from accessibility_spec\n\n The actual results depend on the expressions in accessibility_spec, but this is initially\n intended to permit implementation of the mtc accessibility calculation as implemented by\n Accessibility.job\n\n Compute measures of accessibility used by the automobile ownership model.\n The accessibility measure first multiplies an employment variable by a mode-specific decay\n function. The product reflects the difficulty of accessing the activities the farther\n (in terms of round-trip travel time) the jobs are from the location in question. The products\n to each destination zone are next summed over each origin zone, and the logarithm of the\n product mutes large differences. The decay function on the walk accessibility measure is\n steeper than automobile or transit. 
The minimum accessibility is zero.\n \"\"\"\n\n logger.info(\"Running compute_accessibility\")\n\n constants = config.get_model_constants(accessibility_settings)\n land_use_columns = accessibility_settings.get('land_use_columns', [])\n\n land_use_df = land_use.to_frame()\n\n zone_count = len(land_use_df.index)\n\n # create OD dataframe\n od_df = pd.DataFrame(\n data={\n 'orig': np.repeat(np.asanyarray(land_use_df.index), zone_count),\n 'dest': np.tile(np.asanyarray(land_use_df.index), zone_count)\n }\n )\n\n if trace_od:\n trace_orig, trace_dest = trace_od\n trace_od_rows = (od_df.orig == trace_orig) & (od_df.dest == trace_dest)\n else:\n trace_od_rows = None\n\n # merge land_use_columns into od_df\n land_use_df = land_use_df[land_use_columns]\n od_df = pd.merge(od_df, land_use_df, left_on='dest', right_index=True).sort_index()\n\n locals_d = {\n 'log': np.log,\n 'exp': np.exp,\n 'skim_od': AccessibilitySkims(skim_dict, omx_file, zone_count),\n 'skim_do': AccessibilitySkims(skim_dict, omx_file, zone_count, transpose=True)\n }\n if constants is not None:\n locals_d.update(constants)\n\n results, trace_results, trace_assigned_locals \\\n = assign.assign_variables(accessibility_spec, od_df, locals_d, trace_rows=trace_od_rows)\n accessibility_df = pd.DataFrame(index=land_use.index)\n for column in results.columns:\n data = np.asanyarray(results[column])\n data.shape = (zone_count, zone_count)\n accessibility_df[column] = np.log(np.sum(data, axis=1) + 1)\n\n orca.add_column(\"accessibility\", column, accessibility_df[column])\n\n if trace_od:\n\n if not trace_od_rows.any():\n logger.warn(\"trace_od not found origin = %s, dest = %s\" % (trace_orig, trace_dest))\n else:\n\n # add OD columns to trace results\n df = pd.concat([od_df[trace_od_rows], trace_results], axis=1)\n\n # dump the trace results table (with _temp variables) to aid debugging\n # note that this is not the same as the orca-injected accessibility table\n # FIXME - should we name this differently and also dump the updated accessibility table?\n tracing.trace_df(df,\n label='accessibility',\n index_label='skim_offset',\n slicer='NONE',\n warn_if_empty=True)\n\n if trace_assigned_locals:\n tracing.write_csv(trace_assigned_locals, file_name=\"accessibility_locals\")\n\n tracing.trace_df(orca.get_table('persons_merged').to_frame(), \"persons_merged\",\n warn_if_empty=True)\n", "id": "9092047", "language": "Python", "matching_score": 3.8631622791290283, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/accessibility.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\nimport os\n\nimport numpy as np\nimport pandas as pd\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef undupe_column_names(df, template=\"{} ({})\"):\n \"\"\"\n rename df column names so there are no duplicates (in place)\n\n e.g. 
if there are two columns named \"dog\", the second column will be reformatted to \"dog (2)\"\n\n Parameters\n ----------\n df : pandas.DataFrame\n dataframe whose column names should be de-duplicated\n template : template taking two arguments (old_name, int) to use to rename columns\n\n Returns\n -------\n df : pandas.DataFrame\n dataframe that was renamed in place, for convenience in chaining\n \"\"\"\n\n new_names = []\n seen = set()\n for name in df.columns:\n n = 1\n new_name = name\n while new_name in seen:\n n += 1\n new_name = template.format(name, n)\n new_names.append(new_name)\n seen.add(new_name)\n df.columns = new_names\n return df\n\n\ndef read_assignment_spec(fname,\n description_name=\"Description\",\n target_name=\"Target\",\n expression_name=\"Expression\"):\n \"\"\"\n Read a CSV model specification into a Pandas DataFrame or Series.\n\n The CSV is expected to have columns for component descriptions\n targets, and expressions,\n\n The CSV is required to have a header with column names. For example:\n\n Description,Target,Expression\n\n Parameters\n ----------\n fname : str\n Name of a CSV spec file.\n description_name : str, optional\n Name of the column in `fname` that contains the component description.\n target_name : str, optional\n Name of the column in `fname` that contains the component target.\n expression_name : str, optional\n Name of the column in `fname` that contains the component expression.\n\n Returns\n -------\n spec : pandas.DataFrame\n dataframe with three columns: ['description' 'target' 'expression']\n \"\"\"\n\n cfg = pd.read_csv(fname, comment='#')\n\n # drop null expressions\n # cfg = cfg.dropna(subset=[expression_name])\n\n cfg.rename(columns={target_name: 'target',\n expression_name: 'expression',\n description_name: 'description'},\n inplace=True)\n\n # backfill description\n if 'description' not in cfg.columns:\n cfg.description = ''\n\n cfg.target = cfg.target.str.strip()\n cfg.expression = cfg.expression.str.strip()\n\n return cfg\n\n\nclass NumpyLogger(object):\n def __init__(self, logger):\n self.logger = logger\n self.target = ''\n self.expression = ''\n\n def write(self, msg):\n self.logger.error(\"numpy warning: %s\" % (msg.rstrip()))\n self.logger.error(\"expression: %s = %s\" % (str(self.target), str(self.expression)))\n\n\ndef assign_variables(assignment_expressions, df, locals_dict, df_alias=None, trace_rows=None):\n \"\"\"\n Evaluate a set of variable expressions from a spec in the context\n of a given data table.\n\n Expressions are evaluated using Python's eval function.\n Python expressions have access to variables in locals_d (and df being\n accessible as variable df.) They also have access to previously assigned\n targets as the assigned target name.\n\n lowercase variables starting with underscore are temp variables (e.g. _local_var)\n and not returned except in trace_restults\n\n uppercase variables starting with underscore are temp variables (e.g. 
_LOCAL_SCALAR)\n and not returned except in trace_assigned_locals\n This is useful for defining general purpose local constants in expression file\n\n Users should take care that expressions should result in\n a Pandas Series (scalars will be automatically promoted to series.)\n\n Parameters\n ----------\n assignment_expressions : pandas.DataFrame of target assignment expressions\n target: target column names\n expression: pandas or python expression to evaluate\n df : pandas.DataFrame\n locals_d : Dict\n This is a dictionary of local variables that will be the environment\n for an evaluation of \"python\" expression.\n trace_rows: series or array of bools to use as mask to select target rows to trace\n\n Returns\n -------\n variables : pandas.DataFrame\n Will have the index of `df` and columns named by target and containing\n the result of evaluating expression\n trace_df : pandas.DataFrame or None\n a dataframe containing the eval result values for each assignment expression\n \"\"\"\n\n np_logger = NumpyLogger(logger)\n\n def is_local(target):\n return target.startswith('_') and target.isupper()\n\n def is_temp(target):\n return target.startswith('_')\n\n def to_series(x, target=None):\n if x is None or np.isscalar(x):\n if target:\n logger.warn(\"WARNING: assign_variables promoting scalar %s to series\" % target)\n return pd.Series([x] * len(df.index), index=df.index)\n return x\n\n trace_assigned_locals = trace_results = None\n if trace_rows is not None:\n # convert to numpy array so we can slice ndarrays as well as series\n trace_rows = np.asanyarray(trace_rows)\n if trace_rows.any():\n trace_results = []\n trace_assigned_locals = {}\n\n # avoid touching caller's passed-in locals_d parameter (they may be looping)\n locals_dict = locals_dict.copy() if locals_dict is not None else {}\n if df_alias:\n locals_dict[df_alias] = df\n else:\n locals_dict['df'] = df\n local_keys = locals_dict.keys()\n\n l = []\n # need to be able to identify which variables causes an error, which keeps\n # this from being expressed more parsimoniously\n for e in zip(assignment_expressions.target, assignment_expressions.expression):\n target, expression = e\n\n if target in local_keys:\n logger.warn(\"assign_variables target obscures local_d name '%s'\" % str(target))\n\n if is_local(target):\n x = eval(expression, globals(), locals_dict)\n locals_dict[target] = x\n if trace_assigned_locals is not None:\n trace_assigned_locals[target] = x\n continue\n\n try:\n\n # FIXME - log any numpy warnings/errors but don't raise\n np_logger.target = str(target)\n np_logger.expression = str(expression)\n saved_handler = np.seterrcall(np_logger)\n save_err = np.seterr(all='log')\n\n values = to_series(eval(expression, globals(), locals_dict), target=target)\n\n np.seterr(**save_err)\n np.seterrcall(saved_handler)\n\n except Exception as err:\n logger.error(\"assign_variables error: %s: %s\" % (type(err).__name__, str(err)))\n\n logger.error(\"assign_variables expression: %s = %s\"\n % (str(target), str(expression)))\n\n # values = to_series(None, target=target)\n raise err\n\n l.append((target, values))\n\n if trace_results is not None:\n trace_results.append((target, values[trace_rows]))\n\n # update locals to allows us to ref previously assigned targets\n locals_dict[target] = values\n\n # build a dataframe of eval results for non-temp targets\n # since we allow targets to be recycled, we want to only keep the last usage\n # we scan through targets in reverse order and add them to the front of the list\n # the first time 
we see them so they end up in execution order\n variables = []\n seen = set()\n for statement in reversed(l):\n # statement is a tuple (<target_name>, <eval results in pandas.Series>)\n target_name = statement[0]\n if not is_temp(target_name) and target_name not in seen:\n variables.insert(0, statement)\n seen.add(target_name)\n\n # DataFrame from list of tuples [<target_name>, <eval results>), ...]\n variables = pd.DataFrame.from_items(variables)\n\n if trace_results is not None:\n\n trace_results = pd.DataFrame.from_items(trace_results)\n trace_results.index = df[trace_rows].index\n\n trace_results = undupe_column_names(trace_results)\n\n # add df columns to trace_results\n trace_results = pd.concat([df[trace_rows], trace_results], axis=1)\n\n return variables, trace_results, trace_assigned_locals\n", "id": "6735381", "language": "Python", "matching_score": 0.8836773633956909, "max_stars_count": 0, "path": "activitysim/activitysim/core/assign.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport itertools as it\n\ngeography = 'taz'\n\n\n# loosely borrowed from https://gist.github.com/haleemur/aac0ac216b3b9103d149\ndef format_df(df, formatters=None, **kwargs):\n formatting_columns = list(set(formatters.keys()).intersection(df.columns))\n df_copy = df[formatting_columns].copy()\n na_rep = kwargs.get('na_rep') or ''\n for col, formatter in formatters.items():\n try:\n df[col] = df[col].apply(lambda x: na_rep if pd.isnull(x)\n else formatter.format(x))\n except KeyError:\n print('{} does not exist in the dataframe.'.format(col)) +\\\n 'Ignoring the formatting specifier'\n return df\n\n\ndef get_base_year_df(base_run_year=2010):\n geography_id = 'zone_id' if geography == 'taz' else geography\n df = pd.read_csv('output/baseyear_{}_summaries_{}.csv'.format(geography, base_run_year),\n index_col=geography_id)\n df = df.fillna(0)\n return df\n\n\ndef get_outcome_df(run, year=2040):\n geography_id = 'zone_id' if geography == 'taz' else geography\n df = pd.read_csv(\n 'http://urbanforecast.com/runs/run%(run)d_%(geography)s_summaries_%(year)d.csv'\n % {\"run\": run, \"year\": year, \"geography\": geography},\n index_col=geography_id)\n df = df.fillna(0)\n return df\n\n\ndef write_outcome_csv(df, run, geography, year=2040):\n geography_id = 'zone_id' if geography == 'taz' else geography\n f = 'runs/run%(run)d_%(geography)s_summaries_%(year)d.csv' \\\n % {\"run\": run, \"year\": year, \"geography\": geography}\n df = df.fillna(0)\n df.to_csv(f)\n\n\ndef compare_series(base_series, outcome_series, index):\n s = base_series\n s1 = outcome_series\n d = {\n 'Count': s1,\n 'Share': s1 / s1.sum(),\n 'Percent_Change': 100 * (s1 - s) / s,\n 'Share_Change': (s1 / s1.sum()) - (s / s.sum())\n }\n # there must be a less verbose way to do this:\n columns = ['Count', 'Share', 'Percent_Change',\n 'Share_Change']\n df = pd.DataFrame(d, index=index, columns=columns)\n return df\n\n\ndef compare_outcome(run, base_series, formatters):\n df = get_outcome_df(run)\n s = df[base_series.name]\n df = compare_series(base_series, s, df.index)\n df = format_df(df, formatters)\n return df\n\n\ndef remove_characters(word, characters=b' _aeiou'):\n return word.translate(None, characters)\n\n\ndef make_esri_columns(df):\n df.columns = [str(x[0]) + str(x[1]) for x in df.columns]\n df.columns = [remove_characters(x) for x in df.columns]\n return df\n df.to_csv(f)\n\n\ndef to_esri_csv(df, variable, runs):\n f = 'compare/esri_' +\\\n '%(variable)s_%(runs)s.csv'\\\n % {\"variable\": variable,\n \"runs\": '-'.join(str(x) 
for x in runs)}\n df = make_esri_columns(df)\n df.to_csv(f)\n\ndef write_bundle_comparison_csv(df, variable, runs):\n df = make_esri_columns(df)\n if variable==\"tothh\" or variable == \"TOTHH\":\n headers = ['hh10', 'hh10_shr', 'hh40_0', 'hh40_0_shr',\n 'pctch40_0', 'Shrch40_0', 'hh40_3', 'hh40_3_shr',\n 'pctch40_3', 'shrch40_3', 'hh40_1', 'hh40_1_shr',\n 'pctch40_1', 'shrch40_1', 'hh40_2', 'hh40_2_shr',\n 'pctch40_2', 'shrch40_2', '3_40_0_40_rat',\n '1_40_0_40_rat', '2_40_0_40_rat']\n df.columns = headers\n df = df[['hh10', 'hh10_shr', 'hh40_0', 'hh40_0_shr',\n 'pctch40_0', 'Shrch40_0', 'hh40_3', 'hh40_3_shr', 'pctch40_3',\n 'shrch40_3', '3_40_0_40_rat', 'hh40_1', 'hh40_1_shr', 'pctch40_1',\n 'shrch40_1', '1_40_0_40_rat', 'hh40_2', 'hh40_2_shr', 'pctch40_2',\n 'shrch40_2', '2_40_0_40_rat']]\n elif variable==\"totemp\" or variable == \"TOTEMP\":\n headers = ['emp10', 'emp10_shr', 'emp40_0',\n 'emp40_0_shr', 'pctch40_0', 'Shrch40_0', 'emp40_3',\n 'emp40_3_shr', 'pctch40_3', 'shrch40_3', 'emp40_1',\n 'emp40_1_shr', 'pctch40_1', 'shrch40_1', 'emp40_2',\n 'emp40_2_shr', 'pctch40_2', 'shrch40_2', '3_40_0_40_rat',\n '1_40_0_40_rat', '2_40_0_40_rat']\n df.columns = headers\n df = df[['emp10', 'emp10_shr', 'emp40_0',\n 'emp40_0_shr', 'pctch40_0', 'Shrch40_0', 'emp40_3',\n 'emp40_3_shr', 'pctch40_3', 'shrch40_3', '3_40_0_40_rat',\n 'emp40_1', 'emp40_1_shr', 'pctch40_1', 'shrch40_1',\n '1_40_0_40_rat', 'emp40_2', 'emp40_2_shr', 'pctch40_2',\n 'shrch40_2', '2_40_0_40_rat']]\n cut_variable_name = variable[3:]\n f = 'compare/' + \\\n '%(geography)s_%(variable)s_%(runs)s.csv'\\\n % {\"geography\": geography,\n \"variable\": cut_variable_name,\n \"runs\": '_'.join(str(x) for x in runs)}\n df.to_csv(f)\n\n\ndef write_csvs(df, variable, runs):\n f = 'compare/' +\\\n '%(variable)s_%(runs)s.csv'\\\n % {\"variable\": variable,\n \"runs\": '-'.join(str(x) for x in runs)}\n #df.to_csv(f)\n write_bundle_comparison_csv(df, variable, runs)\n\n\ndef divide_series(a_tuple, variable):\n s = get_outcome_df(a_tuple[0])[variable]\n s1 = get_outcome_df(a_tuple[1])[variable]\n s2 = s1 / s\n s2.name = str(a_tuple[1]) + '/' + str(a_tuple[0])\n return s2\n\n\ndef get_combinations(nparray):\n return pd.Series(list(it.combinations(np.unique(nparray), 2)))\n\n\ndef compare_outcome_for(variable, runs, set_geography):\n global geography\n geography = set_geography\n # empty list to build up dataframe from other dataframes\n base_year_df = get_base_year_df()\n df_lst = []\n s = base_year_df[variable]\n s1 = s / s.sum()\n d = {\n 'Count': s,\n 'Share': s1\n }\n\n df = pd.DataFrame(d, index=base_year_df.index)\n if geography == 'superdistrict':\n formatters = {'Count': '{:.0f}',\n 'Share': '{:.2f}'}\n df = pd.DataFrame(d, index=base_year_df.index)\n df = format_df(df, formatters)\n df_lst.append(df)\n more_formatters = {'Count': '{:.0f}',\n 'Share': '{:.2f}',\n 'Percent_Change': '{:.0f}',\n 'Share_Change': '{:.3f}'}\n for run in runs:\n df_lst.append(compare_outcome(run, s, more_formatters))\n else:\n formatters = {'Count': '{:.4f}',\n 'Share': '{:.6f}'}\n df = pd.DataFrame(d, index=base_year_df.index)\n df = format_df(df, formatters)\n df_lst.append(df)\n more_formatters = {'Count': '{:.4f}',\n 'Share': '{:.6f}',\n 'Percent_Change': '{:.6f}',\n 'Share_Change': '{:.6f}'}\n for run in runs:\n df_lst.append(compare_outcome(run, s, more_formatters))\n\n # build up dataframe of ratios of run count variables to one another\n if len(runs) > 1:\n ratios = pd.DataFrame()\n combinations = get_combinations(runs)\n #just compare no no project 
right now\n s2 = divide_series((runs[0],runs[1]), variable)\n ratios[s2.name] = s2\n s2 = divide_series((runs[0],runs[2]), variable)\n ratios[s2.name] = s2\n s2 = divide_series((runs[0],runs[3]), variable)\n ratios[s2.name] = s2\n df_rt = pd.DataFrame(ratios)\n formatters = {}\n for column in df_rt.columns:\n formatters[column] = '{:.2f}'\n df_rt = format_df(df_rt, formatters)\n df_lst.append(df_rt)\n\n # build up summary names to the first level of the column multiindex\n keys = ['', 'BaseRun2010']\n run_column_shortnames = ['r' + str(x) + 'y40' for x in runs]\n keys.extend(run_column_shortnames)\n keys.extend(['y40Ratios'])\n\n\n df2 = pd.concat(df_lst, axis=1, keys=keys)\n\n write_csvs(df2, variable, runs)\n\n\ndef subtract_base_year_urban_footprint(run_number):\n base_year_filename = 'runs/run{}_urban_footprint_summary_summaries_{}.csv'.format(run_number,2010)\n bdf = pd.read_csv(base_year_filename, index_col=0)\n outcome_year_filename = 'runs/run{}_urban_footprint_summary_summaries_{}.csv'.format(run_number,2040)\n odf = pd.read_csv(outcome_year_filename, index_col=0)\n sdf = odf - bdf\n sdf.to_csv('runs/run{}_urban_footprint_subtracted_summaries_{}.csv'.format(run_number,2040))\n", "id": "403343", "language": "Python", "matching_score": 2.659890651702881, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/output_csv_utils.py" }, { "content": "from output_csv_utils import compare_outcome_for\nimport sys\n\n# compares outcomes from simulation runs\n# example usage:\n# python scripts/compare_output.py 556 572 611\n\nruns = map(int, sys.argv[1:])\n\n#compare_outcome_for('tothh', set_geography='pda', runs=runs)\n#compare_outcome_for('totemp', set_geography='pda', runs=runs)\ncompare_outcome_for('tothh', set_geography='juris', runs=runs)\ncompare_outcome_for('totemp', set_geography='juris', runs=runs)\n#compare_outcome_for('tothh', set_geography='superdistrict', runs=runs)\n#compare_outcome_for('totemp', set_geography='superdistrict', runs=runs)\n#compare_outcome_for('TOTHH', set_geography='taz', runs=runs)\n#compare_outcome_for('TOTEMP', set_geography='taz', runs=runs)", "id": "9977440", "language": "Python", "matching_score": 0.3203200697898865, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/compare_output.py" }, { "content": "import pandas as pd\nimport orca\nimport sys\nsys.path.append(\".\")\nimport models\n\n# make sure to use the baseline zoning, even if set differently for urbansim\norca.get_injectable(\"settings\").update(scenario=\"baseline\")\n\nparcels = orca.get_table(\"parcels_zoning_calculations\")\nparcels_geography = orca.get_table(\"parcels_geography\")\n\ndf = parcels.to_frame([\"geom_id\", \"total_residential_units\", \"zoned_du\",\n \"zoned_du_underbuild\", \"zoned_du_underbuild_nodev\", \n \"effective_max_dua\",\"effective_max_office_far\",\n # \"office_allowed\",\"retail_allowed\",\"industrial_allowed\",\n # \"cat_r\"\n # \"office_high\",\"office_medium\",\"office_low\",\n \"non_res_categories\"\n ])\ndf.to_csv(\"output/parcel_zoning_capacity.csv\")", "id": "10967393", "language": "Python", "matching_score": 4.000761032104492, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/capacity_calculator_parcels.py" }, { "content": "import pandas as pd\nimport orca\nimport sys\nsys.path.append(\".\")\nimport models\n\n# make sure to use the baseline zoning, even if set differently for urbansim\norca.get_injectable(\"settings\").update(scenario=\"baseline\")\n\nparcels = orca.get_table(\"parcels_zoning_calculations\")\nparcels_geography = 
orca.get_table(\"parcels_geography\")\n\ndf = parcels.to_frame([\"geom_id\", \"total_residential_units\", \"zoned_du\",\n \"zoned_du_underbuild\", \"zoned_du_underbuild_nodev\"\n ])\n\ndf[\"juris_name\"] = parcels_geography.juris_name\ndf[\"juris_id\"] = parcels_geography.jurisdiction\ndf = df.set_index(\"geom_id\")\n\ndf_filt = df.query(\"zoned_du_underbuild_nodev > 0\")\nprint \"Number of parcels with value > 0 = %d\" % len(df_filt)\n\ndf_filt.to_csv('/var/www/html/scratchpad/bayarea_softsites.csv')\n\ndf = df.groupby([\"juris_name\", \"juris_id\"]).sum()\ndf[\"total_residential_units\"] = \\\n df.total_residential_units.fillna(0).astype('int')\n\ndf.to_csv(\"output/city_capacity.csv\")\n", "id": "2953786", "language": "Python", "matching_score": 1.8197085857391357, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/capacity_calculator.py" }, { "content": "import sys\nimport orca\nsys.path.append(\".\")\nimport baus.models\nimport pandas as pd\nimport numpy as np\n\nfnames = [\n \"nodev\",\n \"manual_nodev\",\n \"oldest_building_age\",\n \"sdem\",\n \"apn\",\n \"parcel_id\",\n \"geom_id\",\n \"total_job_spaces\",\n \"total_sqft\",\n \"total_non_residential_sqft\",\n \"total_residential_units\",\n \"juris\",\n \"county\",\n \"pda\",\n \"zoned_du\",\n \"zoned_du_underbuild\",\n \"parcel_size\",\n \"parcel_acres\",\n \"oldest_building\",\n \"first_building_type_id\",\n \"general_type\",\n \"height\"\n]\n\ndf = orca.get_table(\"parcels\").to_frame(fnames)\ndf = df.join(orca.get_table(\"parcels_zoning_by_scenario\").to_frame())\n\nzoning = pd.read_csv('data/2015_10_06_zoning_parcels.csv', index_col=\"geom_id\")\ndf[\"zoning_id\"] = zoning.zoning_id.loc[df.geom_id].values\n\nsettings = orca.get_injectable(\"settings\")\n\n# filter buildings as urbansim does\n# f = settings[\"feasibility\"][\"parcel_filter\"]\n# df = df.query(f)\n\n# get building types\ndf[\"building_type\"] = \\\n df.first_building_type_id.map(settings[\"building_type_map2\"])\n\ndf[\"oldest_building\"][df.oldest_building > 2200] = np.nan\n\n# after filter can drop a few fields\ndf = df.drop([\"nodev\", \"oldest_building_age\", \n \"manual_nodev\", \"first_building_type_id\"], axis=1)\n\ndf.to_csv(\"parcels.csv\")\n", "id": "2471253", "language": "Python", "matching_score": 1.8073134422302246, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/export.py" }, { "content": "import pandas as pd\nimport orca\nimport sys\nimport os\nfrom urbansim.utils import misc\nsys.path.append(\".\")\nimport datasources\n\[email protected]('zcsv', cache=True)\ndef zcsv():\n df = pd.read_csv(os.path.join(misc.data_dir(),\n \"2015_12_21_zoning_parcels.csv\"),\n index_col=\"geom_id\")\n return df\n\nzb = orca.get_table(\"zoning_baseline\")\nzl = orca.get_table(\"zoning_lookup\")\nz = orca.get_table(\"zcsv\")\n\nzdf = z.to_frame()\n\nnull_df = zdf.loc[zdf.zoning_id.isnull(),:]\nprint \"there are \" + str(len(null_df.index)) + \" empty zoning ids\"\nprint \"number of parcels with null values by city:\"\nprint null_df.tablename.value_counts()\n\nprint \"number of parcels with null values by source zoning code by city:\"\nfor ix, val in null_df.tablename.value_counts().iteritems():\n\tif val>5:\n\t\tprint ix\n\t\tprint null_df[null_df.tablename==ix].zoning.value_counts()\n\nzl_df = zl.to_frame()\n\nzlcn = orca.get_table(\"zoning_table_city_lookup\")\nzlcndf = zlcn.to_frame()\nzl_df['zoning_lookup_table_id'] = zl_df.index\nzldf_tbl_nm = pd.merge(zl_df,zlcndf,how='left',left_on='city',right_on='city_name')\nzl_df = 
zldf_tbl_nm\n\nnull_df['geom_id'] = null_df.index\nmdf = pd.merge(null_df,zl_df,how='inner', right_on=['name','tablename'], left_on=['zoning','tablename'])\nmdf = mdf.set_index(mdf.geom_id)\n\nprint \"replaced \" + str(len(mdf.index)) + \" empty zoning ids\"\nzdf.loc[mdf.index,'zoning_id'] = mdf['zoning_lookup_table_id']\n\nnull_df = zdf.loc[zdf.zoning_id.isnull(),:]\nprint \"there are \" + str(len(null_df.index)) + \" empty zoning ids\"\n\nprint \"number of parcels with null values by city:\"\nprint null_df.tablename.value_counts()\n\nimport datetime\nx = datetime.date.today()\ncsvname = 'data/' + str(x.year) + '_' + str(x.month) + '_' + str(x.day) + '_zoning_parcels.csv'\n\nzdf.to_csv(csvname)\n\n", "id": "5660971", "language": "Python", "matching_score": 0.8307190537452698, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/fix_zoning_missing_id.py" }, { "content": "import pandas as pd, numpy as np\nimport pandas.io.sql as sql\nfrom pandas.io.excel import read_excel\nfrom spandex.io import exec_sql, df_to_db\nfrom spandex import TableLoader\n\nloader = TableLoader()\n\ndef db_to_df(query):\n \"\"\"Executes SQL query and returns DataFrame.\"\"\"\n conn = loader.database._connection\n return sql.read_frame(query, conn)\n\ndef csv_to_staging(path, table_name):\n \"\"\"Loads csv to staging schema on database\"\"\"\n df = pd.read_csv(path)\n df.index.name = 'idx'\n print 'Loading %s.' % table_name\n df_to_db(df, table_name, schema=loader.tables.staging)\n \ndef field_in_table(field_name, table_name):\n \"\"\"True if field in table else False\"\"\"\n return field_name in db_to_df(\"SELECT column_name FROM information_schema.columns WHERE table_name='%s'\" % table_name).column_name.values\n \ndef lat_long_to_point_geometry(tbl, schema, x_col = 'longitude', y_col = 'latitude', geom_field_name = 'geom', target_srid = 2768):\n \"\"\"Creates point geometry on db table based on lat long fields\"\"\"\n print 'Creating point geometry from lat/long in table %s.' 
% tbl \n if not field_in_table(geom_field_name, tbl):\n exec_sql(\"ALTER TABLE %s.%s ADD COLUMN %s geometry;\" % (schema, tbl, geom_field_name))\n exec_sql(\"UPDATE %s.%s SET %s = ST_GeomFromText('POINT(' || %s || ' ' || %s || ')',4326);\" % \n (schema, tbl, geom_field_name, x_col, y_col))\n exec_sql(\"CREATE INDEX %s_gidx on %s.%s using gist (%s);\" % (tbl, schema, tbl, geom_field_name))\n exec_sql(\"SELECT UpdateGeometrySRID('%s', '%s', '%s', %s);\" % (schema, tbl, geom_field_name, target_srid))\n exec_sql(\"UPDATE %s.%s SET %s = ST_TRANSFORM(ST_SetSRID(%s, 4326), %s);\" % (schema, tbl, geom_field_name, geom_field_name, target_srid))\n \ndef append_parcel_identifier(tbl, schema, tbl_geom, parcel_identifier):\n \"\"\"Append parcel identifier to target table \"\"\"\n print 'Appending parcel identifier to %s' % tbl\n if not field_in_table(parcel_identifier, tbl):\n exec_sql(\"alter table %s.%s add %s integer default 0;\" %(schema, tbl, parcel_identifier))\n exec_sql(\"update %s.%s set %s = a.%s from parcels a where st_contains(a.geom, %s.%s.%s);\" %\n (schema, tbl, parcel_identifier, parcel_identifier, schema, tbl, tbl_geom))\n \ndef imputation_variable(df, attribute, agg_function, lower_bound, upper_bound):\n # Summarize imputation data at parcel level and set bounds on valid values\n sr_grouped = df.groupby('gid')[attribute]\n if agg_function == 'median':\n var = sr_grouped.median()\n if agg_function == 'max':\n var = sr_grouped.max()\n if agg_function == 'sum':\n var = sr_grouped.sum()\n var = var[(var > lower_bound) & (var < upper_bound)] #set bounds on valid values to use for impute\n return var\n\n\n######## *LOADING* ########\n\n#### REDFIN\n# Read Redfin CSV and load to database\ncsv_to_staging(loader.get_path('built/bldg/homeprices/redfin_03feb14.csv'), 'redfin')\n# Lat/long to point geometry, with the right SRID\nlat_long_to_point_geometry('redfin', 'staging', 'longitude', 'latitude', 'geom', 2768)\n# Append the unique parcel identifier to the Redfin records\nappend_parcel_identifier('redfin', 'staging', 'geom', 'gid')\n\n#### GOV BUILDINGS\n# Read Gov Building CSV and load to database\ncsv_to_staging(loader.get_path('built/bldg/add_buildings1.csv'), 'public_bldgs')\n# Lat/long to point geometry, with the right SRID\nlat_long_to_point_geometry('public_bldgs', 'staging', 'x', 'y', 'geom', 2768)\n# Append the unique parcel identifier to the Gov Building records\nappend_parcel_identifier('public_bldgs', 'staging', 'geom', 'gid')\n\n#### COSTAR\ncostar_xls_path = loader.get_path('built/bldg/costar/2011/costar_allbayarea.xlsx')\ncostar = pd.read_excel(costar_xls_path)\ncostar_solano_path = loader.get_path('built/bldg/costar/2011/costar__clean2011_sol_020315.csv')\ncostar_sol = pd.read_csv(costar_solano_path)\ncostar2 = costar[['PropertyID', 'Building Name', 'Latitude', 'Longitude', 'Rentable Building Area', 'Year Built', 'PropertyType', 'Secondary Type', 'Total Available Space (SF)', 'Number Of Elevators', 'Last Sale Date', 'Last Sale Price', 'Average Weighted Rent', 'Number Of Stories']]\ncostar_sol2 = costar_sol[['PropertyID', 'Building Name', 'Latitude', 'Longitude', 'Rentable Building Area', 'Year Built', 'PropertyType', 'Secondary Type', 'Total Available Space (SF)', 'Number Of Elevators', 'Last Sale Date', 'Last Sale Price', 'Average Weighted Rent', 'Number Of Stories']]\ncostar2.columns = ['propertyid', 'building_name', 'latitude', 'longitude', 'rentable_area', 'year_built', 'property_type', 'secondary_type', 'available_space', 'elevators', 'last_sale_date', 'last_sale_price', 
'rent', 'stories']\ncostar_sol2.columns = ['propertyid', 'building_name', 'latitude', 'longitude', 'rentable_area', 'year_built', 'property_type', 'secondary_type', 'available_space', 'elevators', 'last_sale_date', 'last_sale_price', 'rent', 'stories']\ncostar2 = pd.concat([costar2, costar_sol2])\nfor tex_col in ['building_name', 'property_type', 'secondary_type', 'last_sale_date', ]:\n costar2[tex_col] = costar2[tex_col].fillna(' ')\n costar2[tex_col] = costar2[tex_col].str.encode('utf-8')\ncostar2.last_sale_date = costar2.last_sale_date.fillna(' ')\ncostar2.last_sale_price = costar2.last_sale_price.str.replace(\",\", \"\").astype('float')\ncostar2.stories = costar2.stories.fillna(0).astype('int32')\ncostar2.index.name = 'idx'\ndf_to_db(costar2, 'costar', schema=loader.tables.staging)\n##Lat/long to point geometry, with the right SRID\nlat_long_to_point_geometry('costar', 'staging', 'longitude', 'latitude', 'geom', 2768)\n##Append the unique parcel identifier to the Costar records\nappend_parcel_identifier('costar', 'staging', 'geom', 'gid')\n\n\n######## *IMPUTE* ########\nprint 'Start point-based impute.'\n\n## Load dataframes for the imputation\nparcels = db_to_df('select gid, year_built, sqft_per_unit, non_residential_sqft, stories, imputation_flag from parcels;').set_index('gid')\ncostar = db_to_df('select * from staging.costar;')\nredfin = db_to_df('select gid, yearbuilt, sqft, lastsalepr, saleyear, hometype from staging.redfin;')\ngov_buildings = db_to_df('select * from staging.public_bldgs;')\ngov_buildings.sqft = gov_buildings.sqft.str.replace(',', '').astype('int')\n\n## Assign imputation variables to parcels df\n# Redfin\nparcels['redfin_year_built'] = imputation_variable(redfin, 'yearbuilt', 'median', 1800, 2016)\nparcels['redfin_sqft_per_unit'] = imputation_variable(redfin, 'sqft', 'median', 199, 25000)\n# Gov\nparcels['gov_sqft'] = imputation_variable(gov_buildings, 'sqft', 'max', 199, 2000000)\n# Costar\nparcels['costar_year_built'] = imputation_variable(costar, 'year_built', 'median', 1800, 2016)\nparcels['costar_non_residential_sqft'] = imputation_variable(costar, 'rentable_area', 'max', 199, 2000000) #sum?\nparcels['costar_stories'] = imputation_variable(costar, 'stories', 'max', 0, 100)\n\ndef impute_null(target_varname, source_varname, imputation_flag_note):\n idx_imputed = parcels[target_varname].isnull()*(~parcels[source_varname].isnull()) ## These are the records that will be imputed\n parcels[target_varname][parcels[target_varname].isnull()] = parcels[source_varname][parcels[target_varname].isnull()]\n parcels.imputation_flag[idx_imputed] = parcels.imputation_flag[idx_imputed] + imputation_flag_note ## Populate imputation flag for affected records\n print imputation_flag_note, '%s records affected' % idx_imputed.sum()\n \ndef impute_out_of_bounds(target_varname, source_varname, imputation_flag_note, floor, ceiling):\n out_of_bounds = np.logical_or((parcels[target_varname] < floor), (parcels[target_varname] > ceiling))\n impute_available = ~parcels[source_varname].isnull()\n idx_imputed = out_of_bounds*impute_available\n parcels[target_varname][idx_imputed] = parcels[source_varname][idx_imputed]\n parcels.imputation_flag[idx_imputed] = parcels.imputation_flag[idx_imputed] + imputation_flag_note\n print imputation_flag_note, '%s records affected' % idx_imputed.sum()\n \ndef impute_greater_than(target_varname, source_varname, imputation_flag_note):\n \"\"\"If target variable has value less than source variable, replace with source value.\"\"\"\n idx_imputed = 
parcels[source_varname] > parcels[target_varname]\n parcels[target_varname][idx_imputed] = parcels[source_varname][idx_imputed]\n parcels.imputation_flag[idx_imputed] = parcels.imputation_flag[idx_imputed] + imputation_flag_note\n print imputation_flag_note, '%s records affected' % idx_imputed.sum()\n \n#If null year_built, just take the Redfin year built value no matter what\nimpute_null('year_built', 'redfin_year_built', ', rf_yrblt')\n\n#If null sqft_per_unit, just take the Redfin sqft_per_unit value no matter what\nimpute_null('sqft_per_unit', 'redfin_sqft_per_unit', ', rf_sqftunit')\n\n#If year_built out of bounds, and Redfin value exists, take Redfin value\nimpute_out_of_bounds('year_built', 'redfin_year_built', ', rf_yrblt', 1800, 2015)\n\n#If sqft_per_unit out of bounds, and Redfin value exists, take Redfin value\nimpute_out_of_bounds('sqft_per_unit', 'redfin_sqft_per_unit', ', rf_sqftunit', 150, 50000)\n\n#If non-residential sqft null, just take the Gov value no matter what\nimpute_null('non_residential_sqft', 'gov_sqft', ', gov_nrsqft')\n\n#If non-residential sqft out of bounds, and Gov value exists, take Gov value\nimpute_out_of_bounds('non_residential_sqft', 'gov_sqft', ', gov_nrsqft', 150, 5000000)\n\n#If parcel nonres sqft less than indicated by Gov, use Gov nonres sqft\nimpute_greater_than('non_residential_sqft', 'gov_sqft', ', gov_nrsqft_boosted')\n\n#If year_built null, just take the Costar value no matter what\nimpute_null('year_built', 'costar_year_built', ', cs_yrblt')\n\n#If non-residential sqft null, just take the Costar value no matter what\nimpute_null('non_residential_sqft', 'costar_non_residential_sqft', ', cs_nrsqft')\n\n#If stories null, just take the Costar value no matter what\nimpute_null('stories', 'costar_stories', ', cs_stories')\n\n#If year built out of bounds, and costar value exists, take costar value\nimpute_out_of_bounds('year_built', 'costar_year_built', ', cs_yrblt', 1800, 2015)\n\n#If non-residential sqft out of bounds, and costar value exists, take costar value\nimpute_out_of_bounds('non_residential_sqft', 'costar_non_residential_sqft', ', cs_nrsqft', 150, 5000000)\n\n#If parcel nonres sqft less than indicated by Costar, use Costar nonres sqft\nimpute_greater_than('non_residential_sqft', 'costar_non_residential_sqft', ', cr_nrsqft_boosted')\n\n#If stories 0 or 1, and costar value exists, take costar value\nimpute_out_of_bounds('stories', 'costar_stories', ', cs_stories', 2, 100)\n\n\n#Append other imputation source data to parcels table, just for reference/imputation/modeling in later steps\nparcels['redfin_sale_price'] = redfin.groupby('gid').lastsalepr.median()\nparcels['redfin_sale_year'] = redfin.groupby('gid').saleyear.median()\nparcels['redfin_home_type'] = redfin.groupby('gid').hometype.max()\n\nparcels['gov_type'] = gov_buildings.groupby('gid').development_type_id.max()\n\nparcels['costar_property_type'] = costar.groupby('gid').property_type.max()\nparcels['costar_secondary_type'] = costar.groupby('gid').secondary_type.max()\nparcels['costar_building_name'] = costar.groupby('gid').building_name.max()\nparcels['costar_elevators'] = costar.groupby('gid').elevators.max()\nparcels['costar_rent'] = costar.groupby('gid').rent.max()\n\n\n######## *UPDATE* ########\nprint 'Point-based impute done. 
Loading results back to db.'\n\n# Updated fields back to database\nparcels_imputed = parcels[['year_built', 'sqft_per_unit', 'non_residential_sqft', 'redfin_sale_price', 'redfin_sale_year', 'redfin_home_type', 'gov_type', 'gov_sqft', 'costar_property_type', 'costar_secondary_type', 'costar_building_name', 'costar_elevators', 'costar_rent', 'imputation_flag']]\nfor col in ['redfin_home_type', 'costar_property_type', 'costar_secondary_type', 'costar_building_name', 'costar_rent']:\n parcels_imputed[col][parcels_imputed[col].isnull()] = ''\n parcels_imputed[col] = parcels_imputed[col].str.encode('utf-8')\ndf_to_db(parcels_imputed, 'parcels_imputed', schema=loader.tables.staging)\n\n#Update the master parcel table on the database\nexec_sql(\"update parcels set year_built = a.year_built from staging.parcels_imputed a where a.gid = parcels.gid;\")\nexec_sql(\"update parcels set sqft_per_unit = a.sqft_per_unit from staging.parcels_imputed a where a.gid = parcels.gid;\")\nexec_sql(\"update parcels set non_residential_sqft = a.non_residential_sqft from staging.parcels_imputed a where a.gid = parcels.gid;\")\nexec_sql(\"update parcels set imputation_flag = a.imputation_flag from staging.parcels_imputed a where a.gid = parcels.gid;\")\n\ndef add_field_and_populate_with_imputed_value(varname, vartype):\n if not field_in_table(varname, 'parcels'):\n exec_sql(\"ALTER TABLE parcels ADD COLUMN %s %s;\" % (varname, vartype))\n exec_sql(\"update parcels set %s = a.%s from staging.parcels_imputed a where a.gid = parcels.gid;\" % (varname, varname))\n \nadd_field_and_populate_with_imputed_value('redfin_sale_price', 'numeric')\nadd_field_and_populate_with_imputed_value('redfin_sale_year', 'numeric')\nadd_field_and_populate_with_imputed_value('redfin_home_type', 'text')\nadd_field_and_populate_with_imputed_value('gov_type', 'numeric')\nadd_field_and_populate_with_imputed_value('gov_sqft', 'numeric')\nadd_field_and_populate_with_imputed_value('costar_elevators', 'numeric')\nadd_field_and_populate_with_imputed_value('costar_property_type', 'text')\nadd_field_and_populate_with_imputed_value('costar_secondary_type', 'text')\nadd_field_and_populate_with_imputed_value('costar_building_name', 'text')\nadd_field_and_populate_with_imputed_value('costar_rent', 'text')", "id": "3216046", "language": "Python", "matching_score": 4.427921295166016, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/point_based_imputation.py" }, { "content": "#Imports\nimport pandas as pd, numpy as np\nimport pandas.io.sql as sql\nfrom spandex import TableLoader\nfrom spandex.io import exec_sql, df_to_db\nfrom spandex.targets import scaling as scl\nimport hashlib\n\n#Connect to the database\nloader = TableLoader()\n\ndef db_to_df(query):\n \"\"\"Executes SQL query and returns DataFrame.\"\"\"\n conn = loader.database._connection\n return sql.read_frame(query, conn)\n\n##Load parcels as dataframes for the imputation\nparcels = db_to_df('select * from parcels;')\nparcels = parcels.set_index('gid')\n\n#Standardize the res_type field\nparcels.res_type[parcels.res_type.isnull()] = 'other'\nparcels.res_type[parcels.res_type==''] = 'other'\nparcels.res_type[np.in1d(parcels.res_type, ['FLATS', 'APTS', 'CONDO', 'SRO', 'LIVEWORK', 'mixed'])] = 'multi'\nparcels.res_type[parcels.res_type=='SINGLE'] = 'single'\n\n# Load TAZ residential unit control totals and other zonal targets.\ntaz_controls_csv = loader.get_path('hh/taz2010_imputation.csv')\ntargetunits = pd.read_csv(taz_controls_csv, index_col='taz1454')\n\ntaz_controls_csv2 = 
loader.get_path('hh/tazsumm_redfin.csv')\ntargetvalues = pd.read_csv(taz_controls_csv2, index_col='taz')\n\nnonres_sqft_zone = pd.DataFrame({'observed':parcels.groupby('taz').non_residential_sqft.sum(), 'target':targetunits.targetnonressqft})\n\n# For all employment points, translate to nonres-sqft by multiplying by 250.\n# Filter out synthetic job-based buildings so that we keep only those that have no residential and have less than 500 existing sqft. \n# For each TAZ, calculate the difference needed to match aggregate target.\n# If need to increment nrsqft upwards, sort synthetic buildings by sqft and take the top x that covers the needed difference\n# If no valid job points and non existing nonres-sqft, introduce a synthetic building in the TAZ- equal to the target, and put it on the biggest parcel.\n# Do same in the case of no parcels (and add synthetic parcel)\n# Scale to match\n\n#No need to tag in imputation_flag column based on scaling- otherwise everything would be tagged.\nnonres_sqft_zone['difference'] = nonres_sqft_zone.target - nonres_sqft_zone.observed\n\n##Append the unique parcel identifier to the establisment point records.\nif 'parcel_id' not in db_to_df(\"SELECT column_name FROM information_schema.columns WHERE table_name='establishment_points'\").column_name.values:\n exec_sql(\"alter table staging.establishment_points add parcel_id integer default 0;\")\n exec_sql(\"update staging.establishment_points set parcel_id = a.gid from parcels a where st_within(staging.establishment_points.geom, a.geom);\")\n\n#Load the establishment points to be used for non-residential sqft imputation\nestab_points = db_to_df('select emp_here, naics2, parcel_id from staging.establishment_points;')\nestabs_joined = pd.merge(estab_points, parcels.reset_index(), left_on = 'parcel_id', right_on = 'gid')\n\n# Filter out some synthetic job-based buildings so that we keep only those that have no existing residential and have less than 500 existing non-residential sqft. 
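# --- Hedged illustration (added by the editor; not part of the original file) ---
# A minimal, self-contained sketch of the sampling rule sketched in the comments
# above: employment points are converted to assumed non-residential sqft (250 sqft
# per job), the candidate synthetic buildings are sorted ascending, and the
# smallest prefix whose cumulative sqft covers the TAZ shortfall is taken.
# All names and numbers below are toy assumptions, not the real establishment data
# used later in this script.
import pandas as pd

def sample_candidates_to_cover(candidate_sqft, shortfall):
    # Return the smallest ascending-sorted prefix whose running total covers `shortfall`.
    ordered = candidate_sqft.sort_values()                # smallest synthetic buildings first
    cut = int(ordered.cumsum().searchsorted(shortfall))   # first position whose cumulative sqft >= shortfall
    return ordered.iloc[:cut + 1]

# Toy usage: four synthetic job-based buildings (gid -> 250 * emp_here) and a
# 20,000 sqft gap between the TAZ target and what is already on the ground.
_toy_candidates = pd.Series({101: 2500, 102: 7500, 103: 12500, 104: 50000})
_picked = sample_candidates_to_cover(_toy_candidates, shortfall=20000)
print(_picked.sum())   # 22500 -- slightly overshoots the gap, as the real loop below does
# --------------------------------------------------------------------------------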
\nestabs_joined = estabs_joined[np.logical_or((estabs_joined.residential_units == 0), (estabs_joined.residential_units.isnull())) & np.logical_or((estabs_joined.non_residential_sqft < 500), (estabs_joined.non_residential_sqft.isnull()))]\n\n# For all employment points, translate to nonres-sqft by multiplying by 250.\nestabs_joined['nrsqft_assumed'] = 250 * estabs_joined.emp_here\n\n#Aggregate employment-point-based-buildings to the parcel level\nestabs_joined_nrsqft = estabs_joined.groupby('gid').nrsqft_assumed.sum()\nestabs_joined_taz = estabs_joined.groupby('gid').taz.max()\nestabs_for_impute = pd.DataFrame({'non_residential_sqft':estabs_joined_nrsqft, 'taz':estabs_joined_taz})\n\n#Subset of parcels, just for convenience\nparcels_abridged = parcels[['taz', 'res_type', 'residential_units', 'calc_area']]\n\n#Function to add synthetic parcel in case that TAZ contains no existing parcels\ndef add_parcel(zone_id):\n new_pid = parcels.index.max() + 1\n parcels.loc[new_pid] = None\n parcels.taz.loc[new_pid] = zone_id ##Should also append a specific county_id, but need a taz-county xref\n parcels.imputation_flag.loc[new_pid] = 'synthetic'\n for col in parcels.dtypes.iteritems():\n col_name = col[0]\n col_type = col[1]\n if (col_name != 'taz') & (col_name != 'imputation_flag'):\n if col_name == 'county_id':\n parcels[col_name].loc[new_pid] = 0\n elif col_type == 'object':\n parcels[col_name].loc[new_pid] = ' '\n else:\n parcels[col_name].loc[new_pid] = 0.0\n return new_pid\n\n#Select parcel in each taz for new building, if needed later\n#Get parcel_id of parcel with max area\nparcel_with_max_area_by_taz = parcels_abridged.reset_index().groupby('taz').apply(lambda t: t[t.calc_area==t.calc_area.max()]).gid.reset_index()[['taz', 'gid']].set_index('taz')\n\n#Example usage. 
This gives the GID of parcel with max area in TAZ 4\n#print parcel_with_max_area_by_taz.loc[4]\ndef select_parcel_for_new_building(zone_id):\n return int(parcel_with_max_area_by_taz.loc[zone_id].values.max())\n\nparcel_du_by_taz = parcels.groupby(['taz', 'res_type']).residential_units.sum()\nobserved_parcel_du_taz = parcel_du_by_taz.index.get_level_values(0)\n\nsf_parcels = parcels_abridged[(parcels_abridged.res_type == 'single') & (parcels_abridged.residential_units > 0)]\nmf_parcels = parcels_abridged[(parcels_abridged.res_type == 'multi') & (parcels_abridged.residential_units > 0)]\nsf_parcels_loc = sf_parcels.reset_index().set_index(['taz', 'gid']).loc\nmf_parcels_loc = mf_parcels.reset_index().set_index(['taz', 'gid']).loc\n\nres_parcel_updates = pd.Series()\nnew_res_buildings = []\n\nnonres_parcel_updates = pd.Series()\nnew_nonres_buildings = []\n\ndef place_units(alternative_ids, number_of_units):\n probabilities = np.ones(len(alternative_ids))\n choices = np.random.choice(alternative_ids, size = number_of_units, replace = True)\n return pd.Series(choices).value_counts() #this is units_to_add by gid\n\ndef update_res_parcels(units_to_add_or_remove):\n #parcels.residential_units.loc[units_to_add_or_remove.index] = parcels.residential_units.loc[units_to_add_or_remove.index] + units_to_add_or_remove.values\n global res_parcel_updates\n res_parcel_updates = res_parcel_updates.append(units_to_add_or_remove)\n\ndef select_unit_deletions_by_gid(deletion_candidates_exploded, difference):\n deletions = np.random.choice(deletion_candidates_exploded, size = difference, replace = False)\n return -1*pd.Series(deletions).value_counts()\n\ndef explode_unit_candidates(loc, zone_id):\n candidates = loc[zone_id].residential_units\n return np.repeat(candidates.index.values, candidates.values.astype('int'))\n\n\ndef add_sf_units(zone_id, sf_difference, observed_sf_units):\n if observed_sf_units > 0:\n #allocate according to existing distribution\n alternative_ids = sf_parcels_loc[zone_id].index.values\n units_to_add = place_units(alternative_ids, sf_difference)\n update_res_parcels(units_to_add)\n else:\n pid = select_parcel_for_new_building(zone_id)\n new_res_buildings.append([pid, 'single', sf_difference])\n \ndef remove_sf_units(zone_id, sf_difference):\n deletion_candidates_exploded = explode_unit_candidates(sf_parcels_loc, zone_id)\n deletions_by_gid = select_unit_deletions_by_gid(deletion_candidates_exploded, sf_difference)\n update_res_parcels(deletions_by_gid)\n\ndef add_mf_units(zone_id, mf_difference, observed_mf_units):\n if observed_mf_units > 0:\n #allocate according to existing distribution\n alternative_ids = mf_parcels_loc[zone_id].index.values\n units_to_add = place_units(alternative_ids, mf_difference)\n update_res_parcels(units_to_add)\n else:\n pid = select_parcel_for_new_building(zone_id)\n new_res_buildings.append([pid, 'multi', mf_difference])\n\ndef remove_mf_units(zone_id, mf_difference):\n deletion_candidates_exploded = explode_unit_candidates(mf_parcels_loc, zone_id)\n deletions_by_gid = select_unit_deletions_by_gid(deletion_candidates_exploded, mf_difference)\n update_res_parcels(deletions_by_gid)\n \n \ndef add_or_remove_sf_units(zone_id, sf_difference, observed_sf_units):\n if sf_difference > 0:\n print ' *Adding %s single-family DU.' % sf_difference\n add_sf_units(zone_id, sf_difference, observed_sf_units)\n if sf_difference < 0:\n sf_difference = abs(sf_difference)\n print ' *Removing %s single-family DU.' 
% sf_difference\n remove_sf_units(zone_id, sf_difference)\n \ndef add_or_remove_mf_units(zone_id, mf_difference, observed_mf_units):\n if mf_difference > 0:\n print ' *Adding %s multi-family DU.' % mf_difference\n add_mf_units(zone_id, mf_difference, observed_mf_units)\n if mf_difference < 0:\n mf_difference = abs(mf_difference)\n print ' *Removing %s multi-family DU.' % mf_difference\n remove_mf_units(zone_id, mf_difference)\n \ndef flip_from_sf_to_mf_type(zone_id, number_of_units, observed_mf_units):\n print ' Trying to flilp %s single-family units to multi-family' % number_of_units\n \n sf_flip_alternatives = sf_parcels_loc[zone_id].residential_units.order(ascending = True)\n cumsum_idx = sf_flip_alternatives.cumsum().searchsorted(number_of_units)\n \n sampled_res_buildings = 0\n sampled_res_buildings1 = sf_flip_alternatives[:(cumsum_idx)]\n sampled_res_buildings2 = sf_flip_alternatives[:(cumsum_idx + 1)]\n \n if (sampled_res_buildings2.sum() > 0) & (sampled_res_buildings2.sum() <= number_of_units):\n sampled_res_buildings = sampled_res_buildings2\n elif (sampled_res_buildings1.sum() > 0) & (sampled_res_buildings1.sum() <= number_of_units):\n sampled_res_buildings = sampled_res_buildings1\n \n if type(sampled_res_buildings) is int:\n add_or_remove_mf_units(zone_id, number_of_units, observed_mf_units)\n return number_of_units\n else:\n #Remove from sf_parcels_loc\n buildings_to_drop = zip([zone_id,]*len(sampled_res_buildings), sampled_res_buildings.index)\n sf_parcels_loc.obj = sf_parcels_loc.obj.drop(buildings_to_drop)\n\n #Edit parcels so these are mf buildings\n parcels.res_type.loc[sampled_res_buildings.index.values] = 'multi'\n parcels.development_type_id.loc[sampled_res_buildings.index.values] = 'MF'\n parcels.imputation_flag.loc[sampled_res_buildings.index] = parcels.imputation_flag.loc[sampled_res_buildings.index] + ', restype_flip' ## Populate imputation flag for affected records\n \n print ' Flipped %s single-family units to multi-family' % sampled_res_buildings.sum()\n if (number_of_units - sampled_res_buildings.sum()) > 0:\n add_or_remove_mf_units(zone_id, number_of_units - sampled_res_buildings.sum(), observed_mf_units)\n return number_of_units - sampled_res_buildings.sum()\n\ndef flip_from_mf_to_sf_type(zone_id, number_of_units, observed_sf_units):\n print ' Trying to flip %s multi-family units to single-family' % number_of_units\n \n mf_flip_alternatives = mf_parcels_loc[zone_id].residential_units.order(ascending = True)\n cumsum_idx = mf_flip_alternatives.cumsum().searchsorted(number_of_units)\n \n sampled_res_buildings = 0\n sampled_res_buildings1 = mf_flip_alternatives[:(cumsum_idx)]\n sampled_res_buildings2 = mf_flip_alternatives[:(cumsum_idx + 1)]\n \n if (sampled_res_buildings2.sum() > 0) & (sampled_res_buildings2.sum() <= number_of_units):\n sampled_res_buildings = sampled_res_buildings2\n elif (sampled_res_buildings1.sum() > 0) & (sampled_res_buildings1.sum() <= number_of_units):\n sampled_res_buildings = sampled_res_buildings1\n \n if type(sampled_res_buildings) is int:\n add_or_remove_sf_units(zone_id, number_of_units, observed_sf_units)\n return number_of_units\n else:\n #Remove from mf_parcels_loc\n buildings_to_drop = zip([zone_id,]*len(sampled_res_buildings), sampled_res_buildings.index)\n mf_parcels_loc.obj = mf_parcels_loc.obj.drop(buildings_to_drop)\n\n #Edit parcels so these are sf buildings\n parcels.res_type.loc[sampled_res_buildings.index.values] = 'single'\n parcels.development_type_id.loc[sampled_res_buildings.index.values] = 'SF'\n 
parcels.imputation_flag.loc[sampled_res_buildings.index] = parcels.imputation_flag.loc[sampled_res_buildings.index] + ', restype_flip' ## Populate imputation flag for affected records\n \n print ' Flipped %s multi-family units to single-family' % sampled_res_buildings.sum()\n if (number_of_units - sampled_res_buildings.sum()) > 0:\n add_or_remove_sf_units(zone_id, number_of_units - sampled_res_buildings.sum(), observed_sf_units)\n return number_of_units - sampled_res_buildings.sum()\n\n\n#NRSQFT imputation based on estab sites to match aggregate totals\nfor rec in nonres_sqft_zone.iterrows():\n zone_id = rec[0]\n print 'Non-residential sqft imputation for taz %s' % zone_id\n difference = rec[1]['difference']\n if np.isnan(difference):\n ##No parcels exist in this zone...\n print ' No parcels in this zone. Adding synthetic parcel and needed non-residential sqft.'\n difference = rec[1]['target']\n if difference > 0:\n new_pid = add_parcel(zone_id)\n new_nonres_buildings.append([new_pid, 'nonres_generic', difference])\n else:\n if difference > 0:\n observed = rec[1]['observed']\n #If starting point is zero non-res sqft, then take estab points to match target regardless of distance from target\n #If some nonres-sqft already exists. Only impute from estab points if target difference exceeds 5000 sqft\n if (observed == 0) or (difference > 5000):\n print ' Target non_residential_sqft to sample from estab-points: %s' % difference\n estabs_taz = estabs_for_impute[estabs_for_impute.taz == zone_id]\n if len(estabs_taz) > 0:\n print ' Estab sites exist to sample from'\n nonres_alternatives = estabs_taz.non_residential_sqft.order(ascending=True)\n cumsum_idx = nonres_alternatives.cumsum().searchsorted(difference)\n sampled_estabs = nonres_alternatives[:(cumsum_idx + 1)]\n print ' Sampled non_residential_sqft to add: %s' % sampled_estabs.sum()\n nonres_parcel_updates = nonres_parcel_updates.append(sampled_estabs) \n else:\n #No estab sites exist to sample from\n if observed == 0:\n #Add synthetic sqft corresponding to the target difference\n print ' No estab sites exist and no existing nonres sqft: Add synthetic sqft to match target difference'\n pid = select_parcel_for_new_building(zone_id)\n new_nonres_buildings.append([pid, 'nonres_generic', difference])\n\n####Summarize NRSQFT change\nprint parcels.groupby('county_id').non_residential_sqft.sum()\nparcels.non_residential_sqft.loc[nonres_parcel_updates.index] = parcels.non_residential_sqft.loc[nonres_parcel_updates.index].fillna(0) + nonres_parcel_updates.values\nparcels.imputation_flag.loc[nonres_parcel_updates.index] = parcels.imputation_flag.loc[nonres_parcel_updates.index] + ', estab_sqft' ## Populate imputation flag for affected records\nprint parcels.groupby('county_id').non_residential_sqft.sum()\n\n\n#RESUNIT imputation to match aggregate totals\nzonal_residential_unit_controls = targetunits[['targetSF', 'targetMF']]\nfor taz_du in zonal_residential_unit_controls.iterrows():\n \n zone_id = int(taz_du[0])\n print 'Matching aggregate residential unit targets for zone %s.' % zone_id\n \n target_sf_units = int(taz_du[1]['targetSF'])\n target_mf_units = int(taz_du[1]['targetMF'])\n print ' Target of %s single-family DU.' % (target_sf_units)\n print ' Target of %s multi-family DU.' 
% (target_mf_units)\n \n #TAZ currently has residential units on parcels\n if zone_id in observed_parcel_du_taz:\n parcel_observed_du = parcel_du_by_taz.loc[zone_id]\n \n #Look up observed SF DU\n if 'single' in parcel_observed_du.keys():\n observed_sf_units = int(parcel_observed_du['single'])\n else:\n observed_sf_units = 0\n \n #Look up observed MF DU\n if 'multi' in parcel_observed_du.keys():\n observed_mf_units = int(parcel_observed_du['multi'])\n else:\n observed_mf_units = 0\n \n print ' Observed %s single-family DU.' % (observed_sf_units)\n print ' Observed %s multi-family DU.' % (observed_mf_units)\n \n #Calculate difference between target and observed\n sf_difference = target_sf_units - observed_sf_units\n mf_difference = target_mf_units - observed_mf_units\n \n print ' Target difference: %s single-family DU.' % (sf_difference)\n print ' Target difference: %s multi-family DU.' % (mf_difference)\n \n ####Impute as needed to cover difference\n \n # Imputation is needed somewhere\n if (sf_difference != 0) | (mf_difference != 0):\n \n # One of the residential categories requires no imputation\n if (sf_difference == 0) | (mf_difference == 0):\n \n if sf_difference != 0:\n add_or_remove_sf_units(zone_id, sf_difference, observed_sf_units)\n\n if mf_difference != 0:\n add_or_remove_mf_units(zone_id, mf_difference, observed_mf_units)\n \n # Both residential categories require imputation\n else:\n net_difference = sf_difference + mf_difference\n \n # Both residential categories require additions, or both require subtractions\n if ((sf_difference > 0) & (mf_difference > 0)) | ((sf_difference < 0) & (mf_difference < 0)):\n add_or_remove_sf_units(zone_id, sf_difference, observed_sf_units)\n add_or_remove_mf_units(zone_id, mf_difference, observed_mf_units)\n \n # Single-family requires subtractions, and multi-family requires additions\n elif (sf_difference < 0) & (mf_difference > 0):\n # If |decrease| in single-family units exceeds |increase| in multi-family units\n if abs(sf_difference) > abs(mf_difference):\n # Take subset of the single-family decrease corresponding to the mult-family increase, and flip the type\n remainder = flip_from_sf_to_mf_type(zone_id, mf_difference, observed_mf_units)\n # Take rest of the single-family decrease, and remove these units, but only from among non-type-flipped parcels\n add_or_remove_sf_units(zone_id, net_difference - remainder, observed_sf_units - (mf_difference - remainder))\n # If |decrease| in single-family units is less than or equal to |increase| in multi-family units\n elif abs(sf_difference) <= abs(mf_difference):\n # Take all of the single-family decrease, and flip their type\n remainder = flip_from_sf_to_mf_type(zone_id, abs(sf_difference), observed_mf_units)\n # Take rest of the multifamily-increase, if any remains, and add units\n if net_difference > 0:\n add_or_remove_mf_units(zone_id, net_difference, observed_mf_units)\n # If not all singlefamily could be type-flipped, then remove units\n if remainder > 0:\n allocated = abs(sf_difference) - remainder\n add_or_remove_sf_units(zone_id, -remainder, observed_sf_units - allocated)\n \n # Multi-family requires subtractions, and single-family requires additions\n elif (mf_difference < 0) & (sf_difference > 0):\n # If |decrease| in multi-family units exceeds |increase| in single-family units\n if abs(mf_difference) > abs(sf_difference):\n # Take subset of the multi-family decrease corresponding to the single-family increase, and flip the type\n remainder = flip_from_mf_to_sf_type(zone_id, 
sf_difference, observed_sf_units)\n # Take rest of the multi-family decrease, and remove these units, but only from among non-type-flipped parcels\n add_or_remove_mf_units(zone_id, net_difference - remainder, observed_mf_units - (sf_difference - remainder))\n # If |decrease| in multi-family units is less than or equal to |increase| in single-family units\n elif abs(mf_difference) <= abs(sf_difference):\n # Take all of the multi-family decrease, and flip their type\n remainder = flip_from_mf_to_sf_type(zone_id, abs(mf_difference), observed_sf_units)\n # Take rest of the single-increase, if any remains, and add units\n if net_difference > 0:\n add_or_remove_sf_units(zone_id, net_difference, observed_sf_units)\n # If not all multifamily could be type-flipped, then remove units\n if remainder > 0:\n allocated = abs(mf_difference) - remainder\n add_or_remove_mf_units(zone_id, -remainder, observed_mf_units - allocated)\n \n else:\n #TAZ currently has ZERO parcels, add an artificial parcel\n print ' No parcels in this zone. Adding synthetic parcel and needed residential units by type.'\n new_pid = add_parcel(zone_id)\n if target_sf_units > 0:\n new_res_buildings.append([new_pid, 'single', target_sf_units])\n if target_mf_units > 0:\n new_res_buildings.append([new_pid, 'multi', target_mf_units])\n \nprint parcels.groupby('county_id').residential_units.sum()\nparcels.residential_units.loc[res_parcel_updates.index] = parcels.residential_units.loc[res_parcel_updates.index].fillna(0) + res_parcel_updates.values\nparcels.imputation_flag.loc[res_parcel_updates.index] = parcels.imputation_flag.loc[res_parcel_updates.index] + ', du_zonetarget' ## Populate imputation flag for affected records\nprint parcels.groupby('county_id').residential_units.sum()\n\n\nnew_res_buildings_df = pd.DataFrame(new_res_buildings, columns = ['gid', 'res_type', 'residential_units'])\nnew_nonres_buildings_df = pd.DataFrame(new_nonres_buildings, columns = ['gid', 'type', 'non_residential_sqft'])\n\nnew_res_buildings_df = pd.merge(new_res_buildings_df, parcels[['county_id', 'taz']], left_on = 'gid', right_index=True)\nnew_nonres_buildings_df = pd.merge(new_nonres_buildings_df, parcels[['county_id', 'taz']], left_on = 'gid', right_index=True)\n\n\nprint new_res_buildings_df.groupby('county_id').residential_units.sum()\nprint new_nonres_buildings_df.groupby('county_id').non_residential_sqft.sum()\n\n\n##Deal with parcels where development type id is unknown, imputing using Costar/Redfin\nproblematic = parcels[parcels.development_type_id.isnull() & (parcels.res_type=='other')][['county_id','improvement_value','year_built','stories','sqft_per_unit','residential_units','non_residential_sqft','building_sqft','res_type','land_use_type_id','development_type_id','redfin_home_type', 'costar_property_type','costar_secondary_type']]\n##Where no dev type, but we can get dev type from costar, then use costar for the dev type!\n\n##Tag these as nonresidential dev_type based on costar type designation\nproblematic_nonres = problematic[(~(problematic.costar_property_type == '')) & ((problematic.year_built>0)|(problematic.improvement_value>0)|(problematic.stories>0)|(problematic.building_sqft>0)|(problematic.non_residential_sqft>0))]\n\n##Tag these as residential res_type/dev_type based on redfin type designation\nproblematic_res = problematic[(~(problematic.redfin_home_type == '')) & (problematic.costar_property_type == '') & 
((problematic.year_built>0)|(problematic.improvement_value>0)|(problematic.stories>0)|(problematic.sqft_per_unit>0)|(problematic.building_sqft>0))] ##2810\n\n##After the above, export the county_id/land_use_type_id's that remain as a diagnostic output for Mike to investigate, then assume the rest have dev_type \"unknown\"\n##Double check that all records with res_type 'single' or 'multi' have the appropriate development type\n\n#Map between costar types and development_type_id categories\ncostar_devtype_map = {'Retail':'RT',\n 'Office':'OF',\n 'Industrial':'IW',\n 'Flex':'IW',\n 'Specialty':'OF',\n 'Retail (Strip Center)':'RT',\n 'Retail (Neighborhood Center)':'RT',\n 'Hospitality':'HO',\n 'Health Care':'HP',\n 'Retail (Community Center)':'RT',\n 'Sports & Entertainment':'RT',\n 'Retail (Power Center)':'RT',\n 'Retail (Regional Mall)':'RT',\n 'Retail (Lifestyle Center)':'RT',\n 'Retail (Super Regional Mall)':'RT',\n 'Office (Strip Center)':'OF',\n 'Retail (Theme/Festival Center)':'RT',\n 'Office (Neighborhood Center)':'OF',\n 'Office (Lifestyle Center)':'OF',\n 'Retail (Outlet Center)':'RT',\n 'Specialty (Neighborhood Center)':'RT',\n 'Industrial (Lifestyle Center)':'IW',\n 'Office (Regional Mall)':'OF',\n 'Flex (Strip Center)':'OF',\n 'General Retail':'RT',\n 'General Retail (Strip Center)':'RT',\n 'Hospitality (Neighborhood Center)':'HO',\n 'Office (Super Regional Mall)':'OF'}\n\nfor costar_type in costar_devtype_map.keys():\n idx = problematic_nonres[problematic_nonres.costar_property_type == costar_type].index.values\n parcels.development_type_id.loc[idx] = costar_devtype_map[costar_type]\n parcels.imputation_flag.loc[idx] = parcels.imputation_flag.loc[idx] + ', costar_type'\n \n#Map between redfin types and res_type categories\nredfin_devtype_map1 = {'Single Family Residential':'single',\n 'Condo/Coop':'multi',\n 'Townhouse':'multi',\n 'Vacant Land':'other',\n 'Multi-Family (2-4 Unit)':'multi',\n 'Unknown':'other',\n 'Other':'other',\n 'Mobile/Manufactured Home':'other',\n 'Multi-Family (5+ Unit)':'multi',\n 'Ranch':'single'}\n\n#Map between redfin types and development_type_id categories\nredfin_devtype_map2 = {'Single Family Residential':'SF',\n 'Condo/Coop':'MF',\n 'Townhouse':'MF',\n 'Vacant Land':'other',\n 'Multi-Family (2-4 Unit)':'MF',\n 'Unknown':'other',\n 'Other':'other',\n 'Mobile/Manufactured Home':'SF',\n 'Multi-Family (5+ Unit)':'MF',\n 'Ranch':'SF'}\n\nfor redfin_type in redfin_devtype_map1.keys():\n idx = problematic_res[problematic_res.redfin_home_type == redfin_type].index.values\n parcels.imputation_flag.loc[idx] = parcels.imputation_flag.loc[idx] + ', redfin_type'\n parcels.res_type.loc[idx] = redfin_devtype_map1[redfin_type]\n\nfor redfin_type in redfin_devtype_map2.keys():\n idx = problematic_res[problematic_res.redfin_home_type == redfin_type].index.values\n parcels.development_type_id.loc[idx] = redfin_devtype_map2[redfin_type]\n \n \nparcels.development_type_id[parcels.development_type_id.isnull()*(parcels.res_type=='single')] = 'SF'\nparcels.development_type_id[parcels.development_type_id.isnull()*(parcels.res_type=='multi')] = 'MF'\nparcels.development_type_id[parcels.development_type_id.isnull()] = 'other' ##These are the parcels to print out lu_type diagnostics for...\n##Note these 'other' parcels will most typically be vacant\n\n#Standardize the development_type_id coding\ndevcode_map = {\"HM\":\"MF\",\n\"HS\":\"SF\",\n\"HT\":\"MF\",\n\"ME\":\"MR\",\n\"RB\":\"RT\",\n\"RC\":\"BR\",\n\"REC\":\"BR\",\n\"RS\":\"RT\",\n\" 
\":\"other\",\n\"VT\":\"LD\",\n\"VAC\":\"LD\",\n\"VA\":\"LD\",\n}\n\nfor devcode in devcode_map.keys():\n parcels.development_type_id[parcels.development_type_id == devcode] = devcode_map[devcode]\n \nparcels['proportion_undevelopable'] = 0.0 ##Populate this with spatial function\nparcels.land_use_type_id[parcels.land_use_type_id.isnull()] = ' '\nparcels.development_type_id.value_counts()\nparcels.county_id[parcels.county_id==' '] = 0\nparcels.county_id = parcels.county_id.astype('int')\n\n#SF-specific devtype correction due to residential sometimes being mistakenly coded as RT\nparcels.development_type_id[(parcels.county_id == 75) & (parcels.res_type == 'single') & (parcels.development_type_id == 'RT')] = 'SF'\nparcels.development_type_id[(parcels.county_id == 75) & (parcels.res_type == 'multi') & (parcels.development_type_id == 'RT')] = 'MF'\n\n# Assign development type id based on gov_type status\nparcels.gov_type = parcels.gov_type.fillna(0).astype('int')\nparcels.development_type_id[(parcels.residential_units == 0) & (~parcels.res_type.isin(['single', 'multi'])) & (parcels.gov_type == 12)] = 'HP'\nparcels.development_type_id[(parcels.residential_units == 0) & (~parcels.res_type.isin(['single', 'multi'])) & (parcels.gov_type == 17)] = 'SC'\nparcels.development_type_id[(parcels.residential_units == 0) & (~parcels.res_type.isin(['single', 'multi'])) & (parcels.gov_type == 18)] = 'SH'\nparcels.development_type_id[(parcels.residential_units == 0) & (~parcels.res_type.isin(['single', 'multi'])) & (parcels.gov_type == 19)] = 'GV'\n\n# Set SCL common areas as undevelopable\nscl_parcels = parcels[parcels.county_id == 85]\nscl_parcels = scl_parcels[scl_parcels.apn.str.startswith('-')]\nparcels.proportion_undevelopable.loc[scl_parcels.index.values] = 1.0\n\n##############\n###BUILDINGS##\n##############\n\nidx = (parcels.improvement_value > 0) | (parcels.year_built > 0) | (parcels.building_sqft > 0) | (parcels.non_residential_sqft > 0) | (parcels.residential_units > 0) | (parcels.stories > 0) | (parcels.sqft_per_unit > 0) | ((parcels.costar_property_type.str.len()> 1) & (~parcels.costar_property_type.isin(['Land','Land (Community Center)']))) | ((parcels.redfin_home_type.str.len()> 1) & (~parcels.redfin_home_type.isin(['Vacant Land','Other','Unknown'])))\nbuildings = parcels[idx]\nprint len(buildings)\n\nbuildings = buildings[['county_id', 'land_use_type_id', 'res_type', 'improvement_value', 'year_assessed', 'year_built', 'building_sqft', 'non_residential_sqft', 'residential_units', 'sqft_per_unit', 'stories', 'development_type_id', 'taz', 'redfin_sale_price', 'redfin_sale_year', 'redfin_home_type', 'costar_elevators', 'costar_property_type', 'costar_secondary_type', 'costar_building_name', 'costar_rent']].copy(deep=True)\n\nbuildings['building_id'] = np.arange(len(buildings)) + 1\nbuildings.index.name = 'parcel_id'\nbuildings = buildings.reset_index()\n\n## Incorporate the synthetic buildings added as necessary in some situations to match aggregate unit targets\nnew_res_buildings_df = new_res_buildings_df.rename(columns = {'gid':'parcel_id'})\nnew_res_buildings_df['land_use_type_id'] = 'from_imputation'\nnew_res_buildings_df['development_type_id'] = ''\nnew_res_buildings_df.development_type_id[new_res_buildings_df.res_type == 'single'] = 'SF'\nnew_res_buildings_df.development_type_id[new_res_buildings_df.res_type == 'multi'] = 'MF'\nnew_res_buildings_df['building_id'] = np.arange(buildings.building_id.max() + 1, buildings.building_id.max() + len(new_res_buildings_df) + 
1)\n\nnew_nonres_buildings_df = new_nonres_buildings_df.rename(columns = {'gid':'parcel_id'})\nnew_nonres_buildings_df['land_use_type_id'] = 'from_imputation'\nnew_nonres_buildings_df['development_type_id'] = 'OF'\nnew_nonres_buildings_df['building_id'] = np.arange(new_res_buildings_df.building_id.max() + 1, new_res_buildings_df.building_id.max() + len(new_nonres_buildings_df) + 1)\n\n# Tag the associated parcels with \"add_synth_bldg\" in imputation flag\nidx = np.unique(new_res_buildings_df.parcel_id)\nparcels.imputation_flag.loc[idx] = parcels.imputation_flag.loc[idx] + ', add_synth_res_bldg'\n\nidx = np.unique(new_nonres_buildings_df.parcel_id)\nparcels.imputation_flag.loc[idx] = parcels.imputation_flag.loc[idx] + ', add_synth_nonres_bldg'\n\n# Merge the synthetic buildings with the rest\nbuildings = pd.concat([buildings, new_res_buildings_df])\nbuildings = pd.concat([buildings, new_nonres_buildings_df])\n\n## Building column cleaning/imputation\nbuildings.residential_units[buildings.residential_units.isnull()] = 0\nbuildings.non_residential_sqft[buildings.non_residential_sqft.isnull()] = 0\n\n# Upper and lower bound configuration (move settings to separate config file?)\nyear_built_lower_bound = 1790\nyear_built_upper_bound = 2015\n\nsqft_per_unit_lower_bound = 200\nsqft_per_unit_upper_bound = 30000\n\ntargetvalues['year_built_av_nonres'] = buildings[(~buildings.res_type.isin(['single', 'multi'])) & (buildings.year_built > year_built_lower_bound) & (buildings.year_built < year_built_upper_bound)].groupby('taz').year_built.mean()\nbuildings = pd.merge(buildings, targetvalues, left_on = 'taz', right_index = True, how = 'left')\n\nbuildings = buildings.set_index('building_id')\n\n## YEAR BUILT\n\n# Residential building with out-of-bounds or null year_built- replace with observed zonal average of good data points\nidx = (buildings.res_type.isin(['single', 'multi'])) & ((buildings.year_built < year_built_lower_bound) | (buildings.year_built > year_built_upper_bound) | (buildings.year_built.isnull()))\nbuildings.year_built[idx] = buildings.yearbuilt_av\n# Imputation flag\nidx_parcel = np.unique(buildings.parcel_id[idx])\nparcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', res_zone_yrblt'\n\n# If any residential buildings with year_built value still out of bounds (e.g. if zonal average data from previous step had bad value), use average across all residential buildings\nidx = (buildings.res_type.isin(['single', 'multi'])) & buildings.year_built.isnull()\nbuildings.year_built[idx] = buildings.year_built[(buildings.res_type.isin(['single', 'multi']))].mean()\n# Imputation flag\nidx_parcel = np.unique(buildings.parcel_id[idx])\nparcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', res_region_yrblt'\n\n# Non-residential building with out-of-bounds or null year_built- replace with zonal average of good data points\nidx = (~buildings.res_type.isin(['single', 'multi'])) & ((buildings.year_built < year_built_lower_bound) | (buildings.year_built > year_built_upper_bound) | (buildings.year_built.isnull()))\nbuildings.year_built[idx] = buildings.year_built_av_nonres\n# Imputation flag\nidx_parcel = np.unique(buildings.parcel_id[idx])\nparcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', nr_zone_yrblt'\n\n# If any non-residential buildings with year_built value still out of bounds (e.g. 
if zonal average data from previous step had bad value), use average across all non-residential buildings\nidx = (~buildings.res_type.isin(['single', 'multi'])) & buildings.year_built.isnull()\nbuildings.year_built[idx] = buildings.year_built[(~buildings.res_type.isin(['single', 'multi']))].mean()\n# Imputation flag\nidx_parcel = np.unique(buildings.parcel_id[idx])\nparcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', nr_region_yrblt'\n\n## NON-RESIDENTIAL SQFT\n\n# If nonresidential structure (and this is known by btype and resunits), and nonres sqft is 0 but building_sqft is positive, nonres_sqft should equal building_sqft\nidx = (~buildings.res_type.isin(['single', 'multi'])) & (buildings.residential_units == 0) & (buildings.non_residential_sqft == 0) & (buildings.building_sqft > 0)\nbuildings.non_residential_sqft[idx] = buildings.building_sqft\n#Imputation flag\nidx_parcel = np.unique(buildings.parcel_id[idx])\nparcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', nrsqft_from_bsqft'\n\n# If nonresidential structure with zero residential units, building_sqft should equal non-residential sqft\nidx = (~buildings.res_type.isin(['single', 'multi'])) & (buildings.residential_units == 0)\nbuildings.building_sqft[idx] = buildings.non_residential_sqft\n#Imputation flag\nidx_parcel = np.unique(buildings.parcel_id[idx])\nparcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', bsqft_from_nrsqft'\n\n# SQFT PER UNIT\n# Residential sqft_per_unit should be building_sqft / residential_units\nidx = (buildings.res_type.isin(['single', 'multi'])) & (buildings.building_sqft > 0) & (buildings.residential_units > 0) & np.logical_or((buildings.sqft_per_unit == 0), (buildings.sqft_per_unit.isnull())) & (buildings.non_residential_sqft == 0)\nbuildings.sqft_per_unit[idx] = buildings.building_sqft[idx] / buildings.residential_units[idx]\n\n# Replacing sqft_per_unit nulls/zeros with county avg of good data points\nfor cid in np.unique(buildings.county_id):\n if cid != 0:\n idx = (buildings.res_type.isin(['single', 'multi'])) & (buildings.residential_units > 0) & (buildings.county_id == cid) & (buildings.sqft_per_unit > 0)\n mean_sqft_per_unit = buildings[idx].sqft_per_unit.mean()\n if mean_sqft_per_unit < 1000: mean_sqft_per_unit = 1000\n idx2 = (buildings.res_type.isin(['single', 'multi'])) & (buildings.residential_units > 0) & (buildings.county_id == cid) & (np.logical_or((buildings.sqft_per_unit.isnull()) , (buildings.sqft_per_unit < 100)))\n buildings.sqft_per_unit[idx2] = mean_sqft_per_unit\n # Imputation flag\n idx_parcel = np.unique(buildings.parcel_id[idx2])\n parcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', cnty_sq_du'\n \n# Applying bounds to sqft per unit\n\n# Enforce lower bound\nidx = (buildings.res_type.isin(['single', 'multi'])) & (buildings.residential_units > 0) & (buildings.sqft_per_unit < sqft_per_unit_lower_bound)\nbuildings.sqft_per_unit[idx] = sqft_per_unit_lower_bound\n#Imputation flag\nidx_parcel = np.unique(buildings.parcel_id[idx])\nparcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', sq_du_lower_bound'\n\n# Enforce upper bound\nidx = (buildings.res_type.isin(['single', 'multi'])) & (buildings.residential_units > 0) & (buildings.sqft_per_unit > sqft_per_unit_upper_bound)\nbuildings.sqft_per_unit[idx] = sqft_per_unit_upper_bound\n# Imputation flag\nidx_parcel = 
np.unique(buildings.parcel_id[idx])\nparcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', sq_du_upper_bound'\n\nbuildings.sqft_per_unit[buildings.sqft_per_unit < 0] = 0\nbuildings.sqft_per_unit[buildings.sqft_per_unit > sqft_per_unit_upper_bound] = sqft_per_unit_upper_bound\n\n# Residential building_sqft should equal sqft_per_unit * residential_units\nidx = (buildings.res_type.isin(['single', 'multi'])) & (buildings.residential_units > 0) & (buildings.sqft_per_unit > 0) & (buildings.non_residential_sqft == 0)\nbuildings.building_sqft[idx] = buildings.sqft_per_unit[idx] * buildings.residential_units[idx]\n\n# Building_sqft should equal nonres sqft + sqft_per_unit*residential units\nbuildings.sqft_per_unit[buildings.sqft_per_unit.isnull()] = 0\nbuildings.building_sqft = buildings.non_residential_sqft + (buildings.sqft_per_unit * buildings.residential_units)\n\n# STORIES\n# If stories null or less than 1, set to 1\nidx = buildings.stories.isnull() | (buildings.stories < 1)\nbuildings.stories[idx] = 1\nidx_parcel = np.unique(buildings.parcel_id[idx])\nparcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', stories1'\n\nidx = (buildings.stories == 1) & (buildings.costar_elevators > 0)\nbuildings.stories[idx] = 3\nidx_parcel = np.unique(buildings.parcel_id[idx])\nparcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', stories3_cs_elevator'\n\nidx = buildings.stories > 90\nbuildings.stories[buildings.stories > 99] = 1 ##Marin data problem\nbuildings.stories[buildings.stories > 98] = 2 ##SCL data problem\nbuildings.stories[buildings.stories > 90] = 1 ##SF data problem\nidx_parcel = np.unique(buildings.parcel_id[idx])\nparcels.imputation_flag.loc[idx_parcel] = parcels.imputation_flag.loc[idx_parcel] + ', stories_corrected'\n\n# Replace nulls with zero\nbuildings.improvement_value[buildings.improvement_value.isnull()] = 0.0\nbuildings.year_assessed[buildings.year_assessed.isnull()] = 0.0\nbuildings.taz[buildings.taz.isnull()] = 0.0\nbuildings.res_type[buildings.res_type.isnull()] = 'other'\n\n# Cut down to necessary building columns\nbuildings2 = buildings[['parcel_id','county_id', 'land_use_type_id', 'res_type', 'improvement_value', 'year_assessed', 'year_built', 'building_sqft', 'non_residential_sqft', 'residential_units', 'sqft_per_unit', 'stories', 'development_type_id', 'taz', 'redfin_sale_price', 'redfin_sale_year', 'redfin_home_type', 'costar_property_type', 'costar_rent']]\n\nprint buildings2.development_type_id.value_counts()\n\n\n############\n##SCALING###\n############\n\n## Scaling for zonal targets: (note: scaling residential prices takes place in price_imputation)\n\ntargets_residential_year_built = pd.DataFrame(\n {'column_name': ['year_built']*len(targetvalues),\n 'target_value': targetvalues.yearbuilt_av.values,\n 'target_metric': ['mean']*len(targetvalues),\n 'filters': ('(residential_units > 0) & (taz == ' + pd.Series(targetvalues.index.values).astype('str')) + ')',\n 'clip_low': [np.nan]*len(targetvalues),\n 'clip_high': [np.nan]*len(targetvalues),\n 'int_result': [np.nan]*len(targetvalues)})\n\ntargets_non_residential_sqft = pd.DataFrame(\n {'column_name': ['non_residential_sqft']*len(targetunits),\n 'target_value': targetunits.targetnonressqft.values,\n 'target_metric': ['sum']*len(targetunits),\n 'filters': ('(non_residential_sqft > 0) & (taz == ' + pd.Series(targetunits.index.values).astype('str')) + ')',\n 'clip_low': [np.nan]*len(targetunits),\n 'clip_high': 
[np.nan]*len(targetunits),\n 'int_result': [np.nan]*len(targetunits)})\n\nbuildings2 = scl.scale_to_targets_from_table(buildings2, targets_residential_year_built)\n\ntargets_non_residential_sqft['taz'] = targetunits.index.values\ntargets_non_residential_sqft = targets_non_residential_sqft.set_index('taz')\ntargets_non_residential_sqft['existing_nrsqft'] = buildings2.groupby('taz').non_residential_sqft.sum()\ntargets_non_residential_sqft.target_value[targets_non_residential_sqft.target_value < targets_non_residential_sqft.existing_nrsqft] = targets_non_residential_sqft.existing_nrsqft[targets_non_residential_sqft.target_value < targets_non_residential_sqft.existing_nrsqft]\ndel targets_non_residential_sqft['existing_nrsqft']\nbuildings2 = scl.scale_to_targets_from_table(buildings2, targets_non_residential_sqft)\n\nprint buildings[buildings.building_sqft == 0].res_type.value_counts() \nprint len(buildings2[(buildings2.building_sqft == 0) & (buildings2.res_type=='other')])\n\n# Post scaling bound-checking\nbuildings2.year_built[buildings2.year_built > year_built_upper_bound] = year_built_upper_bound\nbuildings2.year_built[buildings2.year_built < year_built_lower_bound] = year_built_lower_bound\n\n\n# COMPARE WITH TARGETS\ntargetunits['sf'] = buildings2[buildings2.res_type == 'single'].groupby('taz').residential_units.sum()\ntargetunits['mf'] = buildings2[buildings2.res_type == 'multi'].groupby('taz').residential_units.sum()\ntargetunits['nrsqft'] = buildings2.groupby('taz').non_residential_sqft.sum()\nprint targetunits[['sf','targetSF','mf','targetMF', 'nrsqft', 'targetnonressqft']].head()\ntargetunits[['sf','targetSF','mf','targetMF', 'nrsqft', 'targetnonressqft']].sum()\n# summary_output_path = loader.get_path('out/regeneration/summaries/built_space_summary.csv')\n# targetunits[['sf','targetSF','mf','targetMF', 'nrsqft', 'targetnonressqft']].to_csv(summary_output_path)\ntargetunits = targetunits[['sf','targetSF','mf','targetMF', 'nrsqft', 'targetnonressqft']]\ndf_to_db(targetunits, 'summary_built_space', schema=loader.tables.public)\n\n\n# EXPORT BUILDINGS TO DB\nprint 'Loading processed buildings to db'\ndf_to_db(buildings2, 'buildings', schema=loader.tables.public)\n\n\n## Create geom_id (serves similar purpose to joinnumA) on parcels based on integer representation of geometry hash\nidx = []\ngeom_hashes = []\nfor i, geom in parcels.geom.iteritems():\n idx.append(i)\n md5_hash = str(hashlib.md5(geom).hexdigest())\n geom_hashes.append(int(md5_hash[0:11], 16))\n \nparcel_identifier = pd.Series(geom_hashes, index = idx)\nparcels['geom_id'] = parcel_identifier\n\n\n# EXPORT PROCESSED PARCELS TO DB\nparcels['parcel_acres'] = parcels.calc_area/4046.86\nparcels['taz_id'] = parcels.taz\nparcels['tax_exempt_status'] = parcels.tax_exempt\nparcels2 = parcels[['development_type_id', 'land_value', 'parcel_acres', 'county_id', 'taz_id', 'proportion_undevelopable', 'tax_exempt_status', 'apn', 'parcel_id_local', 'geom_id', 'imputation_flag']]\ndevtype_devid_xref = {'SF':1, 'MF':2, 'MFS':3, 'MH':4, 'MR':5, 'GQ':6, 'RT':7, 'BR':8, 'HO':9, 'OF':10, 'OR':11, 'HP':12, 'IW':13, \n 'IL':14, 'IH':15, 'VY':16, 'SC':17, 'SH':18, 'GV':19, 'VP':20, 'VA':21, 'PG':22, 'PL':23, 'TR':24, 'LD':25, 'other':-1}\nfor dev in devtype_devid_xref.keys():\n parcels2.development_type_id[parcels2.development_type_id == dev] = devtype_devid_xref[dev]\nparcels2.taz_id = parcels2.taz_id.astype('int')\nparcels2.development_type_id = parcels2.development_type_id.astype('int')\nparcels2.tax_exempt_status = 
parcels2.tax_exempt_status.fillna(0).astype('int')\nparcels2.land_value = parcels2.land_value.fillna(0.0)\nparcels2.parcel_id_local = parcels2.parcel_id_local.fillna(' ')\nparcels2.parcel_acres[parcels2.parcel_acres.isnull()] = 2.5\nparcels2.index.name = 'parcel_id'\n\nprint 'Loading processed parcels to db'\ndf_to_db(parcels2, 'parcel', schema=loader.tables.public)\n\n# Attach geom to the processed parcels\nprint 'Attaching geometry to the processed parcels'\nexec_sql(\"\"\"\nalter table parcel add geom geometry(MultiPolygon); \nSELECT UpdateGeometrySRID('parcel', 'geom', 2768);\nupdate parcel set geom = a.geom from parcels a where parcel.parcel_id = a.gid;\nupdate parcel set geom_id = parcel_id where geom is null;\n\"\"\")\n\n# Create spatial index.\nprint 'Spatially indexing the processed parcels'\nexec_sql(\"\"\"\nCREATE INDEX parcel_geom_gist ON parcel\n USING gist (geom);\n\"\"\")\n\n# Add XY coords to processed parcels\nprint 'Add x and y coords on the processed parcels'\nexec_sql(\"alter table parcel add centroid geometry;\")\nexec_sql(\"update parcel set centroid = ST_centroid(geom);\")\nexec_sql('ALTER TABLE parcel ADD x numeric;')\nexec_sql('ALTER TABLE parcel ADD y numeric;')\nexec_sql(\"update parcel set x = ST_X(ST_Transform(centroid, 4326));\")\nexec_sql(\"update parcel set y = ST_Y(ST_Transform(centroid, 4326));\")\n", "id": "248814", "language": "Python", "matching_score": 5.716255187988281, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/match_aggregate.py" }, { "content": "import pandas as pd, numpy as np\nimport pandas.io.sql as sql\nfrom spandex import TableLoader\nfrom spandex.io import exec_sql, df_to_db\nimport statsmodels.formula.api as smf\nimport statsmodels.api as sm\nfrom spandex.targets import scaling as scl\n\n#Connect to the database\nloader = TableLoader()\n\ndef db_to_df(query):\n \"\"\"Executes SQL query and returns DataFrame.\"\"\"\n conn = loader.database._connection\n return sql.read_frame(query, conn)\n\n# Load TAZ residential unit control totals and other zonal targets (used as additional source of zonal explanatory variables in the price imputation)\ntaz_controls_csv = loader.get_path('hh/taz2010_imputation.csv')\ntargetunits = pd.read_csv(taz_controls_csv, index_col='taz1454')\n\ntaz_controls_csv2 = loader.get_path('hh/tazsumm_redfin.csv')\ntargetvalues = pd.read_csv(taz_controls_csv2, index_col='taz')\n\n#Load the core data tables. 
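# --- Hedged illustration (added by the editor; not part of the original file) ---
# A minimal sketch of the hedonic-regression pattern this script applies to the
# Redfin and CoStar samples: fit an OLS model of log(price) on building and zone
# characteristics with the statsmodels formula API, predict for buildings without
# an observed sale, and exponentiate back to price levels. The toy frame and
# column names below are assumptions for illustration only.
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

_toy = pd.DataFrame({
    'sale_price':    [450000., 620000., 380000., 710000., 530000., 495000.],
    'sqft_per_unit': [1400.,   2100.,   1100.,   2600.,   1800.,   1500.],
    'year_built':    [1952,    1988,    1941,    2004,    1976,    1963],
})
_fit = smf.ols('np.log(sale_price) ~ sqft_per_unit + year_built', data=_toy).fit()

# Predict for two hypothetical buildings with no observed sale and convert the
# fitted log-prices back to dollar values, as the script does with sim_data.
_unobserved = pd.DataFrame({'sqft_per_unit': [1300., 2200.], 'year_built': [1970, 1995]})
print(np.exp(_fit.predict(_unobserved)))
# --------------------------------------------------------------------------------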
Only buildlings table will be updated (the price attributes)\nbuildings = db_to_df('select * from buildings').set_index('building_id')\nparcels = db_to_df('select * from parcels')\n# Load TAZ-level synthetic population\nhh_path = loader.get_path('hh/synth/hhFile.p2011s3a1.2010.csv')\nhh = pd.read_csv(hh_path)\nhh = hh.set_index('HHID')\nhh.index.name = 'household_id'\nhh = hh.rename(columns = {'TAZ':'taz'})\n\ntargetunits['area'] = parcels.groupby('taz').calc_area.sum()\ntargetunits.area[targetunits.area.isnull()] = targetunits.area.mean()\n\ntargetunits['empdensity'] = targetunits.etot_10/targetunits.area\ntargetunits['resdensity'] = targetunits.targetunits/targetunits.area\ntargetunits['mf_sf_ratio'] = targetunits.targetMF/(targetunits.targetSF+1)\ntargetunits['nr_res_ratio'] = targetunits.targetnonressqft/(targetunits.targetunits+1)\n\ntargetunits['mean_income'] = hh.groupby('taz').HINC.mean()\ntargetunits['mean_hhsize'] = hh.groupby('taz').PERSONS.mean()\ntargetunits['mean_hhchildren'] = hh.groupby('taz').NOC.mean()\ntargetunits['mean_numvehicles'] = hh.groupby('taz').VEHICL.mean()\n\nfor col in ['mean_income', 'mean_hhsize', 'mean_hhchildren', 'mean_numvehicles']:\n targetunits[col][targetunits[col].isnull()] = targetunits[col].mean()\n\nbuildings = pd.merge(buildings, targetvalues, left_on = 'taz', right_index = True, how = 'left')\nbuildings = pd.merge(buildings, targetunits, left_on = 'taz', right_index = True, how = 'left')\n\n\n#Residential price imputation\nresprice_estimation_dataset = buildings[(buildings.sqft_per_unit > 200) & (buildings.redfin_sale_year > 2009) & (buildings.redfin_sale_price > 90000) & (buildings.redfin_sale_price < 9000000) & (buildings.res_type.isin(['single', 'multi']))]\nspecification = 'np.log(redfin_sale_price) ~ I(year_built < 1940) + I(year_built > 1990) + year_built + mean_income + mean_hhsize + mean_hhchildren + mean_numvehicles + mf_sf_ratio + resdensity + empdensity + nr_res_ratio + yearbuilt_av + yearbuilt_sd + sqft_per_unit + stories + I(res_type == \"multi\") + I(county_id == 1) + I(county_id == 13) + I(county_id == 41) + I(county_id == 55) + I(county_id == 85) + I(county_id == 81) + I(county_id == 95) + I(county_id == 97)'\nmodel = smf.ols(formula=specification, data=resprice_estimation_dataset)\nresults = model.fit()\nprint results.summary()\n\nresbuildings = buildings[buildings.res_type.isin(['single', 'multi']) & (buildings.residential_units > 0)]\nsim_data = results.predict(resbuildings)\nsim_data = np.exp(sim_data)\nsim_data = pd.Series(sim_data, index = resbuildings.index)\nbuildings['res_price'] = 0\nbuildings['res_price_per_sqft'] = 0\nbuildings.loc[sim_data.index,'res_price'] = sim_data\n#Now that regression equation is applied, scale residential prices to match zonal target\ntargets_residential_price = pd.DataFrame(\n {'column_name': ['res_price']*len(targetvalues),\n 'target_value': targetvalues.salepr2010_av.values,\n 'target_metric': ['mean']*len(targetvalues),\n 'filters': ('(residential_units > 0) & (taz == ' + pd.Series(targetvalues.index.values).astype('str')) + ')',\n 'clip_low': [np.nan]*len(targetvalues),\n 'clip_high': [np.nan]*len(targetvalues),\n 'int_result': [np.nan]*len(targetvalues)})\nbuildings = scl.scale_to_targets_from_table(buildings, targets_residential_price)\n\nbuildings.res_price_per_sqft[(buildings.res_price > 0) * (buildings.sqft_per_unit > 0)] = buildings.res_price/buildings.sqft_per_unit\n\n#Nonresidential price imputation\nnonresprice_estimation_dataset = 
buildings[(buildings.costar_property_type.str.len()>2) & (buildings.res_type == 'other') & (~buildings.costar_rent.isin(['', '-', 'Negotiable', 'Withheld']))]\nnonresprice_estimation_dataset['observed_costar_rent'] = nonresprice_estimation_dataset.costar_rent.astype('float')\n\nspecification = 'np.log(observed_costar_rent) ~ non_residential_sqft + targetnonressqft + I(development_type_id == \"OF\") + I(development_type_id == \"RT\") + I(year_built < 1940) + I(year_built > 1990) + year_built + mean_income + mean_hhsize + mean_hhchildren + mean_numvehicles + mf_sf_ratio + resdensity + empdensity + nr_res_ratio + yearbuilt_av + yearbuilt_sd + stories + I(county_id == 1) + I(county_id == 13) + I(county_id == 41) + I(county_id == 55) + I(county_id == 85) + I(county_id == 81) + I(county_id == 95) + I(county_id == 97) + e11_10 + e21_10 + e22_10 + e23_10 + e3133_10 + e42_10 + e4445_10 + e4849_10 + e51_10 + e52_10 + e53_10 + e54_10 + e55_10 + e56_10 + e61_10 + e62_10 + e71_10 + e72_10 + e81_10 + e92_10 + etot_10'\nmodel = smf.ols(formula=specification, data=nonresprice_estimation_dataset)\nresults = model.fit()\nprint results.summary()\n\nnonresbuildings = buildings[(buildings.res_type == 'other') & (buildings.non_residential_sqft > 0)]\nsim_data = results.predict(nonresbuildings)\nsim_data = np.exp(sim_data)\nsim_data = pd.Series(sim_data, index = nonresbuildings.index)\nbuildings['nonres_rent_per_sqft'] = 0\nbuildings.loc[sim_data.index,'nonres_rent_per_sqft'] = sim_data\n\n#summary_output_path = loader.get_path('out/regeneration/summaries/price_summary.csv')\nprice_summary = pd.DataFrame({'avg_nonres_rent_per_sqft':buildings[buildings.nonres_rent_per_sqft>0].groupby('taz').nonres_rent_per_sqft.mean(), 'avg_res_price_per_sqft':buildings[buildings.res_price_per_sqft>0].groupby('taz').res_price_per_sqft.mean(),})\ndf_to_db(price_summary, 'summary_price', schema=loader.tables.public)\n\n\n##Now export back to the database\nbuildings2 = buildings[['parcel_id','county_id', 'land_use_type_id', 'res_type', 'improvement_value', 'year_assessed', 'year_built', 'building_sqft', 'non_residential_sqft', 'residential_units', 'sqft_per_unit', 'stories', 'development_type_id', 'taz', 'redfin_sale_price', 'redfin_sale_year', 'redfin_home_type', 'costar_property_type', 'costar_rent', 'nonres_rent_per_sqft', 'res_price_per_sqft']]\n\ndf_to_db(buildings2, 'buildings', schema=loader.tables.public)", "id": "2523952", "language": "Python", "matching_score": 4.152064800262451, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/price_imputation.py" }, { "content": "import os\nimport pandas as pd\nfrom spandex import TableLoader, TableFrame\nfrom spandex.utils import load_config\nfrom spandex.io import exec_sql, df_to_db\nimport pandas.io.sql as sql\n\ndef db_to_df(query):\n \"\"\"Executes SQL query and returns DataFrame.\"\"\"\n conn = loader.database._connection\n return sql.read_frame(query, conn)\n\n# Build parcels TableFrame.\nloader = TableLoader()\ntable = loader.database.tables.public.parcels\ntf = TableFrame(table, index_col='gid')\n\n# Load TAZ residential unit control totals.\ntaz_controls_csv = loader.get_path('hh/taz2010_imputation.csv')\ntargetunits = pd.read_csv(taz_controls_csv, index_col='taz1454')['targetunits']\n\n# Get CSV output file directory.\noutput_dir = loader.get_path('out/regeneration/summaries')\n\n# Generate summary CSV by county and TAZ.\nfor grouper in ['county_id', 'taz']:\n df = tf[[grouper, 'non_residential_sqft', 'residential_units']]\n df.dropna(subset=[grouper], 
inplace=True)\n\n if grouper == 'taz':\n df[grouper] = df[grouper].astype(int)\n\n df['count'] = 1\n summary = df.groupby(grouper).sum()\n\n if grouper == 'taz':\n summary['residential_units_target'] = targetunits\n taz_df = summary\n\n output_tablename = 'summary_{}'.format(grouper)\n df_to_db(summary, output_tablename, schema=loader.tables.public)\n\nparcel_output_dir = loader.get_path('out/regeneration/summaries/parcels')\n\nconfig = load_config()\ndb_config = dict(config.items('database'))\n\nif 'taz_id' in db_to_df(\"SELECT column_name FROM information_schema.columns WHERE table_name='parcel'\").column_name.values:\n exec_sql(\"ALTER TABLE parcel RENAME COLUMN taz_id to zone_id;\")\nif 'parcel_acres' in db_to_df(\"SELECT column_name FROM information_schema.columns WHERE table_name='parcel'\").column_name.values:\n exec_sql(\"ALTER TABLE parcel RENAME COLUMN parcel_acres to acres;\")\n\n## Export parcel shapefile to output directory\nos.system('pgsql2shp -f \"%s\" -h %s -u %s -P %s %s parcel' % (parcel_output_dir, db_config['host'], db_config['user'], db_config['password'], db_config['database']))\n\n# Export simplified parcel shapefile\nos.system('pgsql2shp -f \"%s\" -h %s -u %s -P %s %s \"select geom_id, parcel_id, st_simplifypreservetopology(geom, 100) from parcel\"' % (parcel_output_dir, db_config['host'], db_config['user'], db_config['password'], db_config['database'])) \n \n## Export buildings as csv\nbuilding_output_path = loader.get_path('out/regeneration/summaries/buildings.csv')\n \nbuildings = db_to_df('select * from building').set_index('building_id')\nbuildings.to_csv(building_output_path)\n\n## Export TAZ summary file\nsummary_built_space = db_to_df('select * from summary_built_space').set_index('taz1454')\nsummary_price = db_to_df('select * from summary_price').set_index('taz')\nsummary_emp = db_to_df('select * from summary_emp').set_index('taz1454')\nsummary_hh = db_to_df('select * from summary_hh').set_index('taz1454')\n\nsummary_df = pd.DataFrame({\n'sf':summary_built_space.sf,\n'targetsf':summary_built_space.targetsf,\n'mf':summary_built_space.mf,\n'targetmf':summary_built_space.targetmf,\n'nrsqft':summary_built_space.nrsqft,\n'targetnrsqft':summary_built_space.targetnonressqft,\n'hh':summary_hh.hh,\n'hh_allocated':summary_hh.hh_allocated,\n'res_occupancy':summary_hh.occupancy,\n'job_spaces':summary_emp.job_spaces,\n'jobs':summary_emp.jobs,\n'jobs_allocated':summary_emp.jobs_allocated,\n'emp_occupancy':summary_emp.occupancy,\n'avg_nonres_rent_per_sqft':summary_price.avg_nonres_rent_per_sqft,\n'avg_res_price_per_sqft':summary_price.avg_res_price_per_sqft,\n})\n\nsummary_df = summary_df[['sf', 'targetsf', 'mf', 'targetmf', 'nrsqft', 'targetnrsqft', 'hh', 'hh_allocated', 'res_occupancy', 'job_spaces', 'jobs', 'jobs_allocated', 'emp_occupancy', 'avg_res_price_per_sqft', 'avg_nonres_rent_per_sqft']].fillna(0)\n\nsummary_output_path = loader.get_path('out/regeneration/summaries/taz_summary.csv')\nsummary_df.to_csv(summary_output_path)", "id": "945252", "language": "Python", "matching_score": 2.9389777183532715, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/summaries.py" }, { "content": "### UrbanCanvas\n\nimport os\nfrom spandex import TableLoader\nfrom spandex.io import exec_sql #spandex localhost exec_sql func\nfrom spandex.utils import load_config\nimport psycopg2\n\nloader = TableLoader()\n\n## spandex localhost db config\ndb_config = dict(load_config().items('database')) \n\n## UrbanCanvas db config\nurbancanvas_db_config = {'database': 'mtc',\n 
'host': '172.16.58.3',\n 'password': '<PASSWORD>',\n 'port': '5432',\n 'user': 'mtc'}\n\n## if 'loading' schema not on localhost db, create. This schema is for tables to load to UrbanCanvas\nexec_sql(\"CREATE SCHEMA IF NOT EXISTS loading;\") \n\n#UrbanCanvas exec_sql func, for executing sql on UrbanCanvas database\ndef exec_sql2(query):\n print query\n conn_string = \"host=172.16.58.3 dbname='mtc' user='mtc' password='<PASSWORD>' port=5432\"\n import psycopg2\n conn=psycopg2.connect(conn_string)\n cur = conn.cursor()\n cur.execute(query)\n conn.commit()\n cur.close()\n conn.close()\n\n\"\"\"\nExports specified tables from localhost database to the UrbanCanvas\ndatabase. Assumes the spandex data directory has an 'out' folder.\nA 'loading' schema must exist on both localhost and UrbanCanvas.\n\nParameters\n----------\ntable_name : str\n Name of table to export to UrbanCanvas.\ntable_schema : str\n Name of schema on localhost that the table resides in.\nlocalhost_db_config : dict\n Dictionary of localhost database configuration settings.\n Should have the following keys: 'database', 'host', 'user',\n 'pass', 'port'.\nurbancanvas_db_config : dict\n Dictionary of UrbanCanvas database configuration settings.\n Should have the following keys: 'database', 'host', 'user',\n 'pass', 'port'.\nexec_sql_localhost_fn : function\n Function that executes sql on localhost db based on sql \n string argument.\nexec_sql_urbancanvas_fn : function\n Function that executes sql on UrbanCanvas db based on sql \n string argument.\nsrc_table_name : str, Optional\n Name of source table to export to UrbanCanvas db if name of\n table is different on localhost than on UrbanCanvas.\n\nReturns\n-------\nNone\n\n\"\"\"\ndef localhost_to_urbancanvas_db(table_name, table_schema, localhost_db_config, urbancanvas_db_config, exec_sql_localhost_fn, exec_sql_urbancanvas_fn, src_table_name = None):\n exec_sql_localhost_fn(\"drop table if exists loading.%s;\" % table_name)\n\n if src_table_name is not None:\n exec_sql_localhost_fn(\"SELECT * INTO loading.%s FROM %s.%s;\" % (table_name, table_schema, src_table_name))\n else:\n exec_sql_localhost_fn(\"SELECT * INTO loading.%s FROM %s.%s;\" % (table_name, table_schema, table_name))\n\n postgres_backup = loader.get_path('out/%s.backup' % table_name)\n os.system('pg_dump --host %s --port %s --username \"%s\" --format custom --verbose --file \"%s\" --table \"loading.%s\" \"%s\"' % \n (db_config['host'], db_config['port'], db_config['user'], postgres_backup, table_name, db_config['database'])) \n\n exec_sql_urbancanvas_fn(\"drop table if exists loading.%s;\" % table_name)\n\n os.system('pg_restore --host %s --port %s --username \"%s\" --dbname \"%s\" --role \"%s\" --no-password --verbose \"%s\"' % \n (urbancanvas_db_config['host'], urbancanvas_db_config['port'], urbancanvas_db_config['user'], urbancanvas_db_config['database'], urbancanvas_db_config['user'], postgres_backup))\n\n\n## Export tables from localhost database to UrbanCanvas database\n# localhost_to_urbancanvas_db('zoning', 'public', db_config, urbancanvas_db_config, exec_sql, exec_sql2)\nlocalhost_to_urbancanvas_db('building', 'public', db_config, urbancanvas_db_config, exec_sql, exec_sql2)\nlocalhost_to_urbancanvas_db('parcel', 'public', db_config, urbancanvas_db_config, exec_sql, exec_sql2)\n\n", "id": "12579765", "language": "Python", "matching_score": 0.5354272127151489, "max_stars_count": 0, "path": "bayarea_urbansim/data_regeneration/export_to_uc.py" }, { "content": "import folium\nimport pandas as pd\nimport 
geopandas\nimport math\nimport sys\n\nargs = sys.argv[1:]\nsuffix = ''\nif len(args) > 0:\n suffix = \"_\"+args[0]\n\nhtmldir = \"/var/www/html/scratchpad/\"\n\ngdf = geopandas.GeoDataFrame.from_file(htmldir+\"pdas.json\").set_index(\"id_1\")\ncentroids = gdf.centroid\n\nresults = pd.read_csv('runs/pda_model_results.csv').set_index(\"pda\")\n\n\ndef make_map(outname, map_funcs):\n map = folium.Map(location=[37.7792, -122.1191],\n zoom_start=10,\n tiles='Stamen Toner')\n\n pda_d = {}\n for pda, centroid in centroids.iteritems():\n\n if pda in pda_d:\n # for some reason pdas occur multiple times in the shapefile\n continue\n pda_d[pda] = 1\n\n result = None\n try:\n result = results.loc[pda.lower()]\n except:\n print \"PDA not found\", pda\n\n text = map_funcs['text'](result)\n color = map_funcs['color'](result)\n radius = map_funcs['radius'](result)\n\n map.circle_marker(\n location=[centroid.y, centroid.x], radius=radius,\n popup=text, line_color='black',\n fill_color=color, fill_opacity=0.6)\n\n map.create_map(path=outname)\n\n\ndef get_text(r):\n return str(r).replace(\"\\n\", \"<br>\") \\\n if r is not None else ''\n\n\ndef get_radius(r):\n return max(math.sqrt(r.targets)*10, 200) \\\n if r is not None else 0\n\n\ndef get_color(r):\n ratio = r.ratio if r is not None else 0\n if ratio > .5 and ratio < 2.0:\n color = 'green'\n if 2.0 < ratio < 4.0:\n color = 'yellow'\n if ratio > 4.0:\n color = 'red'\n if .25 < ratio < .5:\n color = 'blue'\n if ratio < .25:\n color = 'purple'\n return color\n\noutname = htmldir+'results%s.html' % suffix\nmap_funcs = {\n 'text': get_text,\n 'radius': get_radius,\n 'color': get_color\n}\nmake_map(outname, map_funcs)\n\n\ndef get_radius(r):\n return max(math.sqrt(r.fillna(0).modeled)*10, 200) \\\n if r is not None else 0\n\noutname = htmldir+'results_flipped%s.html' % suffix\nmap_funcs['radius'] = get_radius\nmake_map(outname, map_funcs)\n", "id": "4254804", "language": "Python", "matching_score": 1.702490210533142, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/make_pda_result_maps.py" }, { "content": "import pandas as pd\nimport sys\nimport orca\n\nRUNNUM = sys.argv[1]\n\nif RUNNUM == \"zoned\":\n sys.path.append(\".\")\n import models\n parcels = orca.get_table('parcels')\n modeled = parcels.zoned_du_underbuild.groupby(parcels.pda).sum()\n\nelse:\n RUNNUM = int(RUNNUM)\n # reading modeled\n df = pd.read_csv(\"runs/run%d_parcel_output.csv\" % RUNNUM)\n print \"Total modeled units = \", df.net_units.sum()\n print \"Total modeled units in all pdas = \", \\\n df.dropna(subset=[\"pda\"]).net_units.sum()\n # aggregating net units for modeled\n modeled = df.groupby(\"pda\").net_units.sum()\n\ntargets = pd.read_csv(\"data/pdatargets.csv\", sep=\"\\t\")\ntargets.index = targets.Key.str.lower()\ntargets = targets.Households2 - targets.Households1\n# print \"Warning, halving targets for 15 year simulation\"\n\n# something is wrong with the targets - they're too large\n# see email with mike about this, as this is only a temp solution\nprint \"Total target units in pdas = \", targets.sum()\n\nratio = (modeled / targets).reindex(targets.index).fillna(0)\n\nprint ratio.describe()\n\npd.DataFrame({\n \"modeled\": modeled,\n \"targets\": targets,\n \"ratio\": ratio\n}, index=targets.index).to_csv(\"runs/pda_model_results.csv\", index_label=\"pda\")\n", "id": "12051057", "language": "Python", "matching_score": 1.6933448314666748, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/compare_to_targets.py" }, { "content": "import pandas as pd\nimport os\n\nBASE = 
\"/var/www/html/runs/\"\n\ndef readfile(year):\n\tfname = BASE + \"run{}_parcel_output.csv\".format(runnum)\n\treturn pd.read_csv(fname, low_memory=False) \\\n\t\tif os.path.isfile(fname) else pd.DataFrame()\n\ndf = pd.concat([\n\treadfile(runnum)\n for runnum in range(1338, 1344)\n], axis=0)\n\ndf = df.query(\"(residential_units > 150 or job_spaces > 300) and SDEM != True\")\n\ngrps = df.groupby('geom_id')\n\ndf = grps.first()\ndf[\"occurences\"] = grps.size()\n\nprint len(df), df.occurences.describe()\n\ndf.to_csv(\"large_projects.csv\")\n", "id": "884268", "language": "Python", "matching_score": 1.2751742601394653, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/filter_large_projects.py" }, { "content": "import pandas as pd\n\n# this script compares the residential unit counts for two scenarios with\n# vmt fees on and off - presumably we want there to be more units built\n# in low-vmt areas\n\nRUN1 = 547 # vmt on\nRUN2 = 540 # vmt off\nvmt_order = [\"S\", \"M\", \"MH\", \"H\", \"VH\"]\n\nfor runnum in [RUN1, RUN2]:\n print \"VMT summary for run =\", runnum\n\n df = pd.read_csv('runs/run%d_parcel_output.csv' % runnum,\n \t low_memory=False)\n\n df = df[df.form == \"residential\"]\n\n total_units = df.net_units.sum()\n print \"Total units =\", total_units\n\n s = df.groupby(\"vmt_res_cat\").net_units.sum().loc[vmt_order]\n \n print s / total_units\n", "id": "10520104", "language": "Python", "matching_score": 0.9109946489334106, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/vmt_compare.py" }, { "content": "import os\n\n# run a full package of scenarios\n\nfor num in [0, 1, 2, 3, 4]:\n os.system('python run.py -s %d' % num)\n\nwith open('RUNNUM', 'r') as f:\n runnum = f.readline()\n\nlastrun = int(runnum)\nlst = [lastrun for i in range(4)]\nlst1 = [-5, -4, -3, -2]\nruns = [d + c for d, c in zip(lst, lst1)]\nos.system('python scripts/compare_output.py \"%d\" \"%d\" \"%d\" \"%d\"' %\n (runs[0], runs[1], runs[2], runs[3]))\n", "id": "9818375", "language": "Python", "matching_score": 0.0002867378352675587, "max_stars_count": 0, "path": "bayarea_urbansim/all.py" }, { "content": "import os\nimport time\nimport datetime as dt\n\nimport cPickle\n\nimport numpy as np\nimport pandas as pd\nimport orca\n\nimport logging\n\nimport random\nimport tracing\nfrom tracing import print_elapsed_time\n\nlogger = logging.getLogger(__name__)\n\n# name of the checkpoint dict keys\n# (which are also columns in the checkpoints dataframe stored in hte pipeline store)\n_TIMESTAMP = 'timestamp'\n_CHECKPOINT_NAME = 'checkpoint_name'\n_PRNG_CHANNELS = 'prng_channels'\n_NON_TABLE_COLUMNS = [_CHECKPOINT_NAME, _TIMESTAMP, _PRNG_CHANNELS]\n\n# name used for storing the checkpoints dataframe to the pipeline store\n_CHECKPOINT_TABLE_NAME = 'checkpoints'\n\n# name of the first step/checkpoint created when teh pipeline is started\n_INITIAL_CHECKPOINT_NAME = 'init'\n\n# most recent checkpoint\n_LAST_CHECKPOINT = {}\n\n# array of checkpoint dicts\n_CHECKPOINTS = []\n\n# the one and only instantiated random number generator object (effetively a singleton)\n_PRNG = random.Random()\n\n\n_OPEN_FILES = {}\n\n\ndef close_on_exit(file, name):\n assert name not in _OPEN_FILES\n _OPEN_FILES[name] = file\n\n\ndef close_open_files():\n for name, file in _OPEN_FILES.iteritems():\n print \"Closing %s\" % name\n file.close()\n _OPEN_FILES.clear()\n\n\ndef add_dependent_columns(base_dfname, new_dfname):\n tbl = orca.get_table(new_dfname)\n for col in tbl.columns:\n logger.debug(\"Adding dependent column %s\" % col)\n 
orca.add_column(base_dfname, col, tbl[col])\n\n\ndef open_pipeline_store(overwrite=False):\n \"\"\"\n Open the pipeline checkpoint store and add an orca injectable to access it\n\n Parameters\n ----------\n overwrite : bool\n delete file before opening (unless resuming)\n \"\"\"\n\n if orca.is_injectable('pipeline_store'):\n raise RuntimeError(\"Pipeline store is already open!\")\n\n pipeline_file_path = orca.get_injectable('pipeline_path')\n\n if overwrite:\n try:\n if os.path.isfile(pipeline_file_path):\n logger.debug(\"removing pipeline store: %s\" % pipeline_file_path)\n os.unlink(pipeline_file_path)\n except Exception as e:\n print(e)\n logger.warn(\"Error removing %s: %s\" % (e,))\n\n store = pd.HDFStore(pipeline_file_path, mode='a')\n\n orca.add_injectable('pipeline_store', store)\n\n logger.debug(\"opened pipeline_store\")\n\n\ndef get_pipeline_store():\n \"\"\"\n Return the open pipeline hdf5 checkpoint store or return False if it not been opened\n \"\"\"\n if orca.is_injectable('pipeline_store'):\n return orca.get_injectable('pipeline_store')\n else:\n return None\n\n\ndef get_rn_generator():\n \"\"\"\n Return the singleton random number object\n\n Returns\n -------\n activitysim.random.Random\n \"\"\"\n\n return _PRNG\n\n\ndef set_rn_generator_base_seed(seed):\n \"\"\"\n Like seed for numpy.random.RandomState, but generalized for use with all random streams.\n\n Provide a base seed that will be added to the seeds of all random streams.\n The default base seed value is 0, so set_base_seed(0) is a NOP\n\n set_rn_generator_base_seed(1) will (e.g.) provide a different set of random streams\n than the default, but will provide repeatable results re-running or resuming the simulation\n\n set_rn_generator_base_seed(None) will set the base seed to a random and unpredictable integer\n and so provides \"fully pseudo random\" non-repeatable streams with different results every time\n\n Must be called before start_pipeline() or pipeline.run()\n\n Parameters\n ----------\n seed : int or None\n \"\"\"\n\n if _LAST_CHECKPOINT:\n raise RuntimeError(\"Can only call set_rn_generator_base_seed before the first step.\")\n\n _PRNG.set_base_seed(seed)\n\n\ndef read_df(table_name, checkpoint_name=None):\n \"\"\"\n Read a pandas dataframe from the pipeline store.\n\n We store multiple versions of all simulation tables, for every checkpoint in which they change,\n so we need to know both the table_name and the checkpoint_name of hte desired table.\n\n The only exception is the checkpoints dataframe, which just has a table_name\n\n An error will be raised by HDFStore if the table is not found\n\n Parameters\n ----------\n table_name : str\n checkpoint_name : str\n\n Returns\n -------\n df : pandas.DataFrame\n the dataframe read from the store\n\n \"\"\"\n\n if checkpoint_name:\n key = \"%s/%s\" % (table_name, checkpoint_name)\n else:\n key = table_name\n\n t0 = print_elapsed_time()\n\n store = get_pipeline_store()\n df = store[key]\n\n t0 = print_elapsed_time(\"read_df %s shape %s\" % (key, df.shape,), t0, debug=True)\n\n return df\n\n\ndef write_df(df, table_name, checkpoint_name=None):\n \"\"\"\n Write a pandas dataframe to the pipeline store.\n\n We store multiple versions of all simulation tables, for every checkpoint in which they change,\n so we need to know both the table_name and the checkpoint_name to label the saved table\n\n The only exception is the checkpoints dataframe, which just has a table_name\n\n Parameters\n ----------\n df : pandas.DataFrame\n dataframe to store\n table_name : str\n 
also conventionally the orca table name\n checkpoint_name : str\n the checkpoint at which the table was created/modified\n \"\"\"\n\n # coerce column names to str as unicode names will cause PyTables to pickle them\n df.columns = df.columns.astype(str)\n\n if checkpoint_name:\n key = \"%s/%s\" % (table_name, checkpoint_name)\n else:\n key = table_name\n\n t0 = print_elapsed_time()\n\n store = get_pipeline_store()\n store[key] = df\n\n t0 = print_elapsed_time(\"write_df %s shape %s\" % (key, df.shape,), t0, debug=True)\n\n\ndef rewrap(table_name, df=None):\n \"\"\"\n Add or replace an orca registered table as a unitary DataFrame-backed DataFrameWrapper table\n\n if df is None, then get the dataframe from orca (table_name should be registered, or\n an error will be thrown) which may involve evaluating added columns, etc.\n\n If the orca table already exists, deregister it along with any associated columns before\n re-registering it.\n\n The net result is that the dataframe is a registered orca DataFrameWrapper table with no\n computed or added columns.\n\n Parameters\n ----------\n table_name\n df\n\n Returns\n -------\n the underlying df of the rewrapped table\n \"\"\"\n\n logger.debug(\"rewrap table %s inplace=%s\" % (table_name, (df is None)))\n\n if orca.is_table(table_name):\n\n if df is None:\n logger.debug(\"rewrap - orca.get_table(%s)\" % (table_name,))\n t = orca.get_table(table_name)\n df = t.to_frame()\n else:\n logger.debug(\"rewrap - orca.get_raw_table(%s)\" % (table_name,))\n # don't trigger function call of TableFuncWrapper\n t = orca.get_raw_table(table_name)\n\n t.clear_cached()\n\n for column_name in orca.list_columns_for_table(table_name):\n # logger.debug(\"pop %s.%s: %s\" % (table_name, column_name, t.column_type(column_name)))\n orca.orca._COLUMNS.pop((table_name, column_name), None)\n\n # remove from orca's table list\n orca.orca._TABLES.pop(table_name, None)\n\n assert df is not None\n\n logger.debug(\"rewrap - orca.add_table(%s)\" % (table_name,))\n orca.add_table(table_name, df)\n\n return df\n\n\ndef add_checkpoint(checkpoint_name):\n \"\"\"\n Create a new checkpoint with specified name, write all data required to restore the simulation\n to its current state.\n\n Detect any changed tables , re-wrap them and write the current version to the pipeline store.\n Write the current state of the random number generator.\n\n Parameters\n ----------\n checkpoint_name : str\n \"\"\"\n timestamp = dt.datetime.now()\n\n logger.debug(\"set_checkpoint %s timestamp %s\" % (checkpoint_name, timestamp))\n\n for table_name in orca_dataframe_tables():\n\n # if we have not already checkpointed it or it has changed\n # FIXME - this won't detect if the orca table was modified\n if (table_name not in _LAST_CHECKPOINT or len(orca.list_columns_for_table(table_name))):\n\n # rewrap the changed orca table as a unitary DataFrame-backed DataFrameWrapper table\n df = rewrap(table_name)\n\n logger.debug(\"set_checkpoint %s writing %s to store\" % (checkpoint_name, table_name, ))\n\n # write it to store\n write_df(df, table_name, checkpoint_name)\n\n # remember which checkpoint it was last written\n _LAST_CHECKPOINT[table_name] = checkpoint_name\n\n _LAST_CHECKPOINT[_CHECKPOINT_NAME] = checkpoint_name\n _LAST_CHECKPOINT[_TIMESTAMP] = timestamp\n\n # current state of the random number generator\n _LAST_CHECKPOINT[_PRNG_CHANNELS] = cPickle.dumps(_PRNG.get_channels())\n\n # append to the array of checkpoint history\n _CHECKPOINTS.append(_LAST_CHECKPOINT.copy())\n\n # create a pandas dataframe of the 
checkpoint history, one row per checkpoint\n checkpoints = pd.DataFrame(_CHECKPOINTS)\n\n # convert empty values to str so PyTables doesn't pickle object types\n for c in checkpoints.columns:\n checkpoints[c] = checkpoints[c].fillna('')\n\n # write it to the store, overwriting any previous version (no way to simply extend)\n write_df(checkpoints, _CHECKPOINT_TABLE_NAME)\n\n for channel_state in _PRNG.get_channels():\n logger.debug(\"channel_name '%s', step_name '%s', offset: %s\" % channel_state)\n\n\ndef orca_dataframe_tables():\n \"\"\"\n Return a list of the neames of all currently registered dataframe tables\n \"\"\"\n return [name for name in orca.list_tables() if orca.table_type(name) == 'dataframe']\n\n\ndef checkpointed_tables():\n \"\"\"\n Return a list of the names of all checkpointed tables\n \"\"\"\n return [name for name in _LAST_CHECKPOINT.keys() if name not in _NON_TABLE_COLUMNS]\n\n\ndef load_checkpoint(checkpoint_name):\n \"\"\"\n Load dataframes and restore random number channel state from pipeline hdf5 file.\n This restores the pipeline state that existed at the specified checkpoint in a prior simulation.\n This allows us to resume the simulation after the specified checkpoint\n\n Parameters\n ----------\n checkpoint_name : str\n model_name of checkpoint to load (resume_after argument to start_pipeline)\n \"\"\"\n\n logger.info(\"load_checkpoint %s\" % (checkpoint_name))\n\n checkpoints = read_df(_CHECKPOINT_TABLE_NAME)\n\n try:\n # truncate rows after target checkpoint\n i = checkpoints[checkpoints[_CHECKPOINT_NAME] == checkpoint_name].index[0]\n checkpoints = checkpoints.loc[:i]\n except IndexError:\n msg = \"Couldn't find checkpoint '%s' in checkpoints\" % (checkpoint_name,)\n logger.error(msg)\n raise RuntimeError(msg)\n\n # convert pandas dataframe back to array of checkpoint dicts\n checkpoints = checkpoints.to_dict(orient='records')\n\n # drop tables with empty names\n for checkpoint in checkpoints:\n for key in checkpoint.keys():\n if key not in _NON_TABLE_COLUMNS and not checkpoint[key]:\n del checkpoint[key]\n\n # patch _CHECKPOINTS array of dicts\n del _CHECKPOINTS[:]\n _CHECKPOINTS.extend(checkpoints)\n\n # patch _CHECKPOINTS dict with latest checkpoint info\n _LAST_CHECKPOINT.clear()\n _LAST_CHECKPOINT.update(_CHECKPOINTS[-1])\n\n logger.info(\"load_checkpoint %s timestamp %s\"\n % (checkpoint_name, _LAST_CHECKPOINT['timestamp']))\n\n # table names in order that tracing.register_traceable_table wants us to register them\n tables = tracing.sort_for_registration(checkpointed_tables())\n\n for table_name in tables:\n # read dataframe from pipeline store\n df = read_df(table_name, checkpoint_name=_LAST_CHECKPOINT[table_name])\n logger.info(\"load_checkpoint table %s %s\" % (table_name, df.shape))\n # register it as an orca table\n rewrap(table_name, df)\n # register for tracing\n tracing.register_traceable_table(table_name, df)\n\n # set random state to pickled state at end of last checkpoint\n logger.debug(\"resetting random state\")\n _PRNG.load_channels(cPickle.loads(_LAST_CHECKPOINT[_PRNG_CHANNELS]))\n\n\ndef run_model(model_name):\n \"\"\"\n Run the specified model and add checkpoint for model_name\n\n Since we use model_name as checkpoint name, the same model may not be run more than once.\n\n Parameters\n ----------\n model_name : str\n model_name is assumed to be the name of a registered orca step\n \"\"\"\n\n if not _LAST_CHECKPOINT:\n raise RuntimeError(\"Pipeline not initialized! 
Did you call start_pipeline?\")\n\n # can't run same model more than once\n if model_name in [checkpoint[_CHECKPOINT_NAME] for checkpoint in _CHECKPOINTS]:\n raise RuntimeError(\"Cannot run model '%s' more than once\" % model_name)\n\n t0 = print_elapsed_time()\n _PRNG.begin_step(model_name)\n orca.run([model_name])\n _PRNG.end_step(model_name)\n t0 = print_elapsed_time(\"run_model '%s'\" % model_name, t0)\n add_checkpoint(model_name)\n t0 = print_elapsed_time(\"add_checkpoint '%s'\" % model_name, t0)\n\n\ndef start_pipeline(resume_after=None):\n \"\"\"\n Start pipeline, either for a new run or, if resume_after, loading checkpoint from pipeline.\n\n If resume_after, then we expect the pipeline hdf5 file to exist and contain\n checkpoints from a previous run, including a checkpoint with name specified in resume_after\n\n Parameters\n ----------\n resume_after : str or None\n name of checkpoint to load from pipeline store\n \"\"\"\n\n logger.info(\"start_pipeline...\")\n\n t0 = print_elapsed_time()\n\n # preload skim_dict\n if orca.is_injectable('skim_dict'):\n orca.get_injectable('skim_dict')\n t0 = print_elapsed_time(\"load skim_dict\", t0)\n\n # load skim_stack\n if orca.is_injectable('skim_stack'):\n orca.get_injectable('skim_stack')\n t0 = print_elapsed_time(\"load skim_stack\", t0)\n\n if resume_after:\n # open existing pipeline\n open_pipeline_store(overwrite=False)\n load_checkpoint(resume_after)\n t0 = print_elapsed_time(\"load_checkpoint '%s'\" % resume_after, t0)\n else:\n # open new, empty pipeline\n open_pipeline_store(overwrite=True)\n add_checkpoint(_INITIAL_CHECKPOINT_NAME)\n t0 = print_elapsed_time(\"add_checkpoint '%s'\" % _INITIAL_CHECKPOINT_NAME, t0)\n\n logger.debug(\"start_pipeline complete\")\n\n\ndef run(models, resume_after=None):\n \"\"\"\n run the specified list of models, optionally loading checkpoint and resuming after specified\n checkpoint.\n\n Since we use model_name as checkpoint name, the same model may not be run more than once.\n\n If resume_after checkpoint is specified and a model with that name appears in the models list,\n then we only run the models after that point in the list. This allows the user always to pass\n the same list of models, but specify a resume_after point if desired.\n\n Parameters\n ----------\n models : [str]\n list of model_names\n resume_after : str or None\n model_name of checkpoint to load checkpoint and AFTER WHICH to resume model run\n \"\"\"\n\n if resume_after and resume_after in models:\n models = models[models.index(resume_after) + 1:]\n\n start_pipeline(resume_after)\n\n t0 = print_elapsed_time()\n for model in models:\n run_model(model)\n t0 = print_elapsed_time(\"run (%s models)\" % len(models), t0)\n\n # don't close the pipeline, as the user may want to read intermediate results from the store\n\n\ndef close():\n \"\"\"\n Close any known open files\n \"\"\"\n\n close_open_files()\n\n orca.get_injectable('pipeline_store').close()\n orca.add_injectable('pipeline_store', None)\n\n logger.info(\"close_pipeline\")\n\n\ndef get_table(table_name, checkpoint_name=None):\n \"\"\"\n Return pandas dataframe corresponding to table_name\n\n if checkpoint_name is None, return the current (most recent) version of the table.\n The table can be a checkpointed table or any registered orca table (e.g. 
function table)\n\n if checkpoint_name is specified, return table as it was at that checkpoint\n (the most recently checkpointed version of the table at or before checkpoint_name)\n\n Parameters\n ----------\n table_name : str\n checkpoint_name : str or None\n\n Returns\n -------\n df : pandas.DataFrame\n \"\"\"\n\n # orca table not in checkpoints (e.g. a merged table)\n if table_name not in _LAST_CHECKPOINT and orca.is_table(table_name):\n if checkpoint_name is not None:\n raise RuntimeError(\"get_table: checkpoint_name ('%s') not supported\"\n \"for non-checkpointed table '%s'\" % (checkpoint_name, table_name))\n\n return orca.get_table(table_name).to_frame()\n\n # was table ever checkpointed?\n if table_name not in checkpointed_tables():\n raise RuntimeError(\"table '%s' not in checkpointed tables.\" % table_name)\n\n # if they want current version of table, no need to read from pipeline store\n if checkpoint_name is None or _LAST_CHECKPOINT[table_name] == checkpoint_name:\n return orca.get_table(table_name).local\n\n if checkpoint_name not in [checkpoint[_CHECKPOINT_NAME] for checkpoint in _CHECKPOINTS]:\n raise RuntimeError(\"checkpoint '%s' not in checkpoints.\" % checkpoint_name)\n\n return read_df(table_name, checkpoint_name)\n\n\ndef get_checkpoints():\n \"\"\"\n Get pandas dataframe of info about all checkpoints stored in pipeline\n\n Returns\n -------\n checkpoints_df : pandas.DataFrame\n\n \"\"\"\n\n store = get_pipeline_store()\n\n if store:\n df = store[_CHECKPOINT_TABLE_NAME]\n else:\n pipeline_file_path = orca.get_injectable('pipeline_path')\n df = pd.read_hdf(pipeline_file_path, _CHECKPOINT_TABLE_NAME)\n\n # non-table columns first (column order in df is random because created from a dict)\n table_names = [name for name in df.columns.values if name not in _NON_TABLE_COLUMNS]\n\n df = df[[_CHECKPOINT_NAME, _TIMESTAMP] + table_names]\n df.index.name = 'step_num'\n\n return df\n", "id": "1159879", "language": "Python", "matching_score": 4.655822277069092, "max_stars_count": 0, "path": "activitysim/activitysim/core/pipeline.py" }, { "content": "import collections\n\nimport numpy as np\nimport pandas as pd\nimport orca\n\nfrom .tracing import print_elapsed_time\n\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n# one more than 0xFFFFFFFF so we can wrap using: int64 % _MAX_SEED\n_MAX_SEED = (1 << 32)\n\n# not arbitrary, as we count on incrementing step_num from NULL_STEP_NUM to 0\nNULL_STEP_NUM = -1\n\nSavedChannelState = collections.namedtuple('SavedChannelState', 'channel_name step_num step_name')\n\n\n\"\"\"\nWe expect that the random number channel can be determined by the name of the index of the\ndataframe accompanying the request. This function encapsulates the knowledge of that mapping.\n\nGenerally, the channel name is just the table name used by the pipeline and orca.\nThe exception is the 'tours' channel, which is messy because the mandatory and non-mandatory\ntours tables are originally created separately and later combined in to a single 'tours'\ntable. But during a few model steps before they are combined, they actually exist as two\ndistinct tables. 
We only need to know this dirty secret about tables when we reload\ncheckpointed channels.\n\"\"\"\n\n_CHANNELS = {\n 'households': {\n 'max_steps': 2,\n 'index': 'HHID',\n 'table_names': ['households']\n },\n 'persons': {\n 'max_steps': 7,\n 'index': 'PERID',\n 'table_names': ['persons']\n },\n 'tours': {\n 'max_steps': 5,\n 'index': 'tour_id',\n 'table_names': ['non_mandatory_tours', 'mandatory_tours']\n },\n 'trips': {\n 'max_steps': 5,\n 'index': 'trip_id',\n 'table_names': ['trips']\n },\n}\n\n\nclass SimpleChannel(object):\n \"\"\"\n\n We need to ensure that we generate the same random streams (when re-run or even across\n different simulations.) We do this by generating a random seed for each domain_df row\n that is based on the domain_df index (which implies that generated tables like tours\n and trips are also created with stable, predictable, repeatable row indexes.\n\n Because we need to generate a distinct stream for each step, we can't just use the\n domain_df index - we need a strategy for handling multiple steps without generating\n collisions between streams (i.e. choosing the same seed for more than one stream.)\n\n The easiest way to do this would be to use an array of integers to seed the generator,\n with a global seed, a channel seed, a row seed, and a step seed. Unfortunately, seeding\n numpy RandomState with arrays is a LOT slower than with a single integer seed, and\n speed matters because we reseed on-the-fly for every call because creating a different\n RandomState object for each row uses too much memory (5K per RandomState object)\n\n So instead, multiply the domain_df index by the number of steps required for the channel\n add the step_num to the row_seed to get a unique seed for each (domain_df index, step_num)\n tuple.\n\n Currently, it is possible that random streams for rows in different tables may coincide.\n This would be easy to avoid with either seed arrays or fast jump/offset.\n\n numpy random seeds are unsigned int32 so there are 4,294,967,295 available seeds.\n That is probably just about enough to distribute evenly, for most cities, depending on the\n number of households, persons, tours, trips, and steps.\n\n We do read in the whole households and persons tables at start time, so we could note the\n max index values. But we might then want a way to ensure stability between the test, example,\n and full datasets. 
I am punting on this for now.\n \"\"\"\n\n def __init__(self, channel_name, base_seed, domain_df, max_steps, step_name, step_num):\n\n self.name = channel_name\n self.base_seed = base_seed\n\n # ensure that every channel is different, even for the same df index values and max_steps\n self.unique_channel_seed = hash(self.name) % _MAX_SEED\n\n self.step_name = step_name\n\n assert (step_num == NULL_STEP_NUM) or step_num >= 0\n self.step_num = step_num\n\n self.max_steps = max_steps\n\n assert (self.step_num < self.max_steps)\n\n # create dataframe to hold state for every df row\n self.row_states = self.create_row_states_for_domain(domain_df)\n\n def create_row_states_for_domain(self, domain_df):\n \"\"\"\n Create a dataframe with same index as domain_df and a single column\n with stable, predictable, repeatable row_seeds for that domain_df index value\n\n See notes on the seed generation strategy in class comment above.\n\n Parameters\n ----------\n domain_df : pandas.dataframe\n domain dataframe with index values for which random streams are to be generated\n\n Returns\n -------\n row_states : pandas.DataFrame\n \"\"\"\n\n # dataframe to hold state for every df row\n row_states = pd.DataFrame(index=domain_df.index)\n\n # ensure that every channel is different, even for the same df index values and max_steps\n unique_channel_seed = hash(self.name) % _MAX_SEED\n\n # FIXME - irksome that we need to know max_steps to avoid collisions\n # I'm not sure how to do this in a way that avoids collisions using a single seed\n # Unfortunately seeding from an array is currently A LOT slower than using a single seed\n # without knowing either max_steps or max_index or with support for jump/offset\n row_states['row_seed'] = (self.base_seed + self.unique_channel_seed +\n row_states.index * self.max_steps) % _MAX_SEED\n\n return row_states\n\n def extend_domain(self, domain_df, step_name, step_num):\n \"\"\"\n Extend existing row_state df by adding seed info for each row in domain_df\n\n This is only needed if the channel is composed of more than one underlying table.\n It is assumed that the index values of the component tables are disjoint and\n there will be no ambiguity/collisions between them\n\n Parameters\n ----------\n domain_df : pandas.DataFrame\n domain dataframe with index values for which random streams are to be generated\n and well-known index name corresponding to the channel\n\n step_name : str or None\n provided when reloading so we can restore step_name and step_num\n\n step_num : int or None\n \"\"\"\n\n # these should be new rows, no intersection with existing row_states\n assert len(self.row_states.index.intersection(domain_df.index)) == 0\n\n self.step_name = step_name\n\n if step_num >= 0:\n assert step_num >= self.step_num\n self.step_num = step_num\n\n new_row_states = self.create_row_states_for_domain(domain_df)\n self.row_states = pd.concat([self.row_states, new_row_states])\n\n def begin_step(self, step_name):\n \"\"\"\n Reset channel state for a new state\n\n Parameters\n ----------\n step_name : str\n pipeline step name for this step\n \"\"\"\n\n if self.step_name == step_name:\n return\n\n self.step_name = step_name\n self.step_num += 1\n\n if self.step_num >= self.max_steps:\n raise RuntimeError(\"Too many steps (%s) maxstep %s for channel '%s'\"\n % (self.step_num, self.max_steps, self.name))\n\n # number of rands pulled this step\n self.row_states['offset'] = 0\n\n # standard constant to use for choice_for_df instead of fast-forwarding rand stream\n self.multi_choice_offset 
= None\n\n logger.info(\"begin_step '%s' step_num %s for channel '%s'\"\n % (step_name, self.step_num, self.name, ))\n\n def _generators_for_df(self, df, override_offset=None):\n \"\"\"\n Python generator function for iterating over numpy prngs (nomenclature collision!)\n seeded and fast-forwarded on-the-fly to the appropriate position in the channel's\n random number stream for each row in df.\n\n if override_offset is truthy, it contains an offset to fast-forward by INSTEAD of the\n the current random_state row offset for that df row. This is passed by choice_for_df\n when set_multi_choice_offset has been set, so that multiple choice_for_df calls for the\n same row will yield the same choices (assuming that choice array is the same length)\n\n Parameters\n ----------\n df : pandas.DataFrame\n dataframe with index values for which random streams are to be generated\n and well-known index name corresponding to the channel\n override_offset\n\n \"\"\"\n\n # assert no dupes\n assert len(df.index.unique() == len(df.index))\n\n df_row_states = self.row_states.loc[df.index]\n\n prng = np.random.RandomState()\n for row in df_row_states.itertuples():\n\n seed = (row.row_seed + self.step_num) % _MAX_SEED\n prng.seed(seed)\n\n offset = override_offset or row.offset\n if offset:\n # consume rands\n prng.rand(offset)\n\n yield prng\n\n def set_multi_choice_offset(self, offset, step_name):\n \"\"\"\n setting multi_choice_offset ensures that multiple calls for the same row_state will yield\n the same choices (assuming that choice array is the same length). It also permits avoiding\n collisions with the rand() stream if multi_choice_offset is an integer larger than the\n max number of random_for_df calls made in the same step.\n\n choice_for_df passes multi_choice_offset to _generators_for_df as override_offset so that,\n if multi_choice_offset has been set, _generators_for_df will\n EITHER use the same rand sequence for choosing values\n OR use fresh random values for choices.\n\n Parameters\n ----------\n offset : int\n the offset into the current step's random number stream at which to begin taking\n rands for each choice_for_df row_state row\n step_name : str\n this allows us to ensure that this method is only ever called BEFORE the step begins\n\n Returns\n -------\n\n \"\"\"\n # must do this before step begins\n assert self.step_name != step_name\n\n # expect an int or None\n assert offset is None or type(offset) == int\n\n self.begin_step(step_name)\n self.multi_choice_offset = offset\n\n def random_for_df(self, df, step_name, n=1):\n \"\"\"\n Return n floating point random numbers in range [0, 1) for each row in df\n using the appropriate random channel for each row.\n\n Subsequent calls (in the same step) will return the next rand for each df row\n\n The resulting array will be the same length (and order) as df\n This method is designed to support alternative selection from a probability array\n\n The columns in df are ignored; the index name and values are used to determine\n which random number sequence to to use.\n\n If \"true pseudo random\" behavior is desired (i.e. NOT repeatable) the set_base_seed\n method (q.v.) 
may be used to globally reseed all random streams.\n\n Parameters\n ----------\n df : pandas.DataFrame\n df with index name and values corresponding to a registered channel\n\n n : int\n number of rands desired per df row\n\n Returns\n -------\n rands : 2-D ndarray\n array the same length as df, with n floats in range [0, 1) for each df row\n \"\"\"\n self.begin_step(step_name)\n generators = self._generators_for_df(df)\n rands = np.asanyarray([prng.rand(n) for prng in generators])\n # update offset for rows we handled\n self.row_states.loc[df.index, 'offset'] += n\n return rands\n\n def choice_for_df(self, df, step_name, a, size, replace):\n \"\"\"\n Apply numpy.random.choice once for each row in df\n using the appropriate random channel for each row.\n\n Concatenate the the choice arrays for every row into a single 1-D ndarray\n The resulting array will be of length: size * len(df.index)\n This method is designed to support creation of a interaction_dataset\n\n The columns in df are ignored; the index name and values are used to determine\n which random number sequence to to use.\n\n We pass the multi_choice_offset to _generators_for_df as override_offset so that,\n if multi_choice_offset has been set (by a call to set_multi_choice_offset method, q,v,)\n _generators_for_df will EITHER use the same rand sequence for choosing values\n OR use fresh random values for choices.\n\n Parameters\n ----------\n df : pandas.DataFrame\n df with index name and values corresponding to a registered channel\n\n step_name : str\n current step name so we can update row_states seed info\n\n The remaining parameters are passed through as arguments to numpy.random.choice\n\n a : 1-D array-like or int\n If an ndarray, a random sample is generated from its elements.\n If an int, the random sample is generated as if a was np.arange(n)\n size : int or tuple of ints\n Output shape\n replace : boolean\n Whether the sample is with or without replacement\n\n Returns\n -------\n choices : 1-D ndarray of length: size * len(df.index)\n The generated random samples for each row concatenated into a single (flat) array\n \"\"\"\n self.begin_step(step_name)\n\n # initialize the generator iterator\n # note: if multi_choice_offset is set, it will be used to INSTEAD of current offset\n generators = self._generators_for_df(df, override_offset=self.multi_choice_offset)\n\n sample = np.concatenate(tuple(prng.choice(a, size, replace) for prng in generators))\n\n if not self.multi_choice_offset:\n # FIXME - if replace, should we estimate rands_consumed?\n if replace:\n logger.warn(\"choice_for_df MULTI_CHOICE_FF with replace\")\n # update offset for rows we handled\n self.row_states.loc[df.index, 'offset'] += size\n\n return sample\n\n\nclass Random(object):\n\n def __init__(self, channel_info=_CHANNELS):\n\n self.channel_info = channel_info\n\n # for map index name to channel name\n self.index_map = {info['index']: channel_name\n for channel_name, info in self.channel_info.iteritems()}\n\n self.channels = {}\n self.step_name = None\n self.step_seed = None\n self.base_seed = 0\n self.global_rng = np.random.RandomState()\n\n def get_channel_info(self, channel_name, property_name):\n\n info = self.channel_info.get(channel_name, None)\n if info is None:\n raise RuntimeError(\"Unknown channel '%s'\" % channel_name)\n\n property = info.get(property_name, None)\n if property is None:\n raise RuntimeError(\"Unknown property '%s' for channel '%s'\"\n % (property_name, channel_name))\n\n return property\n\n def 
get_channel_name_for_df(self, df):\n \"\"\"\n Return the channel name corresponding to the index name of df\n\n We expect that the random number channel can be determined by the name of the index of the\n dataframe accompanying the request. This mapping was specified in channel_info\n\n This function internally encapsulates the knowledge of that mapping.\n\n Parameters\n ----------\n df : pandas.DataFrame\n domain_df or a df passed to random number/choice methods with well known index name\n\n Returns\n -------\n channel_name : str\n \"\"\"\n channel_name = self.index_map.get(df.index.name, None)\n if channel_name is None:\n raise RuntimeError(\"No channel with index name '%s'\" % df.index.name)\n return channel_name\n\n def get_channel_for_df(self, df):\n \"\"\"\n Return the channel for this df. Channel should already have been loaded/added.\n\n Parameters\n ----------\n df : pandas.dataframe\n either a domain_df for a channel being added or extended\n or a df for which random values are to be generated\n \"\"\"\n channel_name = self.get_channel_name_for_df(df)\n if channel_name not in self.channels:\n raise RuntimeError(\"Channel '%s' has not yet been added.\" % channel_name)\n return self.channels[channel_name]\n\n # step handling\n\n def begin_step(self, step_name):\n \"\"\"\n Register that the pipeline has entered a new step and that global and channel streams\n should transition to the new stream.\n\n Parameters\n ----------\n step_name : str\n pipeline step name\n \"\"\"\n\n assert self.step_name is None\n assert step_name is not None\n assert step_name != self.step_name\n\n self.step_name = step_name\n self.step_seed = hash(step_name) % _MAX_SEED\n\n seed = [self.base_seed, self.step_seed]\n self.global_rng = np.random.RandomState(seed)\n\n def end_step(self, step_name):\n \"\"\"\n This is mostly just for internal consistency checking -\n I'm not sure it serves any useful purpose except to catch \"mis-steps\" in the pipeline code\n\n Parameters\n ----------\n step_name : str\n name of current step (just a consistency check)\n \"\"\"\n assert self.step_name is not None\n assert self.step_name == step_name\n\n self.step_name = None\n self.step_seed = None\n self.global_rng = None\n\n # channel management\n\n def add_channel(self, domain_df, channel_name, step_name=None, step_num=NULL_STEP_NUM):\n \"\"\"\n Create or extend a channel for generating random number streams for domain_df.\n\n We need to be prepared to extend an existing channel because mandatory and non-mandatory\n tours are generated separately by different sub-models, but end up members of a common\n tours channel.\n\n Parameters\n ----------\n domain_df : pandas.DataFrame\n domain dataframe with index values for which random streams are to be generated\n and well-known index name corresponding to the channel\n\n channel_name : str\n expected channel name provided as a consistency check\n\n step_name : str or None\n for channels being loaded (resumed) we need the step_name and step_num to maintain\n consistent step numbering\n\n step_num : int or None\n for channels being loaded (resumed) we need the step_name and step_num to maintain\n consistent step numbering\n \"\"\"\n assert channel_name == self.get_channel_name_for_df(domain_df)\n assert (step_name is None) == (step_num == NULL_STEP_NUM)\n\n logger.debug(\"Random: add_channel step_num %s step_name '%s'\" % (step_num, step_name))\n\n if channel_name in self.channels:\n logger.debug(\"extending channel '%s' %s ids\" % (channel_name, len(domain_df.index)))\n channel = 
self.channels[channel_name]\n channel.extend_domain(domain_df, step_name, step_num)\n\n else:\n logger.debug(\"adding channel '%s' %s ids\" % (channel_name, len(domain_df.index)))\n\n max_steps = self.get_channel_info(channel_name, 'max_steps')\n\n channel = SimpleChannel(channel_name,\n self.base_seed,\n domain_df,\n max_steps,\n step_name,\n step_num\n )\n\n self.channels[channel_name] = channel\n\n def get_channels(self):\n \"\"\"\n Return channel state in a form to be pickled and checkpointed by the pipeline manager and\n later read, unpickled and passed back to the load_channels method to restore channel states\n\n Returns\n -------\n salvable_channel_state : SavedChannelState\n \"\"\"\n\n salvable_channel_state =\\\n [SavedChannelState(channel_name=channel_name,\n step_num=c.step_num,\n step_name=c.step_name)\n for channel_name, c in self.channels.iteritems()]\n\n return salvable_channel_state\n\n def load_channels(self, saved_channels):\n \"\"\"\n Load the channels listed in saved_channels\n\n The saved_channels list is a list of channel states created by get_channels and\n saved by the pipeline manager at a checkpoint.\n\n This channel state information allows us to restore the channels to the same state\n as they were when checkpointed so that the random number streams will can be resumed.\n\n Note that we assume that the channel names correspond to orca table names, so that\n we can get the domain_df for that channel from orca.\n\n Since tours are originally created in two tables (mandatory and non-mandatory) we get the\n domain_dfs from them because the checkpoint may have occurred when only one of those\n tables had been created and the tours table may not exist yet.\n\n Parameters\n ----------\n saved_channels : array of SavedChannelState\n \"\"\"\n\n for channel_state in saved_channels:\n\n channel_name = channel_state.channel_name\n assert channel_name in self.channel_info\n\n # FIXME - this rigamarole is here to support the tours channel two component tables\n table_names = self.get_channel_info(channel_name, 'table_names')\n\n logger.debug(\"loading channel %s from %s\" % (channel_state.channel_name, table_names))\n\n logger.debug(\"channel_state %s\" % (channel_state, ))\n\n for table_name in table_names:\n if orca.is_table(table_name):\n df = orca.get_table(table_name).local\n self.add_channel(df,\n channel_name=channel_state.channel_name,\n step_num=channel_state.step_num,\n step_name=channel_state.step_name)\n\n # random number generation\n\n def set_base_seed(self, seed=None):\n \"\"\"\n Like seed for numpy.random.RandomState, but generalized for use with all random streams.\n\n Provide a base seed that will be added to the seeds of all random streams.\n The default base seed value is 0, so set_base_seed(0) is a NOP\n\n set_base_seed(1) will (e.g.) 
provide a different set of random streams than the default\n but will provide repeatable results re-running or resuming the simulation\n\n set_base_seed(None) will set the base seed to a random and unpredictable integer and so\n provides \"fully pseudo random\" non-repeatable streams with different results every time\n\n Must be called before first step (before any channels are added or rands are consumed)\n\n Parameters\n ----------\n seed : int or None\n \"\"\"\n\n if self.step_name is not None or self.channels:\n raise RuntimeError(\"Can only call set_base_seed before the first step.\")\n\n assert len(self.channels.keys()) == 0\n\n if seed is None:\n self.base_seed = np.random.RandomState().randint(_MAX_SEED)\n logger.info(\"Set random seed randomly to %s\" % self.base_seed)\n else:\n logger.info(\"Set random seed base to %s\" % seed)\n self.base_seed = seed\n\n def get_global_rng(self):\n \"\"\"\n Return a numpy random number generator for use within current step.\n\n This method is designed to provide random numbers for uses that do not correspond to\n known channel domains. e.g. to select a subset of households to use for the simulation.\n\n global_rng is reseeded to a predictable value at the beginning of every step so that\n it behaves repeatably when simulation is resumed or re-run.\n\n If \"true pseudo random\" behavior is desired (i.e. NOT repeatable) the set_base_seed\n method (q.v.) may be used to globally reseed all random streams.\n\n Returns\n -------\n global_rng : numpy.random.RandomState()\n numpy random number generator for use within current step\n\n \"\"\"\n assert self.step_name is not None\n return self.global_rng\n\n def set_multi_choice_offset(self, df, offset):\n \"\"\"\n Specify a fixed offset into the df channel's random number streams to use for all calls\n made to choice_for_df for the duration of the current step.\n\n A value of None means that a different set of random values should be used for each\n subsequent call to choice_for_df (for the same df row index)\n\n Recall that since each row in the df has its own distinct random stream,\n this means that each random stream is offset by the specified amount.\n\n This method has no particular utility if choice_for_df is only called once for each\n domain_df row, other than to (potentially) make subsequent calls to random_for_df\n faster if choice_for_df consumes a large number of random numbers, as random_for_df\n will not need to fast-forward as much.\n\n This method must be invoked before any random numbers are consumed in hte current step.\n The multi_choice_offset is reset to the default (None) at the beginning of each step.\n\n If \"true pseudo random\" behavior is desired (i.e. NOT repeatable) the set_base_seed\n method (q.v.) 
may be used to globally reseed all random streams.\n\n Parameters\n ----------\n df : pandas.dataframe\n offset : int or None\n absolute integer offset to fast-forward to in random streams in choice_for_df\n \"\"\"\n\n channel = self.get_channel_for_df(df)\n channel.set_multi_choice_offset(offset, self.step_name)\n logging.info(\"set_multi_choice_offset to %s for channel %s\"\n % (channel.multi_choice_offset, channel.name))\n\n def random_for_df(self, df, n=1):\n \"\"\"\n Return a single floating point random number in range [0, 1) for each row in df\n using the appropriate random channel for each row.\n\n Subsequent calls (in the same step) will return the next rand for each df row\n\n The resulting array will be the same length (and order) as df\n This method is designed to support alternative selection from a probability array\n\n The columns in df are ignored; the index name and values are used to determine\n which random number sequence to to use.\n\n We assume that we can identify the channel to used based on the name of df.index\n This channel should have already been registered by a call to add_channel (q.v.)\n\n If \"true pseudo random\" behavior is desired (i.e. NOT repeatable) the set_base_seed\n method (q.v.) may be used to globally reseed all random streams.\n\n Parameters\n ----------\n df : pandas.DataFrame\n df with index name and values corresponding to a registered channel\n\n n : int\n number of rands desired (default 1)\n\n Returns\n -------\n choices : 1-D ndarray the same length as df\n a single float in range [0, 1) for each row in df\n \"\"\"\n\n # FIXME - for tests\n if not self.channels:\n rng = np.random.RandomState(0)\n rands = np.asanyarray([rng.rand(n) for _ in range(len(df))])\n return rands\n\n t0 = print_elapsed_time()\n channel = self.get_channel_for_df(df)\n rands = channel.random_for_df(df, self.step_name, n)\n t0 = print_elapsed_time(\"random_for_df for %s rows\" % len(df.index), t0, debug=True)\n return rands\n\n def choice_for_df(self, df, a, size, replace):\n \"\"\"\n Apply numpy.random.choice once for each row in df\n using the appropriate random channel for each row.\n\n Concatenate the the choice arrays for every row into a single 1-D ndarray\n The resulting array will be of length: size * len(df.index)\n This method is designed to support creation of a interaction_dataset\n\n The columns in df are ignored; the index name and values are used to determine\n which random number sequence to to use.\n\n Depending on the value of the multi_choice_offset setting\n (set by calling set_multi_choice_offset method, q,v,)\n for subsequent calls in the same step, this routine will\n EITHER use the same rand sequence for choosing values\n OR use fresh random values for choices.\n\n We assume that we can identify the channel to used based on the name of df.index\n This channel should have already been registered by a call to add_channel (q.v.)\n\n Parameters\n ----------\n df : pandas.DataFrame\n df with index name and values corresponding to a registered channel\n\n The remaining parameters are passed through as arguments to numpy.random.choice\n\n a : 1-D array-like or int\n If an ndarray, a random sample is generated from its elements.\n If an int, the random sample is generated as if a was np.arange(n)\n size : int or tuple of ints\n Output shape\n replace : boolean\n Whether the sample is with or without replacement\n\n Returns\n -------\n choices : 1-D ndarray of length: size * len(df.index)\n The generated random samples for each row concatenated into a 
single (flat) array\n \"\"\"\n\n # FIXME - for tests\n if not self.channels:\n rng = np.random.RandomState(0)\n choices = np.concatenate(tuple(rng.choice(a, size, replace) for _ in range(len(df))))\n return choices\n\n t0 = print_elapsed_time()\n channel = self.get_channel_for_df(df)\n choices = channel.choice_for_df(df, self.step_name, a, size, replace)\n t0 = print_elapsed_time(\"choice_for_df for %s rows\" % len(df.index), t0, debug=True)\n return choices\n", "id": "8872085", "language": "Python", "matching_score": 3.0317883491516113, "max_stars_count": 0, "path": "activitysim/activitysim/core/random.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport numpy as np\nimport pandas as pd\nimport numpy.testing as npt\nimport pandas.util.testing as pdt\nimport pytest\n\nfrom .. import random\nfrom .. import pipeline\n\n\ndef test_basic():\n\n rng = random.Random()\n\n rng.set_base_seed(0)\n\n rng.begin_step('test_step')\n\n global_rng = rng.get_global_rng()\n\n npt.assert_almost_equal(global_rng.rand(1), [0.09237])\n\n # second call should return something different\n with pytest.raises(AssertionError) as excinfo:\n npt.assert_almost_equal(global_rng.rand(1), [0.09237])\n assert \"Arrays are not almost equal\" in str(excinfo.value)\n\n # second call should return something different\n with pytest.raises(RuntimeError) as excinfo:\n rng.set_base_seed(1)\n assert \"call set_base_seed before the first step\" in str(excinfo.value)\n\n\ndef test_channel():\n\n channels = {\n 'households': {'max_steps': 4, 'index': 'HHID'},\n 'persons': {'max_steps': 2, 'index': 'PERID'},\n }\n rng = random.Random(channels)\n\n persons = pd.DataFrame({\n \"household_id\": [1, 1, 2, 2, 2],\n }, index=[1, 2, 3, 4, 5])\n persons.index.name = 'PERID'\n\n households = pd.DataFrame({\n \"data\": [1, 1, 2, 2, 2],\n }, index=[1, 2, 3, 4, 5])\n households.index.name = 'HHID'\n\n rng.begin_step('test_step')\n\n rng.add_channel(persons, channel_name='persons', step_name='last', step_num=0)\n rng.add_channel(households, channel_name='households')\n\n rands = rng.random_for_df(persons)\n\n assert rands.shape == (5, 1)\n expected_rands = [0.9374985, 0.0206057, 0.4684723, 0.246012, 0.700952]\n npt.assert_almost_equal(np.asanyarray(rands).flatten(), expected_rands)\n\n # second call should return something different\n rands = rng.random_for_df(persons)\n expected_rands = [0.719677, 0.1214514, 0.7015227, 0.8206436, 0.6126977]\n npt.assert_almost_equal(np.asanyarray(rands).flatten(), expected_rands)\n\n rng.end_step('test_step')\n\n rng.begin_step('test_step2')\n\n # should raise if max_steps exceeded\n with pytest.raises(RuntimeError) as excinfo:\n rands = rng.random_for_df(persons)\n assert \"Too many steps\" in str(excinfo.value)\n\n rands = rng.random_for_df(households)\n expected_rands = [0.122587, 0.7472187, 0.4623908, 0.4600264, 0.8385861]\n npt.assert_almost_equal(np.asanyarray(rands).flatten(), expected_rands)\n\n choices = rng.choice_for_df(households, [1, 2, 3, 4], 2, replace=True)\n expected_choices = [1, 2, 1, 1, 3, 1, 3, 1, 1, 4]\n npt.assert_almost_equal(choices, expected_choices)\n\n # should be DIFFERENT the second time\n choices = rng.choice_for_df(households, [1, 2, 3, 4], 2, replace=True)\n expected_choices = [1, 4, 2, 2, 1, 1, 3, 2, 2, 2]\n npt.assert_almost_equal(choices, expected_choices)\n\n rng.end_step('test_step2')\n\n rng.begin_step('test_step3')\n\n rng.set_multi_choice_offset(households, 10)\n\n choices = rng.choice_for_df(households, [1, 2, 3, 4], 2, replace=True)\n 
expected_choices = [3, 4, 2, 4, 1, 2, 3, 3, 1, 2]\n npt.assert_almost_equal(choices, expected_choices)\n\n # should be SAME second time\n choices = rng.choice_for_df(households, [1, 2, 3, 4], 2, replace=True)\n npt.assert_almost_equal(choices, expected_choices)\n\n rng.end_step('test_step3')\n\n rng.begin_step('test_step4')\n\n rands = rng.random_for_df(households, n=2)\n\n expected_rands = [0.7375634, 0.7714111, 0.8960886, 0.6161022, 0.833949,\n 0.3427474, 0.9498073, 0.1408251, 0.1759239, 0.6410704]\n\n npt.assert_almost_equal(np.asanyarray(rands).flatten(), expected_rands)\n\n rng.end_step('test_step4')\n", "id": "8985319", "language": "Python", "matching_score": 1.1655528545379639, "max_stars_count": 0, "path": "activitysim/activitysim/core/test/test_random.py" }, { "content": "import numpy as np\nimport pytest\n\n\[email protected]\ndef random_seed(request):\n current = np.random.get_state()\n\n def fin():\n np.random.set_state(current)\n request.addfinalizer(fin)\n\n np.random.seed(0)\n", "id": "6772732", "language": "Python", "matching_score": 0.6723629832267761, "max_stars_count": 0, "path": "activitysim/conftest.py" }, { "content": "import sys\nimport time\nimport argparse\nimport numpy as np\n\nimport orca\n\nfrom lcog import datasources\nfrom lcog import variables\nfrom lcog import models\n\n### Template imports\nfrom urbansim.models import util\nfrom urbansim_templates import modelmanager as mm\nfrom urbansim_templates.models import OLSRegressionStep\n\nmm.initialize()\n\n\ndef run(forecast_year=2035, random_seed=False):\n \"\"\"\n Set up and run simulation.\n Parameters\n ----------\n forecast_year : int, optional\n Year to simulate to. If year argument is passed from the terminal, then\n that year is applied here, otherwise a default value is applied.\n random_seed : int, optional\n Random seed.\n Returns\n -------\n _ : None\n No return value for now.\n \"\"\"\n # Record start time\n start_time = time.time()\n\n orca.add_injectable('forecast_year', forecast_year)\n\n # Set value of optional random seed\n if random_seed:\n np.random.seed(random_seed)\n\n # Model names\n transition_models = ['household_transition', 'job_transition']\n price_models = ['repm_sf_detached', 'repm_duplex_townhome', 'repm_multifamily',\n 'repm_industrial', 'repm_retail', 'repm_office']\n developer_models = ['feasibility', 'residential_developer', 'non_residential_developer']\n location_models = ['hlcm1', 'hlcm2',\n 'elcm1', 'elcm2', 'elcm3', 'elcm4', 'elcm5', 'elcm6',\n 'elcm7', 'elcm8', 'elcm9', 'elcm10', 'elcm11', 'elcm12',\n 'elcm13', 'elcm14']\n end_of_year_models = ['generate_indicators']\n\n # Simulate\n orca.run(['build_networks', 'generate_indicators'])\n orca.run(transition_models + price_models + developer_models + location_models + end_of_year_models,\n iter_vars = list(range(2011, forecast_year + 1)))\n\n\n # Record end time\n end_time = time.time()\n time_elapsed = end_time - start_time\n print('Simulation duration: %s minutes' % (time_elapsed/60))\n\n\nif __name__ == '__main__':\n\n # Run simulation with optional command-line arguments\n if len(sys.argv) > 1:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-y\", \"--year\", type=int, help=\"forecast year to simulate to\")\n parser.add_argument(\"-s\", \"--seed\", type=float, help=\"random seed value\")\n args = parser.parse_args()\n\n forecast_year = args.year if args.year else 2035\n random_seed = int(args.seed) if args.seed else False\n run(forecast_year, random_seed)\n\n else:\n run()\n", "id": "344986", "language": 
"Python", "matching_score": 0.9103823304176331, "max_stars_count": 2, "path": "bayarea/simulate.py" }, { "content": "import pandana as pdna\nfrom urbansim.utils import misc\nimport orca\nfrom bayarea import datasources\nfrom bayarea import variables\nfrom bayarea import models\n\norca.run(['initialize_network_beam'])\norca.run(['network_aggregations_beam'])\n", "id": "2068941", "language": "Python", "matching_score": 0.415164053440094, "max_stars_count": 2, "path": "bayarea/access_testing.py" }, { "content": "import pandas as pd\nimport os\n\ndef find_latest_beam_iteration(beam_output_dir):\n iter_dirs = [os.path.join(root, dir) for root, dirs, files in os.walk(beam_output_dir) for dir in dirs if\n dir == \"ITERS\"]\n if not iter_dirs:\n return None, None\n last_iters_dir = max(iter_dirs, key=os.path.getmtime)\n all_iteration_dir = [it for it in os.listdir(last_iters_dir)]\n if not all_iteration_dir:\n return None, None\n it_prefix = \"it.\"\n max_it_num = max(dir_name[len(it_prefix):] for dir_name in all_iteration_dir)\n return os.path.join(last_iters_dir, it_prefix + str(max_it_num)), max_it_num\n\n\ndef find_produced_skims(beam_output_dir):\n iteration_dir, it_num = find_latest_beam_iteration(beam_output_dir)\n if iteration_dir is None:\n return None\n skims_path = os.path.join(iteration_dir, f\"{it_num}.activitySimODSkims_current.csv.gz\")\n if os.path.exists(skims_path):\n return skims_path\n else:\n return None\n\n\ndef merge_current_skims(all_skims_path, previous_skims_path, beam_output_dir):\n current_skims_path = find_produced_skims(beam_output_dir)\n if (current_skims_path is None) | (previous_skims_path == current_skims_path):\n # this means beam has not produced the skims\n return previous_skims_path\n\n schema = {\n \"origin\": str,\n \"destination\": str,\n \"DEBUG_TEXT\": str,\n }\n index_columns = ['timePeriod', 'pathType', 'origin', 'destination']\n\n all_skims = pd.read_csv(all_skims_path, dtype=schema, index_col=index_columns)\n cur_skims = pd.read_csv(current_skims_path, dtype=schema, index_col=index_columns)\n all_skims.loc[cur_skims.index.intersection(all_skims.index)] = cur_skims\n all_skims = pd.concat([all_skims, cur_skims.loc[cur_skims.index.difference(all_skims.index)]])\n all_skims = all_skims.reset_index()\n all_skims.to_csv(all_skims_path, index=False)\n return current_skims_path\n", "id": "8309747", "language": "Python", "matching_score": 0.9153603911399841, "max_stars_count": 3, "path": "pilates/beam/postprocessor.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport numpy as np\nimport pandas as pd\nimport openmatrix as omx\n\n# input files, SF county is zones 1 to 190, output files\ndata_file = 'mtc_asim.h5'\nskims_file = 'skims.omx'\n\n\ndef create_subset(data_file_out, skims_file_out, maxZone, households_sample_size=0):\n\n # process all data tables\n print 'skims/accessibility'\n df = pd.read_hdf(data_file, 'skims/accessibility')\n df = df[df.index <= maxZone]\n df.to_hdf(data_file_out, 'skims/accessibility')\n\n print 'land_use/taz_data'\n df = pd.read_hdf(data_file, 'land_use/taz_data')\n df = df[df.index <= maxZone]\n df.to_hdf(data_file_out, 'land_use/taz_data')\n\n print 'households'\n df = pd.read_hdf(data_file, 'households')\n df = df[df.TAZ <= maxZone]\n if households_sample_size:\n df = df.take(np.random.choice(len(df), size=households_sample_size, replace=False))\n df.to_hdf(data_file_out, 'households')\n\n print 'persons'\n per = pd.read_hdf(data_file, 'persons')\n per = per[per.household_id.isin(df.index)]\n 
per.to_hdf(data_file_out, 'persons')\n\n # process all skims\n skims = omx.openFile(skims_file)\n skims_out = omx.openFile(skims_file_out, 'a')\n\n skimsToProcess = skims.listMatrices()\n for skimName in skimsToProcess:\n print skimName\n skims_out[skimName] = skims[skimName][0:maxZone, 0:maxZone]\n skims_out[skimName].attrs.TITLE = '' # remove funny character for OMX viewer\n\n\ncreate_subset(data_file_out='mtc_asim_sf.h5',\n skims_file_out='skims_sf.omx',\n maxZone=190\n )\n\ncreate_subset(data_file_out='mtc_asim_sf_test.h5',\n skims_file_out='skims_sf_test.omx',\n maxZone=25,\n households_sample_size=5000\n )\n", "id": "1932212", "language": "Python", "matching_score": 2.0391435623168945, "max_stars_count": 0, "path": "activitysim/scripts/create_sf_example.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport households\nimport persons\nimport landuse\nimport skims\nimport accessibility\nimport tours\nimport size_terms\nimport trips\n", "id": "5882524", "language": "Python", "matching_score": 1.508851170539856, "max_stars_count": 0, "path": "activitysim/activitysim/abm/tables/__init__.py" }, { "content": "import households\nimport persons\nimport landuse\n", "id": "4135952", "language": "Python", "matching_score": 0.7091006636619568, "max_stars_count": 0, "path": "activitysim/example/extensions/__init__.py" }, { "content": "import numpy as np\nimport pandas as pd\n\nimport orca\n\nfrom activitysim.core.util import other_than, reindex\n\n\[email protected](\"persons\")\ndef age_16_to_19(persons):\n c = persons.to_frame([\"age\"]).eval(\"16 <= age <= 19\")\n return c\n\n\[email protected](\"persons\")\ndef age_16_p(persons):\n return persons.to_frame([\"age\"]).eval(\"16 <= age\")\n\n\[email protected](\"persons\")\ndef adult(persons):\n return persons.to_frame([\"age\"]).eval(\"18 <= age\")\n\n\n# FIXME - these are my \"placeholder\" for joint trip generation\n# number of joint shopping tours\[email protected](\"persons\")\ndef num_shop_j(persons):\n return pd.Series(0, persons.index)\n\n\n# FIXME - these are my \"placeholder\" for joint trip generation\n# number of joint shopping tours\[email protected](\"persons\")\ndef num_main_j(persons):\n return pd.Series(0, persons.index)\n\n\n# FIXME - these are my \"placeholder\" for joint trip generation\n# number of joint shopping tours\[email protected](\"persons\")\ndef num_eat_j(persons):\n return pd.Series(0, persons.index)\n\n\n# FIXME - these are my \"placeholder\" for joint trip generation\n# number of joint shopping tours\[email protected](\"persons\")\ndef num_visi_j(persons):\n return pd.Series(0, persons.index)\n\n\n# FIXME - these are my \"placeholder\" for joint trip generation\n# number of joint shopping tours\[email protected](\"persons\")\ndef num_disc_j(persons):\n return pd.Series(0, persons.index)\n\n\[email protected](\"persons\")\ndef num_joint_tours(persons):\n return persons.num_shop_j + persons.num_main_j + persons.num_eat_j +\\\n persons.num_visi_j + persons.num_disc_j\n\n\[email protected](\"persons\")\ndef male(persons):\n return persons.sex == 1\n\n\[email protected](\"persons\")\ndef female(persons):\n return persons.sex == 2\n\n\n# this is an idiom to grab the person of the specified type and check to see if\n# there is 1 or more of that kind of person in each household\ndef presence_of(ptype, persons):\n bools = persons.ptype_cat == ptype\n return other_than(persons.household_id, bools)\n\n\[email protected]('persons')\ndef has_non_worker(persons):\n return presence_of(\"nonwork\", 
persons)\n\n\n@orca.column('persons')\ndef has_retiree(persons):\n return presence_of(\"retired\", persons)\n\n\n@orca.column('persons')\ndef has_preschool_kid(persons):\n return presence_of(\"preschool\", persons)\n\n\n@orca.column('persons')\ndef has_driving_kid(persons):\n return presence_of(\"driving\", persons)\n\n\n@orca.column('persons')\ndef has_school_kid(persons):\n return presence_of(\"school\", persons)\n\n\n@orca.column('persons')\ndef has_full_time(persons):\n return presence_of(\"full\", persons)\n\n\n@orca.column('persons')\ndef has_part_time(persons):\n return presence_of(\"part\", persons)\n\n\n@orca.column('persons')\ndef has_university(persons):\n return presence_of(\"university\", persons)\n\n\n# convert employment categories to string descriptors\n@orca.column(\"persons\")\ndef employed_cat(persons, settings):\n return persons.pemploy.map(settings[\"employment_map\"])\n\n\n# convert student categories to string descriptors\n@orca.column(\"persons\")\ndef student_cat(persons, settings):\n return persons.pstudent.map(settings[\"student_map\"])\n\n\n# convert person type categories to string descriptors\n@orca.column(\"persons\")\ndef ptype_cat(persons, settings):\n return persons.ptype.map(settings[\"person_type_map\"])\n\n\n# borrowing these definitions from the original code\n@orca.column(\"persons\")\ndef student_is_employed(persons):\n return (persons.ptype_cat.isin(['university', 'driving']) &\n persons.employed_cat.isin(['full', 'part']))\n\n\n@orca.column(\"persons\")\ndef nonstudent_to_school(persons):\n return (persons.ptype_cat.isin(['full', 'part', 'nonwork', 'retired']) &\n persons.student_cat.isin(['grade_or_high', 'college']))\n\n\n@orca.column(\"persons\")\ndef is_worker(persons):\n return persons.employed_cat.isin(['full', 'part'])\n\n\n@orca.column(\"persons\")\ndef is_student(persons):\n return persons.student_cat.isin(['grade_or_high', 'college'])\n\n\n@orca.column(\"persons\")\ndef is_gradeschool(persons, settings):\n return (persons.student_cat == \"grade_or_high\") & \\\n (persons.age <= settings['grade_school_max_age'])\n\n\n@orca.column(\"persons\")\ndef is_highschool(persons, settings):\n return (persons.student_cat == \"grade_or_high\") & \\\n (persons.age > settings['grade_school_max_age'])\n\n\n@orca.column(\"persons\")\ndef is_university(persons):\n return persons.student_cat == \"university\"\n\n\n@orca.column(\"persons\")\ndef home_taz(households, persons):\n return reindex(households.home_taz, persons.household_id)\n\n\n# FIXME not totally sure what this is but it's used in non mandatory tour\n# FIXME generation and probably has to do with remaining unscheduled time\n@orca.column('persons')\ndef max_window(persons):\n return pd.Series(0, persons.index)\n", "id": "2213606", "language": "Python", "matching_score": 2.6625802516937256, "max_stars_count": 0, "path": "activitysim/example/extensions/persons.py" }, { "content": "import numpy as np\nimport pandas as pd\n\nimport orca\n\nfrom activitysim.core.util import reindex\n\n\n@orca.column(\"households\")\ndef income_in_thousands(households):\n return households.income / 1000\n\n\n@orca.column(\"households\")\ndef income_segment(households):\n return pd.cut(households.income_in_thousands,\n bins=[-np.inf, 30, 60, 100, np.inf],\n labels=[1, 2, 3, 4]).astype(int)\n\n\n@orca.column(\"households\")\ndef non_workers(households, persons):\n return persons.household_id.value_counts() - 
households.workers\n\n\[email protected](\"households\")\ndef drivers(households, persons):\n # we assume that everyone 16 and older is a potential driver\n return persons.local.query(\"16 <= age\").\\\n groupby(\"household_id\").size().\\\n reindex(households.index).fillna(0)\n\n\[email protected](\"households\")\ndef num_young_children(households, persons):\n return persons.local.query(\"age <= 4\").\\\n groupby(\"household_id\").size().\\\n reindex(households.index).fillna(0)\n\n\[email protected](\"households\")\ndef num_children(households, persons):\n return persons.local.query(\"5 <= age <= 15\").\\\n groupby(\"household_id\").size().\\\n reindex(households.index).fillna(0)\n\n\[email protected](\"households\")\ndef num_adolescents(households, persons):\n return persons.local.query(\"16 <= age <= 17\").\\\n groupby(\"household_id\").size().\\\n reindex(households.index).fillna(0)\n\n\[email protected](\"households\")\ndef num_college_age(households, persons):\n return persons.local.query(\"18 <= age <= 24\").\\\n groupby(\"household_id\").size().\\\n reindex(households.index).fillna(0)\n\n\[email protected](\"households\")\ndef num_young_adults(households, persons):\n return persons.local.query(\"25 <= age <= 34\").\\\n groupby(\"household_id\").size().\\\n reindex(households.index).fillna(0)\n\n\n# just a rename / alias\[email protected](\"households\")\ndef home_taz(households):\n return households.TAZ\n\n\n# map household type ids to strings\[email protected](\"households\")\ndef household_type(households, settings):\n return households.HHT.map(settings[\"household_type_map\"])\n\n\[email protected](\"households\")\ndef non_family(households):\n return households.household_type.isin([\"nonfamily_male_alone\",\n \"nonfamily_male_notalone\",\n \"nonfamily_female_alone\",\n \"nonfamily_female_notalone\"])\n\n\n# can't just invert these unfortunately because there's a null household type\[email protected](\"households\")\ndef family(households):\n return households.household_type.isin([\"family_married\",\n \"family_male\",\n \"family_female\"])\n\n\n# FIXME - not sure why we would need this since it is added by auto_ownership model\n# @orca.column('households')\n# def auto_ownership(households):\n# return pd.Series(0, households.index)\n\n\[email protected]('households')\ndef hhsize(households):\n return households.PERSONS\n\n\[email protected]('households')\ndef home_is_urban(households, land_use, settings):\n s = reindex(land_use.area_type, households.home_taz)\n return s < settings['urban_threshold']\n\n\[email protected]('households')\ndef home_is_rural(households, land_use, settings):\n s = reindex(land_use.area_type, households.home_taz)\n return s > settings['rural_threshold']\n", "id": "11876610", "language": "Python", "matching_score": 1.550262451171875, "max_stars_count": 0, "path": "activitysim/example/extensions/households.py" }, { "content": "import orca\nimport pandas as pd\nimport os\nimport asim_utils\nimport asim_simulate\nimport asim_misc\nimport skim as askim\nimport tracing\nimport openmatrix as omx\n\n\n################################\n# from asim.abm.tables.landuse #\n################################\n\[email protected]()\ndef land_use(asim_store):\n\n df = asim_store[\"land_use/taz_data\"]\n\n print(\"loaded land_use %s\" % (df.shape,))\n\n # replace table function with dataframe\n orca.add_table('land_use', df)\n\n return df\n\n\n###################################\n# from asim.abm.tables.size_terms #\n###################################\n\[email 
protected]()\ndef size_terms(configs_dir):\n f = os.path.join(configs_dir, 'destination_choice_size_terms.csv')\n return pd.read_csv(f, index_col='segment')\n\n\[email protected]()\ndef destination_size_terms(land_use, size_terms):\n land_use = land_use.to_frame()\n size_terms = size_terms.to_frame()\n df = pd.DataFrame({key: asim_utils.size_term(\n land_use, row) for key, row in size_terms.iterrows()},\n index=land_use.index)\n df.index.name = \"TAZ\"\n return df\n\n\n##############################\n# from asim.abm.tables.skims #\n##############################\n\[email protected](cache=True)\ndef omx_file(data_dir, asim_settings):\n print(\"opening omx file\")\n\n fname = os.path.join(data_dir, asim_settings[\"skims_file\"])\n file = omx.open_file(fname)\n asim_utils.close_on_exit(file, fname)\n\n return file\n\n\[email protected](cache=True)\ndef skim_dict(omx_file, cache_skim_key_values):\n\n print(\"skims injectable loading skims\")\n\n skim_dict = askim.SkimDict()\n skim_dict.offset_mapper.set_offset_int(-1)\n\n skims_in_omx = omx_file.listMatrices()\n for skim_name in skims_in_omx:\n key, sep, key2 = skim_name.partition('__')\n skim_data = omx_file[skim_name]\n if not sep:\n # no separator - this is a simple 2d skim - we load them all\n skim_dict.set(key, skim_data)\n else:\n # there may be more time periods in the skim than\n # are used by the model. cache_skim_key_values is a list of\n # time periods (frem settings) that are used\n # FIXME - assumes that the only types of key2 are time_periods\n if key2 in cache_skim_key_values:\n skim_dict.set((key, key2), skim_data)\n\n return skim_dict\n\n\n###################################\n# from asim.abm.tables.households #\n###################################\n\[email protected]()\ndef asim_households(asim_store, households_sample_size, trace_hh_id):\n\n df_full = asim_store[\"households\"]\n\n # if we are tracing hh exclusively\n if trace_hh_id and households_sample_size == 1:\n\n # df contains only trace_hh (or empty if not in full store)\n df = tracing.slice_ids(df_full, trace_hh_id)\n\n # if we need sample a subset of full store\n elif households_sample_size > 0 and \\\n len(df_full.index) > households_sample_size:\n\n # take the requested random sample\n df = asim_simulate.random_rows(df_full, households_sample_size)\n\n # if tracing and we missed trace_hh in sample, but it is in full store\n if trace_hh_id and trace_hh_id not in df.index and \\\n trace_hh_id in df_full.index:\n # replace first hh in sample with trace_hh\n print(\n \"replacing household %s with %s in household sample\" %\n (df.index[0], trace_hh_id))\n df_hh = tracing.slice_ids(df_full, trace_hh_id)\n df = pd.concat([df_hh, df[1:]])\n\n else:\n df = df_full\n\n print(\"loaded households %s\" % (df.shape,))\n\n # replace table function with dataframe\n orca.add_table('asim_households', df)\n\n asim_utils.get_rn_generator().add_channel(df, 'asim_households')\n\n if trace_hh_id:\n tracing.register_traceable_table('asim_households', df)\n tracing.trace_df(df, \"asim_households\", warn_if_empty=True)\n\n return df\n\n\n################################\n# from asim.abm.tables.persons #\n################################\n\n# this assigns a chunk_id to each household so we can iterate\n# over persons by whole households\[email protected](\"asim_households\", cache=True)\ndef chunk_id(asim_households):\n\n # FIXME - pathological knowledge of name of chunk_id column\n # used by hh_chunked_choosers\n\n chunk_ids = pd.Series(range(len(asim_households)), asim_households.index)\n 
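# a sequential integer per household, so downstream chunked iteration (the\n # hh_chunked_choosers logic noted above) can process persons in whole-household groups\n 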
return chunk_ids\n\n\[email protected]()\ndef asim_persons(asim_store, households_sample_size, asim_households,\n trace_hh_id):\n\n df = asim_store[\"persons\"]\n\n if households_sample_size > 0:\n # keep all persons in the sampled households\n df = df[df.household_id.isin(asim_households.index)]\n\n print(\"loaded asim asim_persons %s\" % (df.shape,))\n\n # replace table function with dataframe\n orca.add_table('asim_persons', df)\n\n asim_utils.get_rn_generator().add_channel(df, 'asim_persons')\n\n if trace_hh_id:\n tracing.register_traceable_table('asim_persons', df)\n tracing.trace_df(df, \"asim_persons\", warn_if_empty=True)\n\n return df\n\n\n# another common merge for persons\[email protected]()\ndef asim_persons_merged(asim_persons, asim_households, land_use,\n accessibility):\n return orca.merge_tables(asim_persons.name, tables=[\n asim_persons, asim_households, land_use, accessibility])\n\n\n# this is the placeholder for all the columns to update after the\n# workplace location choice model\[email protected]()\ndef persons_workplace(asim_persons):\n return pd.DataFrame(index=asim_persons.index)\n\n\n# this use the distance skims to compute the raw distance to work from home\[email protected](\"persons_workplace\")\ndef distance_to_work(asim_persons, skim_dict):\n distance_skim = skim_dict.get('DIST')\n return pd.Series(distance_skim.get(asim_persons.home_taz,\n asim_persons.workplace_taz),\n index=asim_persons.index)\n\n\n# this uses the free flow travel time in both directions\n# MTC TM1 was MD and MD since term is free flow roundtrip_auto_time_to_work\[email protected](\"persons_workplace\")\ndef roundtrip_auto_time_to_work(asim_persons, skim_dict):\n sovmd_skim = skim_dict.get(('SOV_TIME', 'MD'))\n return pd.Series(sovmd_skim.get(asim_persons.home_taz,\n asim_persons.workplace_taz) +\n sovmd_skim.get(asim_persons.workplace_taz,\n asim_persons.home_taz),\n index=asim_persons.index)\n\n\[email protected]('persons_workplace')\ndef workplace_in_cbd(asim_persons, land_use, asim_settings):\n s = asim_utils.reindex(land_use.area_type, asim_persons.workplace_taz)\n return s < asim_settings['cbd_threshold']\n", "id": "4641569", "language": "Python", "matching_score": 4.800474166870117, "max_stars_count": 0, "path": "bayarea_urbansim/baus/asim_datasources.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\n\nimport numpy as np\nimport orca\nimport pandas as pd\n\nfrom activitysim.core import pipeline\n\nfrom activitysim.core import tracing\nfrom activitysim.core.util import other_than, reindex\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]()\ndef persons(store, households_sample_size, households, trace_hh_id):\n\n df = store[\"persons\"]\n\n if households_sample_size > 0:\n # keep all persons in the sampled households\n df = df[df.household_id.isin(households.index)]\n\n logger.info(\"loaded persons %s\" % (df.shape,))\n\n # replace table function with dataframe\n orca.add_table('persons', df)\n\n pipeline.get_rn_generator().add_channel(df, 'persons')\n\n if trace_hh_id:\n tracing.register_traceable_table('persons', df)\n tracing.trace_df(df, \"persons\", warn_if_empty=True)\n\n return df\n\n\n# another common merge for persons\[email protected]()\ndef persons_merged(persons, households, land_use, accessibility):\n return orca.merge_tables(persons.name, tables=[\n persons, households, land_use, accessibility])\n\n\n# this is the placeholder for all the columns to update after the\n# non-mandatory tour frequency model\[email 
protected]()\ndef persons_nmtf(persons):\n return pd.DataFrame(index=persons.index)\n\n\[email protected](\"persons_nmtf\")\ndef num_escort_tours(persons, non_mandatory_tours):\n nmt = non_mandatory_tours.to_frame()\n return nmt[nmt.tour_type == \"escort\"].groupby(\"person_id\").size()\\\n .reindex(persons.index).fillna(0)\n\n\[email protected](\"persons_nmtf\")\ndef num_non_escort_tours(persons, non_mandatory_tours):\n nmt = non_mandatory_tours.to_frame()\n return nmt[nmt.tour_type != \"escort\"].groupby(\"person_id\").size()\\\n .reindex(persons.index).fillna(0)\n\n\n# this is the placeholder for all the columns to update after the\n# mandatory tour frequency model\[email protected]()\ndef persons_mtf(persons):\n return pd.DataFrame(index=persons.index)\n\n\n# count the number of mandatory tours for each person\[email protected](\"persons_mtf\")\ndef num_mand(persons):\n\n s = persons.mandatory_tour_frequency.map({\n \"work1\": 1,\n \"work2\": 2,\n \"school1\": 1,\n \"school2\": 2,\n \"work_and_school\": 2\n }, na_action='ignore')\n return s.fillna(0)\n\n\[email protected](\"persons_mtf\")\ndef work_and_school_and_worker(persons):\n\n s = (persons.mandatory_tour_frequency == \"work_and_school\").\\\n reindex(persons.index).fillna(False)\n\n return s & persons.is_worker\n\n\[email protected](\"persons_mtf\")\ndef work_and_school_and_student(persons):\n\n s = (persons.mandatory_tour_frequency == \"work_and_school\").\\\n reindex(persons.index).fillna(False)\n\n return s & persons.is_student\n\n\n# this is the placeholder for all the columns to update after the\n# workplace location choice model\[email protected]()\ndef persons_workplace(persons):\n return pd.DataFrame(index=persons.index)\n\n\n# this use the distance skims to compute the raw distance to work from home\[email protected](\"persons_workplace\")\ndef distance_to_work(persons, skim_dict):\n distance_skim = skim_dict.get('DIST')\n return pd.Series(distance_skim.get(persons.home_taz,\n persons.workplace_taz),\n index=persons.index)\n\n\n# this uses the free flow travel time in both directions\n# MTC TM1 was MD and MD since term is free flow roundtrip_auto_time_to_work\[email protected](\"persons_workplace\")\ndef roundtrip_auto_time_to_work(persons, skim_dict):\n sovmd_skim = skim_dict.get(('SOV_TIME', 'MD'))\n return pd.Series(sovmd_skim.get(persons.home_taz,\n persons.workplace_taz) +\n sovmd_skim.get(persons.workplace_taz,\n persons.home_taz),\n index=persons.index)\n\n\[email protected]('persons_workplace')\ndef workplace_in_cbd(persons, land_use, settings):\n s = reindex(land_use.area_type, persons.workplace_taz)\n return s < settings['cbd_threshold']\n\n\n# this is the placeholder for all the columns to update after the\n# school location choice model\[email protected]()\ndef persons_school(persons):\n return pd.DataFrame(index=persons.index)\n\n\n# same deal as distance_to_work but to school\[email protected](\"persons_school\")\ndef distance_to_school(persons, skim_dict):\n logger.debug(\"eval computed column persons_school.roundtrip_auto_time_to_school\")\n distance_skim = skim_dict.get('DIST')\n return pd.Series(distance_skim.get(persons.home_taz,\n persons.school_taz),\n index=persons.index)\n\n\n# this uses the free flow travel time in both directions\n# MTC TM1 was MD and MD since term is free flow roundtrip_auto_time_to_school\[email protected](\"persons_school\")\ndef roundtrip_auto_time_to_school(persons, skim_dict):\n sovmd_skim = skim_dict.get(('SOV_TIME', 'MD'))\n return 
pd.Series(sovmd_skim.get(persons.home_taz,\n persons.school_taz) +\n sovmd_skim.get(persons.school_taz,\n persons.home_taz),\n index=persons.index)\n\n\n# this is an idiom to grab the person of the specified type and check to see if\n# there is 1 or more of that kind of person in each household\ndef presence_of(ptype, persons, at_home=False):\n if at_home:\n # if at_home, they need to be of given type AND at home\n bools = (persons.ptype_cat == ptype) & (persons.cdap_activity == \"H\")\n else:\n bools = persons.ptype_cat == ptype\n\n return other_than(persons.household_id, bools)\n\n\n# this is the placeholder for all the columns to update after the\n# workplace location choice model\[email protected]()\ndef persons_cdap(persons):\n return pd.DataFrame(index=persons.index)\n\n\[email protected](\"persons_cdap\")\ndef under16_not_at_school(persons):\n return (persons.ptype_cat.isin([\"school\", \"preschool\"]) &\n persons.cdap_activity.isin([\"N\", \"H\"]))\n\n\[email protected]('persons_cdap')\ndef has_preschool_kid_at_home(persons):\n return presence_of(\"preschool\", persons, at_home=True)\n\n\[email protected]('persons_cdap')\ndef has_school_kid_at_home(persons):\n return presence_of(\"school\", persons, at_home=True)\n", "id": "10221830", "language": "Python", "matching_score": 4.768115520477295, "max_stars_count": 0, "path": "activitysim/activitysim/abm/tables/persons.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\n\nimport numpy as np\nimport orca\nimport pandas as pd\n\nfrom activitysim.core import simulate as asim\nfrom activitysim.core import tracing\nfrom activitysim.core import pipeline\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]()\ndef households(store, households_sample_size, trace_hh_id):\n\n df_full = store[\"households\"]\n\n # if we are tracing hh exclusively\n if trace_hh_id and households_sample_size == 1:\n\n # df contains only trace_hh (or empty if not in full store)\n df = tracing.slice_ids(df_full, trace_hh_id)\n\n # if we need sample a subset of full store\n elif households_sample_size > 0 and len(df_full.index) > households_sample_size:\n\n # take the requested random sample\n df = asim.random_rows(df_full, households_sample_size)\n\n # if tracing and we missed trace_hh in sample, but it is in full store\n if trace_hh_id and trace_hh_id not in df.index and trace_hh_id in df_full.index:\n # replace first hh in sample with trace_hh\n logger.debug(\"replacing household %s with %s in household sample\" %\n (df.index[0], trace_hh_id))\n df_hh = tracing.slice_ids(df_full, trace_hh_id)\n df = pd.concat([df_hh, df[1:]])\n\n else:\n df = df_full\n\n logger.info(\"loaded households %s\" % (df.shape,))\n\n # replace table function with dataframe\n orca.add_table('households', df)\n\n pipeline.get_rn_generator().add_channel(df, 'households')\n\n if trace_hh_id:\n tracing.register_traceable_table('households', df)\n tracing.trace_df(df, \"households\", warn_if_empty=True)\n\n return df\n\n\n# this assigns a chunk_id to each household so we can iterate over persons by whole households\[email protected](\"households\", cache=True)\ndef chunk_id(households):\n\n # FIXME - pathological knowledge of name of chunk_id column used by hh_chunked_choosers\n\n chunk_ids = pd.Series(range(len(households)), households.index)\n return chunk_ids\n\n\[email protected]('households')\ndef work_tour_auto_time_savings(households):\n # FIXME - fix this variable from auto ownership model\n return pd.Series(0, households.index)\n\n\n# this 
is the placeholder for all the columns to update after the\n# workplace location choice model\n@orca.table()\ndef households_cdap(households):\n return pd.DataFrame(index=households.index)\n\n\n@orca.column(\"households_cdap\")\ndef num_under16_not_at_school(persons, households):\n return persons.under16_not_at_school.groupby(persons.household_id).size().\\\n reindex(households.index).fillna(0)\n\n\n# this is a placeholder table for columns that get computed after the\n# auto ownership model\n@orca.table()\ndef households_autoown(households):\n return pd.DataFrame(index=households.index)\n\n\n@orca.column('households_autoown')\ndef no_cars(households):\n return (households.auto_ownership == 0)\n\n\n@orca.column('households_autoown')\ndef car_sufficiency(households, persons):\n return households.auto_ownership - persons.household_id.value_counts()\n\n\n# this is a common merge so might as well define it once here and use it\n@orca.table()\ndef households_merged(households, land_use, accessibility):\n return orca.merge_tables(households.name, tables=[\n households, land_use, accessibility])\n\n\norca.broadcast('households', 'persons', cast_index=True, onto_on='household_id')\n", "id": "11047862", "language": "Python", "matching_score": 2.361036539077759, "max_stars_count": 0, "path": "activitysim/activitysim/abm/tables/households.py" }, { "content": "import os\nimport yaml\nimport orca\nimport asim_utils\nimport asim_datasources\nimport tracing\nfrom interaction_sample import interaction_sample\nfrom interaction_sample_simulate import interaction_sample_simulate\nfrom urbansim.utils import misc\nimport pandas as pd\n\n\nDUMP = False\n\n############################################################\n# #\n# (1) ACTIVITYSIM ORCA STEPS FOR DATA MODEL INITIALIZATION #\n# #\n############################################################\n\n\n@orca.injectable('asim_settings', cache=True)\ndef asim_settings():\n \"\"\"\n This step loads the ActivitySim settings, which are kept\n separate for clarity. 
Ultimately we might want to merge\n them with ual_settings or the main settings config\n\n Data expectations\n -----------------\n - 'configs' folder contains a file called 'ual_settings.yaml'\n - 'os.path' is expected to provide the root level of the urbansim\n instance, so be sure to either (a) launch the python process\n from that directory, or (b) use os.chdir to\n switch to that directory before running any model steps\n \"\"\"\n with open(os.path.join(misc.configs_dir(), 'asim_settings.yaml')) as f:\n return yaml.load(f)\n\n\[email protected]()\ndef workplace_location_sample_spec(configs_dir):\n return asim_utils.read_model_spec(\n configs_dir, 'workplace_location_sample.csv')\n\n\[email protected]()\ndef workplace_location_settings(configs_dir):\n return asim_utils.read_model_settings(\n configs_dir, 'workplace_location.yaml')\n\n\[email protected]()\ndef workplace_location_spec(configs_dir):\n return asim_utils.read_model_spec(\n configs_dir, 'workplace_location.csv')\n\n\n###################################################\n# #\n# (2) ACTIVITYSIM ORCA STEPS FOR SIMULATION LOGIC #\n# #\n###################################################\n\n\[email protected]()\ndef workplace_location_sample(asim_persons_merged,\n workplace_location_sample_spec,\n workplace_location_settings,\n skim_dict,\n destination_size_terms,\n chunk_size,\n trace_hh_id):\n \"\"\"\n build a table of workers * all zones in order\n to select a sample of alternative work locations.\n\n PERID, dest_TAZ, rand, pick_count\n 23750, 14, 0.565502716034, 4\n 23750, 16, 0.711135838871, 6\n ...\n 23751, 12, 0.408038878552, 1\n 23751, 14, 0.972732479292, 2\n \"\"\"\n\n trace_label = 'workplace_location_sample'\n\n choosers = asim_persons_merged.to_frame()\n alternatives = destination_size_terms.to_frame()\n\n constants = asim_utils.get_model_constants(workplace_location_settings)\n\n sample_size = workplace_location_settings[\"SAMPLE_SIZE\"]\n alt_col_name = workplace_location_settings[\"ALT_COL_NAME\"]\n\n print(\"Running workplace_location_sample with %d persons\" % len(choosers))\n\n # create wrapper with keys for this lookup - in this case there is a TAZ\n # in the choosers and a TAZ in the alternatives which get merged during\n # interaction the skims will be available under the name \"skims\" for any\n # @ expressions\n skims = skim_dict.wrap(\"TAZ\", \"TAZ_r\")\n\n locals_d = {\n 'skims': skims\n }\n if constants is not None:\n locals_d.update(constants)\n\n # FIXME - MEMORY HACK - only include columns actually used in spec\n chooser_columns = workplace_location_settings['SIMULATE_CHOOSER_COLUMNS']\n choosers = choosers[chooser_columns]\n\n choices = interaction_sample(\n choosers,\n alternatives,\n sample_size=sample_size,\n alt_col_name=alt_col_name,\n spec=workplace_location_sample_spec,\n skims=skims,\n locals_d=locals_d,\n chunk_size=chunk_size,\n trace_label=trace_label)\n\n orca.add_table('workplace_location_sample', choices)\n\n\[email protected]()\ndef workplace_location_logsums(asim_persons_merged,\n land_use,\n skim_dict, skim_stack,\n workplace_location_sample,\n configs_dir,\n chunk_size,\n trace_hh_id):\n \"\"\"\n add logsum column to existing workplace_location_sample able\n\n logsum is calculated by running the mode_choice model for each\n sample (person, dest_taz) pair in workplace_location_sample,\n and computing the logsum of all the utilities\n\n <added>\n PERID, dest_TAZ, rand, pick_count, logsum\n 23750, 14, 0.565502716034, 4 1.85659498857\n 23750, 16, 0.711135838871, 6 1.92315598631\n ...\n 
23751, 12, 0.408038878552, 1 2.40612135416\n 23751, 14, 0.972732479292, 2 1.44009018355\n\n \"\"\"\n\n trace_label = 'workplace_location_logsums'\n\n logsums_spec = asim_utils.mode_choice_logsums_spec(configs_dir, 'work')\n\n workplace_location_settings = asim_utils.read_model_settings(\n configs_dir, 'workplace_location.yaml')\n\n alt_col_name = workplace_location_settings[\"ALT_COL_NAME\"]\n\n # FIXME - just using settings from tour_mode_choice\n logsum_settings = asim_utils.read_model_settings(\n configs_dir, 'tour_mode_choice.yaml')\n\n asim_persons_merged = asim_persons_merged.to_frame()\n workplace_location_sample = workplace_location_sample.to_frame()\n\n print(\"Running workplace_location_sample with %s rows\" % len(\n workplace_location_sample))\n\n # FIXME - MEMORY HACK - only include columns actually used in spec\n chooser_columns = workplace_location_settings['LOGSUM_CHOOSER_COLUMNS']\n asim_persons_merged = asim_persons_merged[chooser_columns]\n\n choosers = pd.merge(workplace_location_sample,\n asim_persons_merged,\n left_index=True,\n right_index=True,\n how=\"left\")\n\n choosers['in_period'] = asim_utils.time_period_label(\n workplace_location_settings['IN_PERIOD'])\n choosers['out_period'] = asim_utils.time_period_label(\n workplace_location_settings['OUT_PERIOD'])\n\n # FIXME - should do this in expression file?\n choosers['dest_topology'] = asim_utils.reindex(\n land_use.TOPOLOGY, choosers[alt_col_name])\n choosers['dest_density_index'] = asim_utils.reindex(\n land_use.density_index, choosers[alt_col_name])\n\n tracing.dump_df(\n DUMP, asim_persons_merged, trace_label, 'asim_persons_merged')\n tracing.dump_df(\n DUMP, choosers, trace_label, 'choosers')\n\n logsums = asim_utils.compute_logsums(\n choosers, logsums_spec, logsum_settings, skim_dict, skim_stack,\n alt_col_name, chunk_size, trace_hh_id, trace_label)\n\n # \"add_column series should have an index matching the table to which\n # it is being added\" when the index has duplicates, however, in the\n # special case that the series index exactly matches the table index,\n # then the series value order is preserved logsums now does, since\n # workplace_location_sample was on left side of merge de-dup merge\n orca.add_column(\"workplace_location_sample\", \"mode_choice_logsum\", logsums)\n\n\[email protected]()\ndef workplace_location_simulate(asim_persons_merged,\n workplace_location_sample,\n workplace_location_spec,\n workplace_location_settings,\n skim_dict,\n destination_size_terms,\n chunk_size,\n trace_hh_id):\n \"\"\"\n Workplace location model on workplace_location_sample\n annotated with mode_choice logsum to select a work_taz\n from sample alternatives\n \"\"\"\n\n # for now I'm going to generate a workplace location for everyone -\n # presumably it will not get used in downstream models for everyone -\n # it should depend on CDAP and mandatory tour generation as to whether\n # it gets used\n choosers = asim_persons_merged.to_frame()\n\n alt_col_name = workplace_location_settings[\"ALT_COL_NAME\"]\n\n # alternatives are pre-sampled and annotated with logsums and pick_count\n # but we have to merge additional alt columns into alt sample list\n workplace_location_sample = workplace_location_sample.to_frame()\n destination_size_terms = destination_size_terms.to_frame()\n alternatives = \\\n pd.merge(workplace_location_sample, destination_size_terms,\n left_on=alt_col_name, right_index=True, how=\"left\")\n\n tracing.dump_df(\n DUMP, alternatives, 'workplace_location_simulate', 'alternatives')\n\n constants = 
asim_utils.get_model_constants(workplace_location_settings)\n\n sample_pool_size = len(destination_size_terms.index)\n\n print(\"Running workplace_location_simulate with %d persons\" % len(\n choosers))\n\n # create wrapper with keys for this lookup - in this case there is a\n # TAZ in the choosers and a TAZ in the alternatives which get merged\n # during interaction the skims will be available under the name\n # \"skims\" for any @ expressions\n skims = skim_dict.wrap(\"TAZ\", alt_col_name)\n\n locals_d = {\n 'skims': skims,\n 'sample_pool_size': float(sample_pool_size)\n }\n if constants is not None:\n locals_d.update(constants)\n\n # FIXME - MEMORY HACK - only include columns actually used in spec\n chooser_columns = workplace_location_settings['SIMULATE_CHOOSER_COLUMNS']\n choosers = choosers[chooser_columns]\n\n tracing.dump_df(DUMP, choosers, 'workplace_location_simulate', 'choosers')\n\n choices = interaction_sample_simulate(\n choosers,\n alternatives,\n spec=workplace_location_spec,\n choice_column=alt_col_name,\n skims=skims,\n locals_d=locals_d,\n chunk_size=chunk_size,\n trace_label=trace_hh_id and 'workplace_location',\n trace_choice_name='workplace_location')\n\n # FIXME - no need to reindex since we didn't slice choosers\n # choices = choices.reindex(persons_merged.index)\n\n tracing.print_summary('workplace_taz', choices, describe=True)\n\n orca.add_column(\"asim_persons\", \"workplace_taz\", choices)\n\n asim_utils.add_dependent_columns(\"asim_persons\", \"persons_workplace\")\n\n if trace_hh_id:\n trace_columns = ['workplace_taz'] + orca.get_table(\n 'persons_workplace').columns\n tracing.trace_df(orca.get_table('asim_persons_merged').to_frame(),\n label=\"workplace_location\",\n columns=trace_columns,\n warn_if_empty=True)\n", "id": "6351763", "language": "Python", "matching_score": 7.265401363372803, "max_stars_count": 0, "path": "bayarea_urbansim/baus/asim_models.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport pandas as pd\nimport numpy as np\nimport orca\n\nfrom activitysim.core import tracing\nfrom activitysim.core import config\nfrom activitysim.core import pipeline\nfrom activitysim.core import simulate\n\nfrom activitysim.core.interaction_sample_simulate import interaction_sample_simulate\nfrom activitysim.core.interaction_sample import interaction_sample\n\nfrom activitysim.core.util import reindex\nfrom activitysim.core.util import left_merge_on_index_and_col\n\nfrom .util.logsums import compute_logsums\nfrom .util.logsums import time_period_label\nfrom .util.logsums import mode_choice_logsums_spec\n\n\"\"\"\nThe workplace location model predicts the zones in which various people will\nwork.\n\nfor now we generate a workplace location for everyone -\npresumably it will not get used in downstream models for everyone -\nit should depend on CDAP and mandatory tour generation as to whether\nit gets used\n\"\"\"\n\nlogger = logging.getLogger(__name__)\nDUMP = False\n\n\[email protected]()\ndef workplace_location_sample_spec(configs_dir):\n return simulate.read_model_spec(configs_dir, 'workplace_location_sample.csv')\n\n\[email protected]()\ndef workplace_location_settings(configs_dir):\n return config.read_model_settings(configs_dir, 'workplace_location.yaml')\n\n\[email protected]()\ndef workplace_location_sample(persons_merged,\n workplace_location_sample_spec,\n workplace_location_settings,\n skim_dict,\n destination_size_terms,\n chunk_size,\n trace_hh_id):\n \"\"\"\n build a table of workers * all 
zones in order to select a sample of alternative work locations.\n\n PERID, dest_TAZ, rand, pick_count\n 23750, 14, 0.565502716034, 4\n 23750, 16, 0.711135838871, 6\n ...\n 23751, 12, 0.408038878552, 1\n 23751, 14, 0.972732479292, 2\n \"\"\"\n\n trace_label = 'workplace_location_sample'\n\n choosers = persons_merged.to_frame()\n alternatives = destination_size_terms.to_frame()\n\n constants = config.get_model_constants(workplace_location_settings)\n\n sample_size = workplace_location_settings[\"SAMPLE_SIZE\"]\n alt_col_name = workplace_location_settings[\"ALT_COL_NAME\"]\n\n logger.info(\"Running workplace_location_sample with %d persons\" % len(choosers))\n\n # create wrapper with keys for this lookup - in this case there is a TAZ in the choosers\n # and a TAZ in the alternatives which get merged during interaction\n # the skims will be available under the name \"skims\" for any @ expressions\n skims = skim_dict.wrap(\"TAZ\", \"TAZ_r\")\n\n locals_d = {\n 'skims': skims\n }\n if constants is not None:\n locals_d.update(constants)\n\n # FIXME - MEMORY HACK - only include columns actually used in spec\n chooser_columns = workplace_location_settings['SIMULATE_CHOOSER_COLUMNS']\n choosers = choosers[chooser_columns]\n\n choices = interaction_sample(\n choosers,\n alternatives,\n sample_size=sample_size,\n alt_col_name=alt_col_name,\n spec=workplace_location_sample_spec,\n skims=skims,\n locals_d=locals_d,\n chunk_size=chunk_size,\n trace_label=trace_label)\n\n orca.add_table('workplace_location_sample', choices)\n\n\[email protected]()\ndef workplace_location_logsums(persons_merged,\n land_use,\n skim_dict, skim_stack,\n workplace_location_sample,\n configs_dir,\n chunk_size,\n trace_hh_id):\n \"\"\"\n add logsum column to existing workplace_location_sample able\n\n logsum is calculated by running the mode_choice model for each sample (person, dest_taz) pair\n in workplace_location_sample, and computing the logsum of all the utilities\n\n <added>\n PERID, dest_TAZ, rand, pick_count, logsum\n 23750, 14, 0.565502716034, 4 1.85659498857\n 23750, 16, 0.711135838871, 6 1.92315598631\n ...\n 23751, 12, 0.408038878552, 1 2.40612135416\n 23751, 14, 0.972732479292, 2 1.44009018355\n\n \"\"\"\n\n trace_label = 'workplace_location_logsums'\n\n logsums_spec = mode_choice_logsums_spec(configs_dir, 'work')\n\n workplace_location_settings = config.read_model_settings(configs_dir, 'workplace_location.yaml')\n\n alt_col_name = workplace_location_settings[\"ALT_COL_NAME\"]\n\n # FIXME - just using settings from tour_mode_choice\n logsum_settings = config.read_model_settings(configs_dir, 'tour_mode_choice.yaml')\n\n persons_merged = persons_merged.to_frame()\n workplace_location_sample = workplace_location_sample.to_frame()\n\n logger.info(\"Running workplace_location_sample with %s rows\" % len(workplace_location_sample))\n\n # FIXME - MEMORY HACK - only include columns actually used in spec\n chooser_columns = workplace_location_settings['LOGSUM_CHOOSER_COLUMNS']\n persons_merged = persons_merged[chooser_columns]\n\n choosers = pd.merge(workplace_location_sample,\n persons_merged,\n left_index=True,\n right_index=True,\n how=\"left\")\n\n choosers['in_period'] = time_period_label(workplace_location_settings['IN_PERIOD'])\n choosers['out_period'] = time_period_label(workplace_location_settings['OUT_PERIOD'])\n\n # FIXME - should do this in expression file?\n choosers['dest_topology'] = reindex(land_use.TOPOLOGY, choosers[alt_col_name])\n choosers['dest_density_index'] = reindex(land_use.density_index, 
choosers[alt_col_name])\n\n tracing.dump_df(DUMP, persons_merged, trace_label, 'persons_merged')\n tracing.dump_df(DUMP, choosers, trace_label, 'choosers')\n\n logsums = compute_logsums(\n choosers, logsums_spec, logsum_settings,\n skim_dict, skim_stack, alt_col_name, chunk_size, trace_hh_id, trace_label)\n\n # \"add_column series should have an index matching the table to which it is being added\"\n # when the index has duplicates, however, in the special case that the series index exactly\n # matches the table index, then the series value order is preserved\n # logsums now does, since workplace_location_sample was on left side of merge de-dup merge\n orca.add_column(\"workplace_location_sample\", \"mode_choice_logsum\", logsums)\n\n\[email protected]()\ndef workplace_location_spec(configs_dir):\n return simulate.read_model_spec(configs_dir, 'workplace_location.csv')\n\n\[email protected]()\ndef workplace_location_settings(configs_dir):\n return config.read_model_settings(configs_dir, 'workplace_location.yaml')\n\n\[email protected]()\ndef workplace_location_simulate(persons_merged,\n workplace_location_sample,\n workplace_location_spec,\n workplace_location_settings,\n skim_dict,\n destination_size_terms,\n chunk_size,\n trace_hh_id):\n \"\"\"\n Workplace location model on workplace_location_sample annotated with mode_choice logsum\n to select a work_taz from sample alternatives\n \"\"\"\n\n # for now I'm going to generate a workplace location for everyone -\n # presumably it will not get used in downstream models for everyone -\n # it should depend on CDAP and mandatory tour generation as to whether\n # it gets used\n choosers = persons_merged.to_frame()\n\n alt_col_name = workplace_location_settings[\"ALT_COL_NAME\"]\n\n # alternatives are pre-sampled and annotated with logsums and pick_count\n # but we have to merge additional alt columns into alt sample list\n workplace_location_sample = workplace_location_sample.to_frame()\n destination_size_terms = destination_size_terms.to_frame()\n alternatives = \\\n pd.merge(workplace_location_sample, destination_size_terms,\n left_on=alt_col_name, right_index=True, how=\"left\")\n\n tracing.dump_df(DUMP, alternatives, 'workplace_location_simulate', 'alternatives')\n\n constants = config.get_model_constants(workplace_location_settings)\n\n sample_pool_size = len(destination_size_terms.index)\n\n logger.info(\"Running workplace_location_simulate with %d persons\" % len(choosers))\n\n # create wrapper with keys for this lookup - in this case there is a TAZ in the choosers\n # and a TAZ in the alternatives which get merged during interaction\n # the skims will be available under the name \"skims\" for any @ expressions\n skims = skim_dict.wrap(\"TAZ\", alt_col_name)\n\n locals_d = {\n 'skims': skims,\n 'sample_pool_size': float(sample_pool_size)\n }\n if constants is not None:\n locals_d.update(constants)\n\n # FIXME - MEMORY HACK - only include columns actually used in spec\n chooser_columns = workplace_location_settings['SIMULATE_CHOOSER_COLUMNS']\n choosers = choosers[chooser_columns]\n\n tracing.dump_df(DUMP, choosers, 'workplace_location_simulate', 'choosers')\n\n choices = interaction_sample_simulate(\n choosers,\n alternatives,\n spec=workplace_location_spec,\n choice_column=alt_col_name,\n skims=skims,\n locals_d=locals_d,\n chunk_size=chunk_size,\n trace_label=trace_hh_id and 'workplace_location',\n trace_choice_name='workplace_location')\n\n # FIXME - no need to reindex since we didn't slice choosers\n # choices = 
choices.reindex(persons_merged.index)\n\n tracing.print_summary('workplace_taz', choices, describe=True)\n\n orca.add_column(\"persons\", \"workplace_taz\", choices)\n\n pipeline.add_dependent_columns(\"persons\", \"persons_workplace\")\n\n if trace_hh_id:\n trace_columns = ['workplace_taz'] + orca.get_table('persons_workplace').columns\n tracing.trace_df(orca.get_table('persons_merged').to_frame(),\n label=\"workplace_location\",\n columns=trace_columns,\n warn_if_empty=True)\n", "id": "11369745", "language": "Python", "matching_score": 5.2312331199646, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/workplace_location.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\n\nimport orca\nimport pandas as pd\nimport numpy as np\n\nfrom activitysim.core import tracing\nfrom activitysim.core import config\nfrom activitysim.core import pipeline\nfrom activitysim.core import simulate\n\nfrom activitysim.core.interaction_sample_simulate import interaction_sample_simulate\nfrom activitysim.core.interaction_sample import interaction_sample\n\nfrom activitysim.core.util import reindex\nfrom activitysim.core.util import left_merge_on_index_and_col\n\nfrom .util.logsums import compute_logsums\nfrom .util.logsums import time_period_label\nfrom .util.logsums import mode_choice_logsums_spec\n\nfrom .mode import get_segment_and_unstack\n\n\"\"\"\nThe school location model predicts the zones in which various people will\ngo to school.\n\"\"\"\n\nlogger = logging.getLogger(__name__)\nDUMP = False\n\n\[email protected]()\ndef school_location_sample_spec(configs_dir):\n return simulate.read_model_spec(configs_dir, 'school_location_sample.csv')\n\n\[email protected]()\ndef school_location_settings(configs_dir):\n return config.read_model_settings(configs_dir, 'school_location.yaml')\n\n\[email protected]()\ndef school_location_sample(\n persons_merged,\n school_location_sample_spec,\n school_location_settings,\n skim_dict,\n destination_size_terms,\n chunk_size,\n trace_hh_id):\n\n \"\"\"\n build a table of persons * all zones to select a sample of alternative school locations.\n\n PERID, dest_TAZ, rand, pick_count\n 23750, 14, 0.565502716034, 4\n 23750, 16, 0.711135838871, 6\n ...\n 23751, 12, 0.408038878552, 1\n 23751, 14, 0.972732479292, 2\n \"\"\"\n\n choosers = persons_merged.to_frame()\n alternatives = destination_size_terms.to_frame()\n\n constants = config.get_model_constants(school_location_settings)\n\n sample_size = school_location_settings[\"SAMPLE_SIZE\"]\n alt_col_name = school_location_settings[\"ALT_COL_NAME\"]\n\n logger.info(\"Running school_location_simulate with %d persons\" % len(choosers))\n\n # create wrapper with keys for this lookup - in this case there is a TAZ in the choosers\n # and a TAZ in the alternatives which get merged during interaction\n # the skims will be available under the name \"skims\" for any @ expressions\n skims = skim_dict.wrap(\"TAZ\", \"TAZ_r\")\n\n locals_d = {\n 'skims': skims\n }\n if constants is not None:\n locals_d.update(constants)\n\n # FIXME - MEMORY HACK - only include columns actually used in spec\n chooser_columns = school_location_settings['SIMULATE_CHOOSER_COLUMNS']\n choosers = choosers[chooser_columns]\n\n choices_list = []\n for school_type in ['university', 'highschool', 'gradeschool']:\n\n locals_d['segment'] = school_type\n\n choosers_segment = choosers[choosers[\"is_\" + school_type]]\n\n # FIXME - no point in considering impossible alternatives\n alternatives_segment = 
alternatives[alternatives[school_type] > 0]\n\n logger.info(\"school_type %s: %s persons %s alternatives\" %\n (school_type, len(choosers_segment), len(alternatives_segment)))\n\n if len(choosers_segment.index) > 0:\n\n choices = interaction_sample(\n choosers_segment,\n alternatives_segment,\n sample_size=sample_size,\n alt_col_name=alt_col_name,\n spec=school_location_sample_spec[[school_type]],\n skims=skims,\n locals_d=locals_d,\n chunk_size=chunk_size,\n trace_label=trace_hh_id and 'school_location_sample.%s' % school_type)\n\n choices['school_type'] = school_type\n choices_list.append(choices)\n\n choices = pd.concat(choices_list)\n\n orca.add_table('school_location_sample', choices)\n\n\[email protected]()\ndef school_location_logsums(\n persons_merged,\n land_use,\n skim_dict, skim_stack,\n school_location_sample,\n configs_dir,\n chunk_size,\n trace_hh_id):\n \"\"\"\n add logsum column to existing school_location_sample able\n\n logsum is calculated by running the mode_choice model for each sample (person, dest_taz) pair\n in school_location_sample, and computing the logsum of all the utilities\n\n <added>\n PERID, dest_TAZ, rand, pick_count, logsum\n 23750, 14, 0.565502716034, 4 1.85659498857\n 23750, 16, 0.711135838871, 6 1.92315598631\n ...\n 23751, 12, 0.408038878552, 1 2.40612135416\n 23751, 14, 0.972732479292, 2 1.44009018355\n\n \"\"\"\n\n trace_label = 'school_location_logsums'\n\n # extract logsums_spec from omnibus_spec\n # omnibus_spec = orca.get_injectable('tour_mode_choice_spec')\n # for tour_type in ['school', 'university']:\n # logsums_spec = get_segment_and_unstack(omnibus_spec, tour_type)\n # tracing.dump_df(DUMP, logsums_spec, trace_label, 'logsums_spec_%s' % tour_type)\n\n school_location_settings = config.read_model_settings(configs_dir, 'school_location.yaml')\n\n alt_col_name = school_location_settings[\"ALT_COL_NAME\"]\n\n # FIXME - just using settings from tour_mode_choice\n logsum_settings = config.read_model_settings(configs_dir, 'tour_mode_choice.yaml')\n\n persons_merged = persons_merged.to_frame()\n school_location_sample = school_location_sample.to_frame()\n\n logger.info(\"Running school_location_sample with %s rows\" % len(school_location_sample))\n\n # FIXME - MEMORY HACK - only include columns actually used in spec\n chooser_columns = school_location_settings['LOGSUM_CHOOSER_COLUMNS']\n persons_merged = persons_merged[chooser_columns]\n\n tracing.dump_df(DUMP, persons_merged, trace_label, 'persons_merged')\n\n logsums_list = []\n for school_type in ['university', 'highschool', 'gradeschool']:\n\n logsums_spec = mode_choice_logsums_spec(configs_dir, school_type)\n\n choosers = school_location_sample[school_location_sample['school_type'] == school_type]\n\n choosers = pd.merge(\n choosers,\n persons_merged,\n left_index=True,\n right_index=True,\n how=\"left\")\n\n choosers['in_period'] = time_period_label(school_location_settings['IN_PERIOD'])\n choosers['out_period'] = time_period_label(school_location_settings['OUT_PERIOD'])\n\n # FIXME - should do this in expression file?\n choosers['dest_topology'] = reindex(land_use.TOPOLOGY, choosers[alt_col_name])\n choosers['dest_density_index'] = reindex(land_use.density_index, choosers[alt_col_name])\n\n tracing.dump_df(DUMP, choosers, trace_label, '%s_choosers' % school_type)\n\n logsums = compute_logsums(\n choosers, logsums_spec, logsum_settings,\n skim_dict, skim_stack, alt_col_name, chunk_size,\n trace_hh_id, trace_label)\n\n logsums_list.append(logsums)\n\n logsums = pd.concat(logsums_list)\n\n # 
add_column series should have an index matching the table to which it is being added\n # logsums does, since school_location_sample was on left side of merge creating choosers\n orca.add_column(\"school_location_sample\", \"mode_choice_logsum\", logsums)\n\n\[email protected]()\ndef school_location_spec(configs_dir):\n return simulate.read_model_spec(configs_dir, 'school_location.csv')\n\n\[email protected]()\ndef school_location_settings(configs_dir):\n return config.read_model_settings(configs_dir, 'school_location.yaml')\n\n\[email protected]()\ndef school_location_simulate(persons_merged,\n school_location_sample,\n school_location_spec,\n school_location_settings,\n skim_dict,\n destination_size_terms,\n chunk_size,\n trace_hh_id):\n \"\"\"\n School location model on school_location_sample annotated with mode_choice logsum\n to select a school_taz from sample alternatives\n \"\"\"\n\n choosers = persons_merged.to_frame()\n school_location_sample = school_location_sample.to_frame()\n destination_size_terms = destination_size_terms.to_frame()\n\n trace_label = 'school_location_simulate'\n alt_col_name = school_location_settings[\"ALT_COL_NAME\"]\n\n constants = config.get_model_constants(school_location_settings)\n\n # create wrapper with keys for this lookup - in this case there is a TAZ in the choosers\n # and a TAZ in the alternatives which get merged during interaction\n # the skims will be available under the name \"skims\" for any @ expressions\n skims = skim_dict.wrap(\"TAZ\", alt_col_name)\n\n locals_d = {\n 'skims': skims,\n }\n if constants is not None:\n locals_d.update(constants)\n\n # FIXME - MEMORY HACK - only include columns actually used in spec\n chooser_columns = school_location_settings['SIMULATE_CHOOSER_COLUMNS']\n choosers = choosers[chooser_columns]\n tracing.dump_df(DUMP, choosers, 'school_location_simulate', 'choosers')\n\n choices_list = []\n for school_type in ['university', 'highschool', 'gradeschool']:\n\n locals_d['segment'] = school_type\n\n choosers_segment = choosers[choosers[\"is_\" + school_type]]\n alts_segment = school_location_sample[school_location_sample['school_type'] == school_type]\n\n # alternatives are pre-sampled and annotated with logsums and pick_count\n # but we have to merge additional alt columns into alt sample list\n alts_segment = \\\n pd.merge(alts_segment, destination_size_terms,\n left_on=alt_col_name, right_index=True, how=\"left\")\n\n tracing.dump_df(DUMP, alts_segment, trace_label, '%s_alternatives' % school_type)\n\n choices = interaction_sample_simulate(\n choosers_segment,\n alts_segment,\n spec=school_location_spec[[school_type]],\n choice_column=alt_col_name,\n skims=skims,\n locals_d=locals_d,\n chunk_size=chunk_size,\n trace_label=trace_hh_id and 'school_location_simulate',\n trace_choice_name='school_location')\n\n choices_list.append(choices)\n\n choices = pd.concat(choices_list)\n\n # We only chose school locations for the subset of persons who go to school\n # so we backfill the empty choices with -1 to code as no school location\n choices = choices.reindex(persons_merged.index).fillna(-1)\n\n tracing.dump_df(DUMP, choices, trace_label, 'choices')\n\n tracing.print_summary('school_taz', choices, describe=True)\n\n orca.add_column(\"persons\", \"school_taz\", choices)\n\n pipeline.add_dependent_columns(\"persons\", \"persons_school\")\n\n if trace_hh_id:\n trace_columns = ['school_taz'] + orca.get_table('persons_school').columns\n tracing.trace_df(orca.get_table('persons_merged').to_frame(),\n 
label=\"school_location\",\n columns=trace_columns,\n warn_if_empty=True)\n", "id": "2284132", "language": "Python", "matching_score": 5.443791389465332, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/school_location.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\n\nimport orca\nimport pandas as pd\nimport numpy as np\n\nfrom activitysim.core.simulate import read_model_spec\nfrom activitysim.core.interaction_simulate import interaction_simulate\n\nfrom activitysim.core import tracing\nfrom activitysim.core import config\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]()\ndef destination_choice_spec(configs_dir):\n return read_model_spec(configs_dir, 'destination_choice.csv')\n\n\[email protected]()\ndef destination_choice_settings(configs_dir):\n return config.read_model_settings(configs_dir, 'destination_choice.yaml')\n\n\[email protected]()\ndef destination_choice(non_mandatory_tours_merged,\n skim_dict,\n destination_choice_spec,\n destination_choice_settings,\n destination_size_terms,\n chunk_size,\n trace_hh_id):\n\n \"\"\"\n Given the tour generation from the above, each tour needs to have a\n destination, so in this case tours are the choosers (with the associated\n person that's making the tour)\n \"\"\"\n\n # choosers are tours - in a sense tours are choosing their destination\n choosers = non_mandatory_tours_merged.to_frame()\n alternatives = destination_size_terms.to_frame()\n spec = destination_choice_spec.to_frame()\n\n constants = config.get_model_constants(destination_choice_settings)\n\n sample_size = destination_choice_settings[\"SAMPLE_SIZE\"]\n\n # create wrapper with keys for this lookup - in this case there is a TAZ in the choosers\n # and a TAZ in the alternatives which get merged during interaction\n # the skims will be available under the name \"skims\" for any @ expressions\n skims = skim_dict.wrap(\"TAZ\", \"TAZ_r\")\n\n locals_d = {\n 'skims': skims\n }\n if constants is not None:\n locals_d.update(constants)\n\n logger.info(\"Running destination_choice with %d non_mandatory_tours\" % len(choosers.index))\n\n choices_list = []\n # segment by trip type and pick the right spec for each person type\n for name, segment in choosers.groupby('tour_type'):\n\n # FIXME - there are two options here escort with kids and without\n kludge_name = name\n if name == \"escort\":\n logging.error(\"destination_choice escort not implemented - running shopping instead\")\n kludge_name = \"shopping\"\n\n # the segment is now available to switch between size terms\n locals_d['segment'] = kludge_name\n\n # FIXME - no point in considering impossible alternatives\n alternatives_segment = alternatives[alternatives[kludge_name] > 0]\n\n logger.info(\"Running segment '%s' of %d tours %d alternatives\" %\n (name, len(segment), len(alternatives_segment)))\n\n # name index so tracing knows how to slice\n segment.index.name = 'tour_id'\n\n choices = interaction_simulate(\n segment,\n alternatives_segment,\n spec[[kludge_name]],\n skims=skims,\n locals_d=locals_d,\n sample_size=sample_size,\n chunk_size=chunk_size,\n trace_label='destination.%s' % name)\n\n choices_list.append(choices)\n\n choices = pd.concat(choices_list)\n\n # FIXME - can there be null destinations?\n if choices.isnull().any():\n logger.error(\"destination_choice had %s null destinations\" % choices.isnull().sum())\n assert choices.isnull().sum() == 0\n\n tracing.print_summary('destination', choices, describe=True)\n\n # every trip now has a destination 
which is the index from the\n # alternatives table - in this case it's the destination taz\n orca.add_column(\"non_mandatory_tours\", \"destination\", choices)\n\n if trace_hh_id:\n tracing.trace_df(orca.get_table('non_mandatory_tours').to_frame(),\n label=\"destination\",\n slicer='person_id',\n index_label='tour',\n columns=None,\n warn_if_empty=True)\n", "id": "9353112", "language": "Python", "matching_score": 3.640470027923584, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/destination.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport orca\nimport pandas as pd\nimport yaml\n\nfrom activitysim.core import simulate as asim\nfrom activitysim.core import tracing\nfrom activitysim.core import config\nfrom activitysim.core.util import memory_info\n\nfrom .util.mode import _mode_choice_spec\n\nlogger = logging.getLogger(__name__)\n\n\"\"\"\nGeneric functions for both tour and trip mode choice\n\"\"\"\n\n\ndef _mode_choice_simulate(records,\n odt_skim_stack_wrapper,\n dot_skim_stack_wrapper,\n od_skim_stack_wrapper,\n spec,\n constants,\n nest_spec,\n trace_label=None, trace_choice_name=None\n ):\n \"\"\"\n This is a utility to run a mode choice model for each segment (usually\n segments are tour/trip purposes). Pass in the tours/trip that need a mode,\n the Skim object, the spec to evaluate with, and any additional expressions\n you want to use in the evaluation of variables.\n \"\"\"\n\n locals_d = {\n \"odt_skims\": odt_skim_stack_wrapper,\n \"dot_skims\": dot_skim_stack_wrapper,\n \"od_skims\": od_skim_stack_wrapper\n }\n if constants is not None:\n locals_d.update(constants)\n\n skims = []\n if odt_skim_stack_wrapper is not None:\n skims.append(odt_skim_stack_wrapper)\n if dot_skim_stack_wrapper is not None:\n skims.append(dot_skim_stack_wrapper)\n if od_skim_stack_wrapper is not None:\n skims.append(od_skim_stack_wrapper)\n\n choices = asim.simple_simulate(records,\n spec,\n nest_spec,\n skims=skims,\n locals_d=locals_d,\n trace_label=trace_label,\n trace_choice_name=trace_choice_name)\n\n alts = spec.columns\n choices = choices.map(dict(zip(range(len(alts)), alts)))\n\n return choices\n\n\ndef get_segment_and_unstack(omnibus_spec, segment):\n \"\"\"\n This does what it says. Take the spec, get the column from the spec for\n the given segment, and unstack. It is assumed that the last column of\n the multiindex is alternatives so when you do this unstacking,\n each alternative is in a column (which is the format this as used for the\n simple_simulate call. The weird nuance here is the \"Rowid\" column -\n since many expressions are repeated (e.g. many are just \"1\") a Rowid\n column is necessary to identify which alternatives are actually part of\n which original row - otherwise the unstack is incorrect (i.e. 
the index\n is not unique)\n \"\"\"\n spec = omnibus_spec[segment].unstack().reset_index(level=\"Rowid\", drop=True).fillna(0)\n\n spec = spec.groupby(spec.index).sum()\n\n return spec\n\n\n\"\"\"\nTour mode choice is run for all tours to determine the transportation mode that\nwill be used for the tour\n\"\"\"\n\n\[email protected]()\ndef tour_mode_choice_settings(configs_dir):\n return config.read_model_settings(configs_dir, 'tour_mode_choice.yaml')\n\n\[email protected]()\ndef tour_mode_choice_spec_df(configs_dir):\n return asim.read_model_spec(configs_dir, 'tour_mode_choice.csv')\n\n\[email protected]()\ndef tour_mode_choice_coeffs(configs_dir):\n with open(os.path.join(configs_dir, 'tour_mode_choice_coeffs.csv')) as f:\n return pd.read_csv(f, index_col='Expression')\n\n\[email protected]()\ndef tour_mode_choice_spec(tour_mode_choice_spec_df,\n tour_mode_choice_coeffs,\n tour_mode_choice_settings):\n return _mode_choice_spec(tour_mode_choice_spec_df,\n tour_mode_choice_coeffs,\n tour_mode_choice_settings,\n trace_label='tour_mode_choice')\n\n\[email protected]()\ndef tour_mode_choice_simulate(tours_merged,\n tour_mode_choice_spec,\n tour_mode_choice_settings,\n skim_dict, skim_stack,\n trace_hh_id):\n \"\"\"\n Tour mode choice simulate\n \"\"\"\n\n trace_label = trace_hh_id and 'tour_mode_choice'\n\n tours = tours_merged.to_frame()\n\n nest_spec = config.get_logit_model_settings(tour_mode_choice_settings)\n constants = config.get_model_constants(tour_mode_choice_settings)\n\n logger.info(\"Running tour_mode_choice_simulate with %d tours\" % len(tours.index))\n\n tracing.print_summary('tour_mode_choice_simulate tour_type',\n tours.tour_type, value_counts=True)\n\n if trace_hh_id:\n tracing.trace_df(tour_mode_choice_spec,\n tracing.extend_trace_label(trace_label, 'spec'),\n slicer='NONE', transpose=False)\n\n # setup skim keys\n odt_skim_stack_wrapper = skim_stack.wrap(left_key='TAZ', right_key='destination',\n skim_key=\"out_period\")\n dot_skim_stack_wrapper = skim_stack.wrap(left_key='destination', right_key='TAZ',\n skim_key=\"in_period\")\n od_skims = skim_dict.wrap('TAZ', 'destination')\n\n choices_list = []\n\n for tour_type, segment in tours.groupby('tour_type'):\n\n # if tour_type != 'work':\n # continue\n\n logger.info(\"tour_mode_choice_simulate tour_type '%s' (%s tours)\" %\n (tour_type, len(segment.index), ))\n\n # name index so tracing knows how to slice\n segment.index.name = 'tour_id'\n\n spec = get_segment_and_unstack(tour_mode_choice_spec, tour_type)\n\n if trace_hh_id:\n tracing.trace_df(spec, tracing.extend_trace_label(trace_label, 'spec.%s' % tour_type),\n slicer='NONE', transpose=False)\n\n choices = _mode_choice_simulate(\n segment,\n odt_skim_stack_wrapper=odt_skim_stack_wrapper,\n dot_skim_stack_wrapper=dot_skim_stack_wrapper,\n od_skim_stack_wrapper=od_skims,\n spec=spec,\n constants=constants,\n nest_spec=nest_spec,\n trace_label=tracing.extend_trace_label(trace_label, tour_type),\n trace_choice_name='tour_mode_choice')\n\n tracing.print_summary('tour_mode_choice_simulate %s choices' % tour_type,\n choices, value_counts=True)\n\n choices_list.append(choices)\n\n # FIXME - force garbage collection\n mem = memory_info()\n logger.debug('memory_info tour_type %s, %s' % (tour_type, mem))\n\n choices = pd.concat(choices_list)\n\n tracing.print_summary('tour_mode_choice_simulate all tour type choices',\n choices, value_counts=True)\n\n orca.add_column(\"tours\", \"mode\", choices)\n\n if trace_hh_id:\n trace_columns = ['mode', 'person_id', 'tour_type', 'tour_num']\n 
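Because the unstack performed by get_segment_and_unstack above can be hard to picture, here is a toy omnibus spec with invented expressions and coefficients. The expression '1' appears for more than one row, so unstacking on (Expression, Alternative) alone would not be unique; the Rowid level disambiguates, and the final groupby-sum collapses the repeats.

import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [('@df.income > 50', 0, 'DRIVE'), ('1', 1, 'DRIVE'), ('1', 2, 'DRIVE'), ('1', 2, 'WALK')],
    names=['Expression', 'Rowid', 'Alternative'])
omnibus_spec = pd.DataFrame({'work': [0.5, -1.0, -1.5, -2.0]}, index=idx)

# same steps as get_segment_and_unstack: move Alternative into columns,
# drop the Rowid disambiguator, then collapse duplicate Expression rows
spec = omnibus_spec['work'].unstack().reset_index(level='Rowid', drop=True).fillna(0)
spec = spec.groupby(spec.index).sum()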
tracing.trace_df(orca.get_table('tours').to_frame(),\n label=tracing.extend_trace_label(trace_label, 'mode'),\n slicer='tour_id',\n index_label='tour_id',\n columns=trace_columns,\n warn_if_empty=True)\n\n # FIXME - this forces garbage collection\n memory_info()\n\n\n\"\"\"\nTrip mode choice is run for all trips to determine the transportation mode that\nwill be used for the trip\n\"\"\"\n\n\[email protected]()\ndef trip_mode_choice_settings(configs_dir):\n return config.read_model_settings(configs_dir, 'trip_mode_choice.yaml')\n\n\[email protected]()\ndef trip_mode_choice_spec_df(configs_dir):\n return asim.read_model_spec(configs_dir, 'trip_mode_choice.csv')\n\n\[email protected]()\ndef trip_mode_choice_coeffs(configs_dir):\n with open(os.path.join(configs_dir, 'trip_mode_choice_coeffs.csv')) as f:\n return pd.read_csv(f, index_col='Expression')\n\n\[email protected]()\ndef trip_mode_choice_spec(trip_mode_choice_spec_df,\n trip_mode_choice_coeffs,\n trip_mode_choice_settings):\n return _mode_choice_spec(trip_mode_choice_spec_df,\n trip_mode_choice_coeffs,\n trip_mode_choice_settings)\n\n\[email protected]()\ndef trip_mode_choice_simulate(trips_merged,\n trip_mode_choice_spec,\n trip_mode_choice_settings,\n skim_dict,\n skim_stack,\n trace_hh_id):\n \"\"\"\n Trip mode choice simulate\n \"\"\"\n\n trips = trips_merged.to_frame()\n\n nest_spec = config.get_logit_model_settings(trip_mode_choice_settings)\n constants = config.get_model_constants(trip_mode_choice_settings)\n\n logger.info(\"Running trip_mode_choice_simulate with %d trips\" % len(trips))\n\n odt_skim_stack_wrapper = skim_stack.wrap(left_key='OTAZ', right_key='DTAZ',\n skim_key=\"start_period\")\n\n od_skims = skim_dict.wrap('OTAZ', 'DTAZ')\n\n choices_list = []\n\n # loop by tour_type in order to easily query the expression coefficient file\n for tour_type, segment in trips.groupby('tour_type'):\n\n logger.info(\"running %s tour_type '%s'\" % (len(segment.index), tour_type, ))\n\n # name index so tracing knows how to slice\n segment.index.name = 'trip_id'\n\n # FIXME - check that destination is not null\n\n trace_label = trace_hh_id and ('trip_mode_choice_%s' % tour_type)\n\n choices = _mode_choice_simulate(\n segment,\n odt_skim_stack_wrapper=odt_skim_stack_wrapper,\n dot_skim_stack_wrapper=None,\n od_skim_stack_wrapper=od_skims,\n spec=get_segment_and_unstack(trip_mode_choice_spec, tour_type),\n constants=constants,\n nest_spec=nest_spec,\n trace_label=trace_label,\n trace_choice_name='trip_mode_choice')\n\n # FIXME - no point in printing verbose value_counts now that we have tracing?\n tracing.print_summary('trip_mode_choice_simulate %s choices' % tour_type,\n choices, value_counts=True)\n\n choices_list.append(choices)\n\n # FIXME - force garbage collection\n mem = memory_info()\n logger.debug('memory_info tour_type %s, %s' % (tour_type, mem))\n\n choices = pd.concat(choices_list)\n\n tracing.print_summary('trip_mode_choice_simulate all tour type choices',\n choices, value_counts=True)\n\n # FIXME - is this a NOP if trips table doesn't exist\n orca.add_column(\"trips\", \"trip_mode\", choices)\n\n if trace_hh_id:\n\n tracing.trace_df(orca.get_table('trips').to_frame(),\n label=\"trip_mode\",\n slicer='trip_id',\n index_label='trip_id',\n warn_if_empty=True)\n\n # FIXME - this forces garbage collection\n memory_info()\n", "id": "8471652", "language": "Python", "matching_score": 5.087625026702881, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/mode.py" }, { "content": "import os\nimport yaml\nimport pandas 
as pd\nimport numpy as np\nimport random\nimport orca\nfrom skim import SkimDictWrapper, SkimStackWrapper\nimport tracing\nimport asim_simulate\n\n_OPEN_FILES = {}\n_PRNG = random.Random()\n\n\n#########################\n# from asim.core.config #\n#########################\n\ndef setting(key, default=None):\n settings = orca.get_injectable('asim_settings')\n return settings.get(key, default)\n\n\ndef read_model_settings(configs_dir, file_name):\n settings = None\n file_path = os.path.join(configs_dir, file_name)\n if os.path.isfile(file_path):\n with open(file_path) as f:\n settings = yaml.load(f)\n\n if settings is None:\n settings = {}\n\n return settings\n\n\ndef get_model_constants(model_settings):\n \"\"\"\n Read constants from model settings file\n\n Returns\n -------\n constants : dict\n dictionary of constants to add to locals\n for use by expressions in model spec\n \"\"\"\n return model_settings.get('CONSTANTS', {})\n\n\ndef get_logit_model_settings(model_settings):\n \"\"\"\n Read nest spec (for nested logit) from model settings file\n\n Returns\n -------\n nests : dict\n dictionary specifying nesting structure and nesting coefficients\n\n constants : dict\n dictionary of constants to add to locals for use\n by expressions in model spec\n \"\"\"\n nests = None\n\n if model_settings is not None:\n\n # default to MNL\n logit_type = model_settings.get('LOGIT_TYPE', 'MNL')\n\n if logit_type not in ['NL', 'MNL']:\n print(\"Unrecognized logit type '%s'\" % logit_type)\n raise RuntimeError(\"Unrecognized logit type '%s'\" % logit_type)\n\n if logit_type == 'NL':\n nests = model_settings.get('NESTS', None)\n if nests is None:\n print(\"No NEST found in model spec for NL model type\")\n raise RuntimeError(\n \"No NEST found in model spec for NL model type\")\n\n return nests\n\n\n#######################\n# from asim.core.util #\n#######################\n\ndef reindex(series1, series2):\n \"\"\"\n This reindexes the first series by the second series. This is an extremely\n common operation that does not appear to be in Pandas at this time.\n If anyone knows of an easier way to do this in Pandas, please inform the\n UrbanSim developers.\n\n The canonical example would be a parcel series which has an index which is\n parcel_ids and a value which you want to fetch, let's say it's land_area.\n Another dataset, let's say of buildings has a series which indicate the\n parcel_ids that the buildings are located on, but which does not have\n land_area. If you pass parcels.land_area as the first series and\n buildings.parcel_id as the second series, this function returns a series\n which is indexed by buildings and has land_area as values and can be\n added to the buildings dataset.\n\n In short, this is a join on to a different table using a foreign key\n stored in the current table, but with only one attribute rather than\n for a full dataset.\n\n This is very similar to the pandas \"loc\" function or \"reindex\" function,\n but neither of those functions return the series indexed on the current\n table. 
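To make the parcels/buildings description just given concrete, here is a hypothetical usage sketch. The names and values are invented, and reindex_sketch is a stand-in that uses the same merge trick as the helper's implementation below, not the helper itself.

import pandas as pd

def reindex_sketch(series1, series2):
    # like series1.loc[series2], but the result keeps series2's index
    return pd.merge(series2.to_frame(name='left'), series1.to_frame(name='right'),
                    left_on='left', right_index=True, how='left').right

land_area = pd.Series([1000.0, 2500.0], index=pd.Index([1, 2], name='parcel_id'))
building_parcel_id = pd.Series([2, 1, 2], index=pd.Index([10, 11, 12], name='building_id'))

# land area fetched for each building, indexed by building_id: 2500, 1000, 2500
building_land_area = reindex_sketch(land_area, building_parcel_id)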
In both of those cases, the series would be indexed on the foreign\n table and would require a second step to change the index.\n\n Parameters\n ----------\n series1, series2 : pandas.Series\n\n Returns\n -------\n reindexed : pandas.Series\n\n \"\"\"\n\n # turns out the merge is much faster than the .loc below\n df = pd.merge(series2.to_frame(name='left'),\n series1.to_frame(name='right'),\n left_on=\"left\",\n right_index=True,\n how=\"left\")\n return df.right\n\n\ndef quick_loc_series(loc_list, target_series):\n \"\"\"\n faster replacement for target_series.loc[loc_list]\n\n pandas Series.loc[] indexing doesn't scale\n for large arrays (e.g. > 1,000,000 elements)\n\n Parameters\n ----------\n loc_list : list-like (numpy.ndarray, pandas.Int64Index, or pandas.Series)\n target_series : pandas.Series\n\n Returns\n -------\n pandas.Series\n \"\"\"\n\n left_on = \"left\"\n\n if isinstance(loc_list, pd.Int64Index):\n left_df = pd.DataFrame({left_on: loc_list.values})\n elif isinstance(loc_list, pd.Series):\n left_df = loc_list.to_frame(name=left_on)\n elif isinstance(loc_list, np.ndarray):\n left_df = pd.DataFrame({left_on: loc_list})\n else:\n raise RuntimeError(\n \"quick_loc_series loc_list of unexpected type %s\" %\n type(loc_list))\n\n df = pd.merge(left_df,\n target_series.to_frame(name='right'),\n left_on=left_on,\n right_index=True,\n how=\"left\")\n\n # regression test\n # assert list(df.right) == list(target_series.loc[loc_list])\n\n return df.right\n\n\n##########################################\n# from asim.abm.models.tables.size_terms #\n##########################################\n\ndef size_term(land_use, destination_choice_coeffs):\n \"\"\"\n This method takes the land use data and multiplies various columns of the\n land use data by coefficients from the spec table in order\n to yield a size term (a linear combination of land use variables).\n\n Parameters\n ----------\n land_use : DataFrame\n A dataframe of land use attributes - the column names should match\n the index of destination_choice_coeffs\n destination_choice_coeffs : Series\n A series of coefficients for the land use attributes - the index\n describes the link to the land use table, and the values are floating\n points numbers used to do the linear combination\n\n Returns\n -------\n values : Series\n The index will be the same as land use, and the values will the\n linear combination of the land use table columns specified by the\n coefficients series.\n \"\"\"\n coeffs = destination_choice_coeffs\n\n # first check for missing column in the land_use table\n missing = coeffs[~coeffs.index.isin(land_use.columns)]\n\n if len(missing) > 0:\n print(\"%s missing columns in land use\" % len(missing.index))\n for v in missing.index.values:\n print(\"missing: %s\" % v)\n\n return land_use[coeffs.index].dot(coeffs)\n\n\n###########################\n# from asim.core.pipeline #\n###########################\n\ndef close_on_exit(file, name):\n assert name not in _OPEN_FILES\n _OPEN_FILES[name] = file\n\n\ndef get_rn_generator():\n \"\"\"\n Return the singleton random number object\n\n Returns\n -------\n activitysim.random.Random\n \"\"\"\n\n return _PRNG\n\n\ndef add_dependent_columns(base_dfname, new_dfname):\n tbl = orca.get_table(new_dfname)\n for col in tbl.columns:\n # logger.debug(\"Adding dependent column %s\" % col)\n orca.add_column(base_dfname, col, tbl[col])\n\n\n#####################################\n# from asim.abm.models.util.logsums #\n#####################################\n\ndef time_period_label(hour):\n 
time_periods = setting('time_periods')\n bin = np.digitize([hour % 24], time_periods['hours'])[0] - 1\n return time_periods['labels'][bin]\n\n\ndef mode_choice_logsums_spec(configs_dir, dest_type):\n DEST_TO_TOUR_TYPE = \\\n {'university': 'university',\n 'highschool': 'school',\n 'gradeschool': 'school',\n 'work': 'work'}\n\n tour_type = DEST_TO_TOUR_TYPE.get(dest_type)\n spec = asim_simulate.read_model_spec(\n configs_dir, 'logsums_spec_%s.csv' % tour_type)\n return spec\n\n\ndef compute_logsums(choosers, logsum_spec, logsum_settings,\n skim_dict, skim_stack, alt_col_name,\n chunk_size, trace_hh_id, trace_label):\n \"\"\"\n\n Parameters\n ----------\n choosers\n logsum_spec\n logsum_settings\n skim_dict\n skim_stack\n alt_col_name\n chunk_size\n trace_hh_id\n trace_label\n\n Returns\n -------\n logsums: pandas series\n computed logsums with same index as choosers\n \"\"\"\n\n trace_label = tracing.extend_trace_label(trace_label, 'compute_logsums')\n\n nest_spec = get_logit_model_settings(logsum_settings)\n constants = get_model_constants(logsum_settings)\n\n print(\"Running compute_logsums with %d choosers\" % len(choosers.index))\n\n if trace_hh_id:\n tracing.trace_df(logsum_spec,\n tracing.extend_trace_label(trace_label, 'spec'),\n slicer='NONE', transpose=False)\n\n # setup skim keys\n odt_skim_stack_wrapper = skim_stack.wrap(\n left_key='TAZ', right_key=alt_col_name, skim_key=\"out_period\")\n dot_skim_stack_wrapper = skim_stack.wrap(\n left_key=alt_col_name, right_key='TAZ', skim_key=\"in_period\")\n od_skim_stack_wrapper = skim_dict.wrap('TAZ', alt_col_name)\n\n skims = [\n odt_skim_stack_wrapper, dot_skim_stack_wrapper, od_skim_stack_wrapper]\n\n locals_d = {\n \"odt_skims\": odt_skim_stack_wrapper,\n \"dot_skims\": dot_skim_stack_wrapper,\n \"od_skims\": od_skim_stack_wrapper\n }\n if constants is not None:\n locals_d.update(constants)\n\n logsums = asim_simulate.simple_simulate_logsums(\n choosers,\n logsum_spec,\n nest_spec,\n skims=skims,\n locals_d=locals_d,\n chunk_size=chunk_size,\n trace_label=trace_label)\n\n return logsums\n", "id": "3834016", "language": "Python", "matching_score": 6.298673152923584, "max_stars_count": 0, "path": "bayarea_urbansim/baus/asim_utils.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom activitysim.core import simulate\nfrom activitysim.core import tracing\nfrom activitysim.core import config\n\n\nlogger = logging.getLogger(__name__)\n\n\n# FIXME - needs a better home?\ndef time_period_label(hour):\n time_periods = config.setting('time_periods')\n bin = np.digitize([hour % 24], time_periods['hours'])[0] - 1\n return time_periods['labels'][bin]\n\n\ndef mode_choice_logsums_spec(configs_dir, dest_type):\n DEST_TO_TOUR_TYPE = \\\n {'university': 'university',\n 'highschool': 'school',\n 'gradeschool': 'school',\n 'work': 'work'}\n\n tour_type = DEST_TO_TOUR_TYPE.get(dest_type)\n spec = simulate.read_model_spec(configs_dir, 'logsums_spec_%s.csv' % tour_type)\n return spec\n\n\ndef compute_logsums(choosers, logsum_spec, logsum_settings,\n skim_dict, skim_stack, alt_col_name,\n chunk_size, trace_hh_id, trace_label):\n \"\"\"\n\n Parameters\n ----------\n choosers\n logsum_spec\n logsum_settings\n skim_dict\n skim_stack\n alt_col_name\n chunk_size\n trace_hh_id\n trace_label\n\n Returns\n -------\n logsums: pandas series\n computed logsums with same index as choosers\n \"\"\"\n\n trace_label = tracing.extend_trace_label(trace_label, 
'compute_logsums')\n\n nest_spec = config.get_logit_model_settings(logsum_settings)\n constants = config.get_model_constants(logsum_settings)\n\n logger.info(\"Running compute_logsums with %d choosers\" % len(choosers.index))\n\n if trace_hh_id:\n tracing.trace_df(logsum_spec,\n tracing.extend_trace_label(trace_label, 'spec'),\n slicer='NONE', transpose=False)\n\n # setup skim keys\n odt_skim_stack_wrapper = skim_stack.wrap(left_key='TAZ', right_key=alt_col_name,\n skim_key=\"out_period\")\n dot_skim_stack_wrapper = skim_stack.wrap(left_key=alt_col_name, right_key='TAZ',\n skim_key=\"in_period\")\n od_skim_stack_wrapper = skim_dict.wrap('TAZ', alt_col_name)\n\n skims = [odt_skim_stack_wrapper, dot_skim_stack_wrapper, od_skim_stack_wrapper]\n\n locals_d = {\n \"odt_skims\": odt_skim_stack_wrapper,\n \"dot_skims\": dot_skim_stack_wrapper,\n \"od_skims\": od_skim_stack_wrapper\n }\n if constants is not None:\n locals_d.update(constants)\n\n logsums = simulate.simple_simulate_logsums(\n choosers,\n logsum_spec,\n nest_spec,\n skims=skims,\n locals_d=locals_d,\n chunk_size=chunk_size,\n trace_label=trace_label)\n\n return logsums\n", "id": "5111004", "language": "Python", "matching_score": 1.4194133281707764, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/util/logsums.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom activitysim.core.interaction_simulate import interaction_simulate\nfrom activitysim.core import tracing\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_previous_tour_by_tourid(current_tour_person_ids,\n previous_tour_by_personid,\n alts):\n \"\"\"\n Matches current tours with attributes of previous tours for the same\n person. See the return value below for more information.\n\n Parameters\n ----------\n current_tour_person_ids : Series\n A Series of person ids for the tours we're about make the choice for\n - index should match the tours DataFrame.\n previous_tour_by_personid : Series\n A Series where the index is the person id and the value is the index\n of the alternatives of the scheduling.\n alts : DataFrame\n The alternatives of the scheduling.\n\n Returns\n -------\n prev_alts : DataFrame\n A DataFrame with an index matching the CURRENT tours we're making a\n decision for, but with columns from the PREVIOUS tour of the person\n associated with each of the CURRENT tours. Every column of the\n alternatives will have \"_previous\" added as a suffix to keep\n differentiated from the current alternatives that will be part of the\n interaction.\n \"\"\"\n previous_tour_by_tourid = \\\n previous_tour_by_personid.loc[current_tour_person_ids]\n\n previous_tour_by_tourid = alts.loc[previous_tour_by_tourid]\n\n previous_tour_by_tourid.index = current_tour_person_ids.index\n previous_tour_by_tourid.columns = [x+'_previous' for x in\n previous_tour_by_tourid.columns]\n\n return previous_tour_by_tourid\n\n\ndef vectorize_tour_scheduling(tours, alts, spec, constants={},\n chunk_size=0, trace_label=None):\n \"\"\"\n The purpose of this method is fairly straightforward - it takes tours\n and schedules them into time slots. Alternatives should be specified so\n as to define those time slots (usually with start and end times).\n\n The difficulty of doing this in Python is that subsequent tours are\n dependent on certain characteristics of previous tours for the same\n person. 
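Stepping back to the time_period_label helper defined in both logsum modules above: it bins an hour of the day into a skim time-period label with np.digitize. A quick illustration with hypothetical period settings (not the real configuration):

import numpy as np

time_periods = {'hours': [3, 6, 9, 15, 18],
                'labels': ['EA', 'AM', 'MD', 'PM', 'EV']}

hour = 17
bin = np.digitize([hour % 24], time_periods['hours'])[0] - 1
label = time_periods['labels'][bin]   # 17:00 falls in the 15-18 bin, so 'PM'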
This is a problem with Python's vectorization requirement,\n so this method does all the 1st tours, then all the 2nd tours, and so forth.\n\n This method also adds variables that can be used in the spec which have\n to do with the previous tours per person. Every column in the\n alternatives table is appended with the suffix \"_previous\" and made\n available. So if your alternatives table has columns for start and end,\n then start_previous and end_previous will be set to the start and end of\n the most recent tour for a person. The first time through,\n start_previous and end_previous are undefined, so make sure to protect\n with a tour_num >= 2 in the variable computation.\n\n Parameters\n ----------\n tours : DataFrame\n DataFrame of tours containing tour attributes, as well as a person_id\n column to define the nth tour for each person.\n alts : DataFrame\n DataFrame of alternatives which represent time slots. Will be passed to\n interaction_simulate in batches for each nth tour.\n spec : DataFrame\n The spec which will be passed to interaction_simulate.\n\n Returns\n -------\n choices : Series\n A Series of choices where the index is the index of the tours\n DataFrame and the values are the index of the alts DataFrame.\n \"\"\"\n\n max_num_trips = tours.groupby('person_id').size().max()\n\n if np.isnan(max_num_trips):\n s = pd.Series()\n s.index.name = 'tour_id'\n return s\n\n # because this is Python, we have to vectorize everything by doing the\n # \"nth\" trip for each person in a for loop (in other words, because each\n # trip is dependent on the time windows left by the previous decision) -\n # hopefully this will work out ok!\n\n choices = []\n\n # keep a series of the the most recent tours for each person\n previous_tour_by_personid = pd.Series(\n pd.Series(alts.index).iloc[0], index=tours.person_id.unique())\n\n for i in range(max_num_trips):\n\n # this reset_index / set_index stuff keeps the index as the tours\n # index rather that switching to person_id as the index which is\n # what happens when you groupby person_id\n index_name = tours.index.name or 'index'\n nth_tours = tours.reset_index().\\\n groupby('person_id').nth(i).reset_index().set_index(index_name)\n\n nth_tours.index.name = 'tour_id'\n\n if trace_label:\n logger.info(\"%s running %d #%d tour choices\" % (trace_label, len(nth_tours), i+1))\n\n # tour num can be set by the user, but if it isn't we set it here\n if \"tour_num\" not in nth_tours:\n nth_tours[\"tour_num\"] = i+1\n\n nth_tours = nth_tours.join(get_previous_tour_by_tourid(\n nth_tours.person_id,\n previous_tour_by_personid,\n alts))\n\n tour_trace_label = tracing.extend_trace_label(trace_label, 'tour_%s' % i)\n\n nth_choices = interaction_simulate(\n nth_tours,\n alts.copy(),\n spec,\n locals_d=constants,\n chunk_size=chunk_size,\n trace_label=tour_trace_label\n )\n\n choices.append(nth_choices)\n\n previous_tour_by_personid.loc[nth_tours.person_id] = nth_choices.values\n\n choices = pd.concat(choices)\n\n # return the concatenated choices\n return choices\n", "id": "10090178", "language": "Python", "matching_score": 4.143911838531494, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/util/vectorize_tour_scheduling.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\n\nimport pytest\nimport pandas as pd\nimport numpy as np\nimport orca\n\nimport pandas.util.testing as pdt\n\nfrom activitysim.core import pipeline\n\nfrom ..vectorize_tour_scheduling import get_previous_tour_by_tourid, \\\n 
vectorize_tour_scheduling\n\n\ndef test_vts():\n\n alts = pd.DataFrame({\n \"start\": [1, 2, 3],\n \"end\": [4, 5, 6],\n }, index=[10, 20, 30])\n\n current_tour_person_ids = pd.Series(['b', 'c'],\n index=['d', 'e'])\n\n previous_tour_by_personid = pd.Series([20, 20, 10],\n index=['a', 'b', 'c'])\n\n prev_tour_attrs = get_previous_tour_by_tourid(current_tour_person_ids,\n previous_tour_by_personid,\n alts)\n\n pdt.assert_series_equal(\n prev_tour_attrs.start_previous,\n pd.Series([2, 1], index=['d', 'e'], name='start_previous'))\n\n pdt.assert_series_equal(\n prev_tour_attrs.end_previous,\n pd.Series([5, 4], index=['d', 'e'], name='end_previous'))\n\n tours = pd.DataFrame({\n \"person_id\": [1, 1, 2, 3, 3],\n \"income\": [20, 20, 30, 25, 25]\n })\n\n spec = pd.DataFrame({\"Coefficient\": [1.2]},\n index=[\"income\"])\n spec.index.name = \"Expression\"\n\n orca.add_injectable(\"check_for_variability\", True)\n\n choices = vectorize_tour_scheduling(tours, alts, spec)\n\n # FIXME - dead reckoning regression\n # there's no real logic here - this is just what came out of the monte carlo\n # note that the result comes out ordered by the nth trips and not ordered\n # by the trip index. shrug?\n expected = [20, 30, 20, 20, 30]\n pdt.assert_series_equal(\n choices,\n pd.Series(expected,\n index=pd.Index([0, 2, 3, 1, 4], name='tour_id')))\n", "id": "4702071", "language": "Python", "matching_score": 2.066041946411133, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/util/test/test_vectorize_tour_scheduling.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport pandas as pd\nimport pandas.util.testing as pdt\nfrom ..mode import pre_process_expressions, evaluate_expression_list, \\\n expand_alternatives, _mode_choice_spec\n\n\ndef test_ppe():\n ret = pre_process_expressions(\n ['1', '$expr.format(var=\"bar\")'],\n {'expr': '@foo * {var}'}\n )\n assert ret[0] == '1'\n assert ret[1] == '@foo * bar'\n\n\ndef test_eel():\n ret = evaluate_expression_list(\n pd.Series(\n ['.7', 'ivt * .7 * COST'],\n index=['ivt', 'ivt_lr']\n ),\n {'COST': 2.0}\n )\n pdt.assert_series_equal(\n ret,\n pd.Series(\n [.7, .98],\n index=['ivt', 'ivt_lr']\n )\n )\n\n\ndef test_ea():\n df = pd.DataFrame({\n \"Alternative\": [\"One\", \"One,Two\"],\n \"Other column\": [1, 2]\n }).set_index(\"Alternative\")\n\n df = expand_alternatives(df)\n\n pdt.assert_series_equal(\n df.reset_index().Alternative,\n pd.Series(\n [\"One\", \"One\", \"Two\"], index=[0, 1, 2], name='Alternative'))\n\n pdt.assert_series_equal(\n df.reset_index().Rowid,\n pd.Series(\n [0, 1, 1], index=[0, 1, 2], name='Rowid'))\n\n pdt.assert_series_equal(\n df.reset_index()[\"Other column\"],\n pd.Series(\n [1, 2, 2], index=[0, 1, 2], name='Other column'))\n\n\ndef test_mode_choice_spec():\n\n spec = pd.DataFrame({\n \"Alternative\": [\"One\", \"One,Two\"],\n \"Expression\": ['1', '$expr.format(var=\"bar\")'],\n \"Work\": ['ivt', 'ivt_lr']\n }).set_index([\"Expression\"])\n\n coeffs = pd.DataFrame({\n \"Work\": ['.7', 'ivt * .7 * COST']\n }, index=['ivt', 'ivt_lr'])\n\n settings = {\n \"CONSTANTS\": {\n \"COST\": 2.0\n },\n \"VARIABLE_TEMPLATES\": {\n 'expr': '@foo * {var}'\n }\n }\n\n df = _mode_choice_spec(spec, coeffs, settings)\n\n pdt.assert_series_equal(\n df.reset_index().Alternative,\n pd.Series(\n [\"One\", \"One\", \"Two\"], index=[0, 1, 2], name='Alternative'))\n\n pdt.assert_series_equal(\n df.reset_index().Rowid,\n pd.Series(\n [0, 1, 1], index=[0, 1, 2], name='Rowid'))\n\n pdt.assert_series_equal(\n 
df.reset_index()[\"Work\"],\n pd.Series(\n [.7, .98, .98], index=[0, 1, 2], name='Work'))\n\n pdt.assert_series_equal(\n df.reset_index()[\"Expression\"],\n pd.Series(\n [\"1\", \"@foo * bar\", \"@foo * bar\"],\n index=[0, 1, 2], name='Expression'))\n", "id": "3406774", "language": "Python", "matching_score": 3.459085702896118, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/util/test/test_mode.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport copy\nimport string\nimport pandas as pd\nimport numpy as np\n\nfrom activitysim.core import tracing\n\n\n\"\"\"\nAt this time, these utilities are mostly for transforming the mode choice\nspec, which is more complicated than the other specs, into something that\nlooks like the other specs.\n\"\"\"\n\n\ndef evaluate_expression_list(expressions, constants):\n \"\"\"\n Evaluate a list of expressions - each one can depend on the one before\n it. These are usually used for the coefficients which have relationships\n to each other. So ivt=.7 and then ivt_lr=ivt*.9.\n\n Parameters\n ----------\n expressions : Series\n Same as below except the values are accumulated from expression to\n expression and there is no assumed \"$\" at the beginning. This is a\n Series because the index are the names of the expressions which are\n used in subsequent evals - thus naming the expressions is required.\n For better or worse, the expressions are assumed to evaluate to\n floats and this is guaranteed by casting to float after eval-ing.\n constants : dict\n will be passed as the scope of eval - usually a separate set of\n constants are passed in here\n\n Returns\n -------\n expressions : Series\n\n \"\"\"\n d = {}\n # this could be a simple expression except that the dictionary\n # is accumulating expressions - i.e. they're not all independent\n # and must be evaluated in order\n for k, v in expressions.iteritems():\n # make sure it can be converted to a float\n result = float(eval(str(v), copy.copy(d), constants))\n d[k] = result\n\n return pd.Series(d)\n\n\ndef substitute_coefficients(expressions, constants):\n \"\"\"\n Substitute the named coeffcients in expressions with their numeric values\n\n Parameters\n ----------\n expressions : Series\n Same as below except there is no assumed \"$\" at the beginning.\n For better or worse, the expressions are assumed to evaluate to\n floats and this is guaranteed by casting to float after eval-ing.\n constants : dict\n will be passed as the scope of eval - usually a separate set of\n constants are passed in here\n\n Returns\n -------\n expressions : Series\n\n \"\"\"\n return pd.Series([float(eval(e, constants)) for e in expressions], index=expressions.index)\n\n\ndef pre_process_expressions(expressions, variable_templates):\n \"\"\"\n This one is pretty simple - pass in a list of expressions which contain\n references to templates and pass a dictionary of the templates themselves.\n Strings will only be evaluated which are prepended with $.\n\n Parameters\n ----------\n expressions : list of strs\n These are the expressions that will be evaluated - generally these\n contain templates that get passed below. So will be something like\n ['$SKIM_TEMPLATE.format(sk=\"AMPEAK\")']\n variable_templates : dict of templates\n Will be passed as the scope of eval. Keys are usually template names\n and values are strings. 
The dict could be something like\n {'SKIM_TEMPLATE': 'skims[{sk}]'}\n\n Returns\n -------\n expressions : list of strs\n Each expression is evaluated with variable_templates in the scope and\n the result is returned.\n \"\"\"\n return [eval(e[1:], variable_templates) if e.startswith('$') else e for\n e in expressions]\n\n\ndef expand_alternatives(df):\n \"\"\"\n Alternatives are kept as a comma separated list. At this stage we need\n need to split them up so that there is only one alternative per row, and\n where an expression is shared among alternatives, that row is copied\n with each alternative alternative value (pun intended) substituted for\n the alternative value for each row. The DataFrame needs an Alternative\n column which is a comma separated list of alternatives. See the test for\n an example.\n \"\"\"\n\n # first split up the alts using string.split\n alts = [string.split(s, \",\") for s in df.reset_index()['Alternative']]\n\n # this is the number of alternatives in each row\n len_alts = [len(x) for x in alts]\n\n # this repeats the locs for the number of alternatives in each row\n ilocs = np.repeat(np.arange(len(df)), len_alts)\n\n # grab the rows the right number of times (after setting a rowid)\n df['Rowid'] = np.arange(len(df))\n df = df.iloc[ilocs]\n\n # now concat all the lists\n new_alts = sum(alts, [])\n\n df.reset_index([\"Alternative\"], inplace=True)\n df[\"Alternative\"] = new_alts\n # rowid needs to be set here - we're going to unstack this and we need\n # a unique identifier to keep track of the rows during the unstack\n df = df.set_index(['Rowid', 'Alternative'], append=True)\n\n return df\n\n\ndef _mode_choice_spec(mode_choice_spec_df, mode_choice_coeffs,\n mode_choice_settings, trace_label=None):\n \"\"\"\n Ok we have read in the spec - we need to do several things to reformat it\n to the same style spec that all the other models have.\n\n mode_choice_spec_df : DataFrame\n This is the actual spec DataFrame, the same as all of the other spec\n dataframes, except that 1) expressions can be prepended with a \"$\"\n - see pre_process_expressions above 2) There is an Alternative column -\n see expand_alternatives above and 3) there are assumed to be\n expressions in the coefficient column which get evaluated by\n evaluate_expression_list above\n mode_choice_coeffs : DataFrame\n This has the same columns as the spec (columns are assumed to be\n independent segments of the model), and the coefficients (values) in\n the spec DataFrame refer to values in the mode_choice_coeffs\n DataFrame. The mode_choice_coeffs DataFrame can also contain\n expressions which refer to previous rows in the same column. Is is\n assumed that all values in mode_choice_coeffs can be cast to float\n after running evaluate_expression_list, and that these floats are\n substituted in multiple place in the mode_choice_spec_df.\n mode_choice_settings : Dict, usually read from YAML\n Has two values which are used. 
One key in CONSTANTS which is used as\n the scope for the evals which take place here and one that is\n VARIABLE_TEMPLATES which is used as the scope for expressions in\n mode_choice_spec_df which are prepended with \"$\"\n\n Returns\n -------\n new_spec_df : DataFrame\n A new spec DataFrame which is exactly like all of the other models.\n \"\"\"\n\n trace_label = tracing.extend_trace_label(trace_label, '_mode_choice_spec')\n\n constants = mode_choice_settings['CONSTANTS']\n templates = mode_choice_settings['VARIABLE_TEMPLATES']\n df = mode_choice_spec_df\n index_name = df.index.name\n\n if trace_label:\n tracing.trace_df(df,\n tracing.extend_trace_label(trace_label, 'raw'),\n slicer='NONE', transpose=False)\n\n # FIXME - this is no longer used and should probably be removed\n # the expressions themselves can be prepended with a \"$\" in order to use\n # model templates that are shared by several different expressions\n df.index = pre_process_expressions(df.index, templates)\n df.index.name = index_name\n\n # set index to ['Expression', 'Alternative']\n df = df.set_index('Alternative', append=True)\n\n if trace_label:\n tracing.trace_df(df,\n tracing.extend_trace_label(trace_label, 'pre_process_expressions'),\n slicer='NONE', transpose=False)\n\n # for each segment - e.g. eatout vs social vs work vs ...\n for col in df.columns:\n\n # first the coeffs come as expressions that refer to previous cells\n # as well as constants that come from the settings file\n mode_choice_coeffs[col] = evaluate_expression_list(\n mode_choice_coeffs[col],\n constants=constants)\n\n # then use the coeffs we just evaluated within the spec (they occur\n # multiple times in the spec which is why they get stored uniquely\n # in a different file\n df[col] = substitute_coefficients(\n df[col],\n mode_choice_coeffs[col].to_dict())\n\n if trace_label:\n tracing.trace_df(df,\n tracing.extend_trace_label(trace_label, 'evaluate_expression_list'),\n slicer='NONE', transpose=False)\n\n df = expand_alternatives(df)\n\n if trace_label:\n tracing.trace_df(df,\n tracing.extend_trace_label(trace_label, 'expand_alternatives'),\n slicer='NONE', transpose=False)\n\n return df\n", "id": "3489691", "language": "Python", "matching_score": 2.7529406547546387, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/util/mode.py" }, { "content": "import numpy as np\nimport pandas as pd\nimport logit\nimport tracing\nimport asim_utils\nfrom interaction_simulate import eval_interaction_utilities\n\nDUMP = False\n\n\ndef make_sample_choices(\n choosers, probs, interaction_utilities,\n sample_size, alternative_count, alt_col_name,\n trace_label):\n \"\"\"\n\n Parameters\n ----------\n choosers\n probs : pandas DataFrame\n one row per chooser and one column per alternative\n interaction_utilities\n dataframe with len(interaction_df) rows and one utility column\n sample_size : int\n number of samples/choices to make\n alternative_count\n alt_col_name\n trace_label\n\n Returns\n -------\n\n \"\"\"\n\n assert isinstance(probs, pd.DataFrame)\n assert probs.shape == (len(choosers), alternative_count)\n\n assert isinstance(interaction_utilities, pd.DataFrame)\n assert interaction_utilities.shape == (\n len(choosers) * alternative_count, 1)\n\n t0 = tracing.print_elapsed_time()\n\n # probs should sum to 1 across each row\n BAD_PROB_THRESHOLD = 0.001\n bad_probs = \\\n probs.sum(axis=1).sub(np.ones(len(probs.index))).abs() \\\n > BAD_PROB_THRESHOLD * np.ones(len(probs.index))\n\n if bad_probs.any():\n 
logit.report_bad_choices.report_bad_choices(\n bad_probs, probs,\n tracing.extend_trace_label(trace_label, 'bad_probs'),\n msg=\"probabilities do not add up to 1\",\n trace_choosers=choosers)\n\n t0 = tracing.print_elapsed_time(\"make_choices bad_probs\", t0, debug=True)\n\n cum_probs_arr = probs.as_matrix().cumsum(axis=1)\n t0 = tracing.print_elapsed_time(\n \"make_choices cum_probs_arr\", t0, debug=True)\n\n # alt probs in convenient layout to return prob of chose alternative\n # (same layout as cum_probs_arr and interaction_utilities)\n alt_probs_array = probs.as_matrix().flatten()\n\n # get sample_size rands for each chooser\n # transform as we iterate over alternatives\n # reshape so rands[i] is in broadcastable (2-D) shape for cum_probs_arr\n # i.e rands[i] is a 2-D array of one alt choice rand for each chooser\n rands = asim_utils.get_rn_generator().random_for_df(probs, n=sample_size)\n rands = rands.T.reshape(sample_size, -1, 1)\n t0 = tracing.print_elapsed_time(\n \"make_choices random_for_df\", t0, debug=True)\n\n # the alternative value chosen\n choices_array = np.empty([sample_size, len(choosers)]).astype(int)\n\n # the probability of the chosen alternative\n choice_probs_array = np.empty([sample_size, len(choosers)])\n\n # FIXME - do this all at once rather than iterate?\n for i in range(sample_size):\n\n # FIXME - do this in numpy, not pandas?\n\n # rands for this alt in broadcastable shape\n r = rands[i]\n\n # position of first occurrence of positive value\n positions = np.argmax(cum_probs_arr > r, axis=1)\n\n # FIXME - leave positions as numpy array, not pandas series?\n\n # positions is series with the chosen alternative\n # represented as a column index in probs\n # which is an integer between zero and num alternatives\n # in the alternative sample\n positions = pd.Series(positions, index=probs.index)\n\n # need to get from an integer offset into the alternative\n # sample to the alternative index that is, we want the index\n # value of the row that is offset by <position> rows into the\n # tranche of this choosers alternatives created by cross join\n # of alternatives and choosers\n\n # offsets is the offset into model_design df of first row\n # of chooser alternatives\n offsets = np.arange(len(positions)) * alternative_count\n\n # resulting pandas Int64Index has one element per chooser\n # and is in same order as choosers\n choices_array[i] = interaction_utilities.index.take(\n positions + offsets)\n\n choice_probs_array[i] = np.take(alt_probs_array, positions + offsets)\n\n # explode to one row per chooser.index, alt_TAZ\n choices_df = pd.DataFrame(\n {alt_col_name: choices_array.flatten(order='F'),\n 'rand': rands.flatten(order='F'),\n 'prob': choice_probs_array.flatten(order='F'),\n choosers.index.name: np.repeat(\n np.asanyarray(choosers.index), sample_size)\n })\n\n return choices_df\n\n\ndef _interaction_sample(\n choosers, alternatives, spec, sample_size, alt_col_name,\n skims=None, locals_d=None,\n trace_label=None):\n \"\"\"\n Run a MNL simulation in the situation in which alternatives must\n be merged with choosers because there are interaction terms or\n because alternatives are being sampled.\n\n Parameters are same as for public function interaction_simulate\n\n spec : dataframe\n one row per spec expression and one col with utility coefficient\n\n interaction_df : dataframe\n cross join (cartesian product) of choosers with alternatives\n combines columns of choosers and alternatives\n len(df) == len(choosers) * len(alternatives)\n index values (non-unique) 
are index values from alternatives df\n\n interaction_utilities : dataframe\n the utility of each alternative is sum of the partial\n utilities determined by the various spec expressions and\n their corresponding coefficients yielding a dataframe\n with len(interaction_df) rows and one utility column\n having the same index as interaction_df (non-unique values\n from alternatives df)\n\n utilities : dataframe\n dot product of model_design.dot(spec)\n yields utility value for element in the cross product of\n choosers and alternatives this is then reshaped as a dataframe\n with one row per chooser and one column per alternative\n\n probs : dataframe\n utilities exponentiated and converted to probabilities\n same shape as utilities, one row per chooser and one column\n per alternative\n\n positions : series\n choices among alternatives with the chosen alternative represented\n as the integer index of the selected alternative column in probs\n\n choices : series\n series with the alternative chosen for each chooser\n the index is same as choosers\n and the series value is the alternative df index of chosen alternative\n\n Returns\n -------\n choices_df : pandas.DataFrame\n\n A DataFrame where index should match the index of the choosers\n DataFrame and columns alt_col_name, prob, rand, pick_count\n\n prob: float\n the probability of the chosen alternative\n rand: float\n the rand that did the choosing\n pick_count : int\n number of duplicate picks for chooser, alt\n \"\"\"\n\n trace_label = tracing.extend_trace_label(\n trace_label, 'interaction_simulate')\n have_trace_targets = trace_label and tracing.has_trace_targets(choosers)\n\n if alt_col_name is None:\n alt_col_name = 'alt_%s' % alternatives.index.name\n\n if have_trace_targets:\n tracing.trace_df(choosers, tracing.extend_trace_label(\n trace_label, 'choosers'))\n tracing.trace_df(alternatives, tracing.extend_trace_label(\n trace_label, 'alternatives'),\n slicer='NONE', transpose=False)\n\n if len(spec.columns) > 1:\n raise RuntimeError('spec must have only one column')\n\n alternative_count = len(alternatives)\n # print(\"_interaction_sample alternative_count %s\" % alternative_count)\n\n # if using skims, copy index into the dataframe, so it will be\n # available as the \"destination\" for the skims dereference below\n if skims:\n alternatives[alternatives.index.name] = alternatives.index\n\n # cross join choosers and alternatives (cartesian product)\n # for every chooser, there will be a row for each alternative\n # index values (non-unique) are from alternatives df\n interaction_df = logit.interaction_dataset(\n choosers, alternatives, alternative_count)\n\n assert alternative_count == len(interaction_df.index) / len(choosers.index)\n\n if skims:\n asim_utils.add_skims(interaction_df, skims)\n\n # evaluate expressions from the spec multiply by coefficients and sum\n # spec is df with one row per spec expression and one col\n # with utility coefficient column names of interaction_df match spec\n # index values utilities has utility value for element in the\n # cross product of choosers and alternatives interaction_utilities is\n # a df with one utility column and one row per row in interaction_df\n if have_trace_targets:\n trace_rows, trace_ids \\\n = tracing.interaction_trace_rows(\n interaction_df, choosers, alternative_count)\n\n tracing.trace_df(\n interaction_df[trace_rows], tracing.extend_trace_label(\n trace_label, 'interaction_df'), slicer='NONE', transpose=False)\n else:\n trace_rows = trace_ids = None\n\n 
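The random draws in make_sample_choices above implement inverse-CDF sampling with a vectorized argmax over cumulative probabilities. The core move, isolated with made-up probabilities and draws:

import numpy as np

# one row of alternative probabilities per chooser (each row sums to 1)
probs = np.array([[0.2, 0.5, 0.3],
                  [0.6, 0.1, 0.3]])
cum_probs = probs.cumsum(axis=1)      # [[0.2, 0.7, 1.0], [0.6, 0.7, 1.0]]

# one uniform draw per chooser, shaped to broadcast against cum_probs
r = np.array([[0.65], [0.05]])

# index of the first column whose cumulative probability exceeds the draw
positions = np.argmax(cum_probs > r, axis=1)   # array([1, 0])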
interaction_utilities, trace_eval_results \\\n = eval_interaction_utilities(\n spec, interaction_df, locals_d, trace_label, trace_rows)\n\n if have_trace_targets:\n tracing.trace_interaction_eval_results(\n trace_eval_results, trace_ids, tracing.extend_trace_label(\n trace_label, 'eval'))\n\n tracing.trace_df(\n interaction_utilities[trace_rows], tracing.extend_trace_label(\n trace_label, 'interaction_utilities'),\n slicer='NONE', transpose=False)\n\n tracing.dump_df(\n DUMP, interaction_utilities, trace_label, 'interaction_utilities')\n\n # FIXME - do this in numpy, not pandas?\n # reshape utilities (one utility column and one row per\n # row in interaction_utilities) to a dataframe with one\n # row per chooser and one column per alternative\n utilities = pd.DataFrame(\n interaction_utilities.as_matrix().reshape(\n len(choosers), alternative_count), index=choosers.index)\n\n if have_trace_targets:\n tracing.trace_df(utilities, tracing.extend_trace_label(\n trace_label, 'utilities'),\n column_labels=['alternative', 'utility'])\n\n tracing.dump_df(DUMP, utilities, trace_label, 'utilities')\n\n # FIXME - do this in numpy, not pandas?\n # convert to probabilities (utilities exponentiated\n # and normalized to probs) probs is same shape as utilities,\n # one row per chooser and one column for alternative\n probs = logit.utils_to_probs(\n utilities, trace_label=trace_label, trace_choosers=choosers)\n\n if have_trace_targets:\n tracing.trace_df(probs, tracing.extend_trace_label(\n trace_label, 'probs'),\n column_labels=['alternative', 'probability'])\n\n choices_df = make_sample_choices(\n choosers, probs, interaction_utilities,\n sample_size, alternative_count, alt_col_name, trace_label)\n\n # make_sample_choices should return choosers index as choices_df column\n assert choosers.index.name in choices_df.columns\n\n # pick_count and pick_dup\n # pick_count is number of duplicate picks\n # pick_dup flag is True for all but first of duplicates\n pick_group = choices_df.groupby([choosers.index.name, alt_col_name])\n\n # number each item in each group from 0 to the length of that group - 1.\n choices_df['pick_count'] = pick_group.cumcount(ascending=True)\n # flag duplicate rows after first\n choices_df['pick_dup'] = choices_df['pick_count'] > 0\n # add reverse cumcount to get total pick_count\n # (conveniently faster than groupby.count + merge)\n choices_df['pick_count'] += pick_group.cumcount(ascending=False) + 1\n\n # drop the duplicates\n choices_df = choices_df[~choices_df['pick_dup']]\n del choices_df['pick_dup']\n\n # set index after groupby so we can trace on it\n choices_df.set_index(choosers.index.name, inplace=True)\n\n tracing.dump_df(DUMP, choices_df, trace_label, 'choices_df')\n\n if have_trace_targets:\n tracing.trace_df(\n choices_df, tracing.extend_trace_label(\n trace_label, 'sampled_alternatives'),\n transpose=False, column_labels=['sample_alt', 'alternative'])\n\n return choices_df\n\n\ndef interaction_sample(\n choosers, alternatives, spec, sample_size,\n alt_col_name=None,\n skims=None, locals_d=None, chunk_size=0,\n trace_label=None):\n\n \"\"\"\n Run a simulation in the situation in which alternatives must\n be merged with choosers because there are interaction terms or\n because alternatives are being sampled.\n\n optionally (if chunk_size > 0) iterates over choosers in chunk_size chunks\n\n Parameters\n ----------\n choosers : pandas.DataFrame\n DataFrame of choosers\n alternatives : pandas.DataFrame\n DataFrame of alternatives - will be merged with choosers and sampled\n 
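The pick_count / pick_dup bookkeeping above collapses repeated draws of the same (chooser, alternative) pair into one row carrying a count. A slightly condensed version of that logic, with invented ids:

import pandas as pd

choices_df = pd.DataFrame({'person_id': [1, 1, 1, 2, 2],
                           'alt_TAZ':   [14, 14, 16, 9, 9]})

grp = choices_df.groupby(['person_id', 'alt_TAZ'])
fwd = grp.cumcount(ascending=True)        # 0 for the first draw of each pair
rev = grp.cumcount(ascending=False)

choices_df['pick_count'] = fwd + rev + 1  # total draws of that (chooser, alt) pair
choices_df = choices_df[fwd == 0]         # keep only the first occurrence
# leaves (1, 14) with pick_count 2, (1, 16) with 1, and (2, 9) with 2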
spec : pandas.DataFrame\n A Pandas DataFrame that gives the specification of the variables to\n compute and the coefficients for each variable.\n Variable specifications must be in the table index and the\n table should have only one column of coefficients.\n sample_size : int, optional\n Sample alternatives with sample of given size. By default is None,\n which does not sample alternatives.\n alt_col_name: str or None\n name to give the sampled_alternative column\n skims : Skims object\n The skims object is used to contain multiple matrices of\n origin-destination impedances. Make sure to also add it to the\n locals_d below in order to access it in expressions. The *only* job\n of this method in regards to skims is to call set_df with the\n dataframe that comes back from interacting choosers with\n alternatives. See the skims module for more documentation on how\n the skims object is intended to be used.\n locals_d : Dict\n This is a dictionary of local variables that will be the environment\n for an evaluation of an expression that begins with @\n chunk_size : int\n if chunk_size > 0 iterates over choosers in chunk_size chunks\n trace_label: str\n This is the label to be used for trace log file\n entries and dump file names when household tracing enabled.\n No tracing occurs if label is empty or None.\n\n\n Returns\n -------\n ret : pandas.Series\n A series where index should match the index of the choosers DataFrame\n and values will match the index of the alternatives DataFrame -\n choices are simulated in the standard Monte Carlo fashion\n \"\"\"\n\n assert sample_size > 0\n sample_size = min(sample_size, len(alternatives.index))\n\n rows_per_chunk = asim_utils.num_chunk_rows_for_chunk_size(\n chunk_size, choosers, alternatives)\n\n print(\n \"interaction_simulate chunk_size %s num_choosers %s\" %\n (chunk_size, len(choosers.index)))\n\n result_list = []\n for i, num_chunks, chooser_chunk in asim_utils.chunked_choosers(\n choosers, rows_per_chunk):\n\n print(\"Running chunk %s of %s size %d\" % (\n i, num_chunks, len(chooser_chunk)))\n\n choices = _interaction_sample(\n chooser_chunk, alternatives, spec, sample_size, alt_col_name,\n skims, locals_d, tracing.extend_trace_label(\n trace_label, 'chunk_%s' % i))\n\n result_list.append(choices)\n\n # FIXME: this will require 2X RAM\n # if necessary, could append to hdf5 store on disk:\n # http://pandas.pydata.org/pandas-docs/stable/io.html#id2\n if len(result_list) > 1:\n choices = pd.concat(result_list)\n\n assert len(choosers.index) == len(np.unique(choices.index.values))\n\n return choices\n", "id": "2516448", "language": "Python", "matching_score": 5.976869583129883, "max_stars_count": 0, "path": "bayarea_urbansim/baus/interaction_sample.py" }, { "content": "import os\nimport psutil\nimport gc\nimport numpy as np\nimport pandas as pd\nimport logit\nimport tracing\nfrom asim_simulate import add_skims, \\\n chunked_choosers_and_alts, num_chunk_rows_for_chunk_size\nfrom interaction_simulate import eval_interaction_utilities\n\n\nDUMP = False\n\n\ndef _interaction_sample_simulate(\n choosers, alternatives, spec, choice_column,\n skims, locals_d,\n trace_label=None, trace_choice_name=None):\n \"\"\"\n Run a MNL simulation in the situation in which alternatives must\n be merged with choosers because there are interaction terms or\n because alternatives are being sampled.\n\n Parameters are same as for public function interaction_simulate\n\n spec : dataframe\n one row per spec expression and one col with utility coefficient\n\n 
interaction_df : dataframe\n cross join (cartesian product) of choosers with alternatives\n combines columns of choosers and alternatives\n len(df) == len(choosers) * len(alternatives)\n index values (non-unique) are index values from alternatives df\n\n interaction_utilities : dataframe\n the utility of each alternative is sum of the partial utilities\n determined by the various spec expressions and their corresponding\n coefficients yielding a dataframe with len(interaction_df)\n rows and one utility column having the same index as\n interaction_df (non-unique values from alternatives df)\n\n utilities : dataframe\n dot product of model_design.dot(spec)\n yields utility value for element in the cross product of choosers\n and alternatives this is then reshaped as a dataframe with one\n row per chooser and one column per alternative\n\n probs : dataframe\n utilities exponentiated and converted to probabilities\n same shape as utilities, one row per chooser and one column\n for alternative\n\n positions : series\n choices among alternatives with the chosen alternative represented\n as the integer index of the selected alternative column in probs\n\n choices : series\n series with the alternative chosen for each chooser\n the index is same as choosers\n and the series value is the alternative df index of chosen alternative\n\n Returns\n -------\n ret : pandas.Series\n A series where index should match the index of the choosers DataFrame\n and values will match the index of the alternatives DataFrame -\n choices are simulated in the standard Monte Carlo fashion\n \"\"\"\n\n assert len(choosers.index) == len(np.unique(alternatives.index.values))\n\n trace_label = tracing.extend_trace_label(\n trace_label, 'interaction_simulate')\n\n have_trace_targets = trace_label and tracing.has_trace_targets(choosers)\n\n if have_trace_targets:\n tracing.trace_df(\n choosers, tracing.extend_trace_label(trace_label, 'choosers'))\n tracing.trace_df(alternatives, tracing.extend_trace_label(\n trace_label, 'alternatives'), slicer='NONE', transpose=False)\n\n if len(spec.columns) > 1:\n raise RuntimeError('spec must have only one column')\n\n # if using skims, copy index into the dataframe, so it will be\n # available as the \"destination\" for the skims dereference below\n if skims:\n alternatives[alternatives.index.name] = alternatives.index\n\n # in vanilla interaction_simulate interaction_df is cross join of\n # choosers and alternatives\n # interaction_df = logit.interaction_dataset(choosers,\n # alternatives, sample_size) here, alternatives is sparsely\n # repeated once for each (non-dup) sample we expect alternatives\n # to have same index of choosers (but with duplicate index values)\n # so we just need to left join alternatives with choosers\n assert alternatives.index.name == choosers.index.name\n\n interaction_df = pd.merge(\n alternatives, choosers,\n left_index=True, right_index=True,\n suffixes=('', '_r'))\n\n tracing.dump_df(DUMP, interaction_df, trace_label, 'interaction_df')\n\n if skims:\n add_skims(interaction_df, skims)\n\n # evaluate expressions from the spec multiply by coefficients and sum\n # spec is df with one row per spec expression and one col with utility\n # coefficient column names of choosers match spec index values\n # utilities has utility value for element in the cross product of\n # choosers and alternatives interaction_utilities is a df with one\n # utility column and one row per row in alternative\n if have_trace_targets:\n trace_rows, trace_ids = 
tracing.interaction_trace_rows(\n interaction_df, choosers)\n\n tracing.trace_df(interaction_df, tracing.extend_trace_label(\n trace_label, 'interaction_df'), transpose=False)\n else:\n trace_rows = trace_ids = None\n\n interaction_utilities, trace_eval_results \\\n = eval_interaction_utilities(\n spec, interaction_df, locals_d, trace_label, trace_rows)\n\n tracing.dump_df(\n DUMP, interaction_utilities, trace_label, 'interaction_utilities')\n\n if have_trace_targets:\n tracing.trace_interaction_eval_results(\n trace_eval_results, trace_ids,\n tracing.extend_trace_label(trace_label, 'eval'))\n\n tracing.trace_df(\n interaction_utilities, tracing.extend_trace_label(\n trace_label, 'interaction_utilities'), transpose=False)\n\n # reshape utilities (one utility column and one row per row\n # in model_design) to a dataframe with one row per chooser and\n # one column per alternative interaction_utilities is sparse because\n # duplicate sampled alternatives were dropped so we need to pad with\n # dummy utilities so low that they are never chosen\n\n # number of samples per chooser\n sample_counts = interaction_utilities.groupby(\n interaction_utilities.index).size().values\n\n # max number of alternatvies for any chooser\n max_sample_count = sample_counts.max()\n\n # offset of the last row of each chooser in sparse interaction_utilities\n row_offsets = np.insert(sample_counts.cumsum(), 0, 0)\n\n # repeat the row offsets once for each dummy utility to insert\n # (we want to insert dummy utilities at the END of the list of\n # alternative utilities)\n inserts = np.repeat(\n row_offsets[1:], max_sample_count - sample_counts)\n\n # insert the zero-prob utilities to pad each alternative set to same size\n padded_utilities = np.insert(\n interaction_utilities.utility.values, inserts, -999)\n\n # reshape to array with one row per chooser, on column per alternative\n padded_utilities = padded_utilities.reshape(-1, max_sample_count)\n\n # convert to a dataframe with one row per chooser and one column\n # per alternative\n utilities_df = pd.DataFrame(\n padded_utilities,\n index=choosers.index)\n\n # print \"\\nsample_counts\\n\", sample_counts\n # print \"\\nmax_sample_count\\n\", max_sample_count\n # print \"\\nlast_row_offsets\\n\", last_row_offsets\n # print \"\\ninserts\\n\", inserts\n # print \"\\nsparse utilities\\n\", interaction_utilities.utility.values\n # print \"\\npadded_utilities\\n\", padded_utilities\n # print \"\\nreshaped padded_utilities\\n\", padded_utilities\n\n tracing.dump_df(DUMP, utilities_df, trace_label, 'utilities_df')\n\n if have_trace_targets:\n tracing.trace_df(utilities_df, tracing.extend_trace_label(\n trace_label, 'utilities'),\n column_labels=['alternative', 'utility'])\n\n # convert to probabilities (utilities exponentiated and normalized\n # to probs) probs is same shape as utilities, one row per chooser\n # and one column for alternative\n probs = logit.utils_to_probs(\n utilities_df, trace_label=trace_label, trace_choosers=choosers)\n\n if have_trace_targets:\n tracing.trace_df(probs, tracing.extend_trace_label(\n trace_label, 'probs'),\n column_labels=['alternative', 'probability'])\n\n tracing.dump_df(DUMP, probs, trace_label, 'probs')\n\n # make choices\n # positions is series with the chosen alternative represented as a\n # column index in probs which is an integer between zero and num\n # alternatives in the alternative sample\n positions, rands = logit.make_choices(\n probs, trace_label=trace_label, trace_choosers=choosers)\n\n # shouldn't have chosen any of 
the dummy pad utilities\n assert positions.max() < max_sample_count\n\n # need to get from an integer offset into the alternative sample\n # to the alternative index that is, we want the index value of the\n # row that is offset by <position> rows into the tranche of this\n # choosers alternatives created by cross join of alternatives and choosers\n\n # first_row_offsets is the offset into interaction_df df of\n # first row of chooser alternatives\n first_row_offsets = row_offsets[:-1]\n\n # resulting pandas Int64Index has one element per chooser row and\n # is in same order as choosers\n choices = interaction_df[choice_column].take(positions + first_row_offsets)\n\n # create a series with index from choosers and the index of the\n # chosen alternative\n choices = pd.Series(choices, index=choosers.index)\n\n tracing.dump_df(DUMP, choices, trace_label, 'choices')\n\n if have_trace_targets:\n tracing.trace_df(choices, tracing.extend_trace_label(\n trace_label, 'choices'), columns=[None, trace_choice_name])\n tracing.trace_df(rands, tracing.extend_trace_label(\n trace_label, 'rands'), columns=[None, 'rand'])\n\n return choices\n\n\ndef interaction_sample_simulate(\n choosers, alternatives, spec, choice_column=None,\n skims=None, locals_d=None, chunk_size=0,\n trace_label=None, trace_choice_name=None):\n\n \"\"\"\n Run a simulation in the situation in which alternatives must\n be merged with choosers because there are interaction terms or\n because alternatives are being sampled.\n\n optionally (if chunk_size > 0) iterates over choosers in chunk_size chunks\n\n Parameters\n ----------\n choosers : pandas.DataFrame\n DataFrame of choosers\n alternatives : pandas.DataFrame\n DataFrame of alternatives - will be merged with choosers\n spec : pandas.DataFrame\n A Pandas DataFrame that gives the specification of the variables to\n compute and the coefficients for each variable.\n Variable specifications must be in the table index and the\n table should have only one column of coefficients.\n skims : Skims object\n The skims object is used to contain multiple matrices of\n origin-destination impedances. Make sure to also add it to the\n locals_d below in order to access it in expressions. The *only* job\n of this method in regards to skims is to call set_df with the\n dataframe that comes back from interacting choosers with\n alternatives. See the skims module for more documentation on how\n the skims object is intended to be used.\n locals_d : Dict\n This is a dictionary of local variables that will be the environment\n for an evaluation of an expression that begins with @\n chunk_size : int\n if chunk_size > 0 iterates over choosers in chunk_size chunks\n trace_label: str\n This is the label to be used for trace log file entries and\n dump file names when household tracing enabled. 
No tracing\n occurs if label is empty or None.\n trace_choice_name: str\n This is the column label to be used in trace file csv dump of choices\n\n Returns\n -------\n choices : pandas.Series\n A series where index should match the index of the choosers DataFrame\n and values will match the index of the alternatives DataFrame -\n choices are simulated in the standard Monte Carlo fashion\n \"\"\"\n\n rows_per_chunk = num_chunk_rows_for_chunk_size(\n chunk_size, choosers, alternatives)\n\n print(\"interaction_simulate chunk_size %s num_choosers %s\" % (\n chunk_size, len(choosers.index)))\n\n result_list = []\n for i, num_chunks, chooser_chunk, alternative_chunk \\\n in chunked_choosers_and_alts(\n choosers, alternatives, rows_per_chunk):\n\n print(\"Running chunk %s of %s size %d\" % (\n i, num_chunks, len(chooser_chunk)))\n\n choices = _interaction_sample_simulate(\n chooser_chunk, alternative_chunk, spec, choice_column,\n skims, locals_d,\n tracing.extend_trace_label(\n trace_label, 'chunk_%s' % i), trace_choice_name)\n\n result_list.append(choices)\n\n # FIXME: this will require 2X RAM\n # if necessary, could append to hdf5 store on disk:\n # http://pandas.pydata.org/pandas-docs/stable/io.html#id2\n if len(result_list) > 1:\n choices = pd.concat(result_list)\n\n assert len(choices.index == len(choosers.index))\n\n return choices\n", "id": "5233096", "language": "Python", "matching_score": 6.901908874511719, "max_stars_count": 0, "path": "bayarea_urbansim/baus/interaction_sample_simulate.py" }, { "content": "import os\nimport psutil\nimport gc\nimport numpy as np\nimport pandas as pd\nimport logit\nimport tracing\nimport asim_utils\n\nDUMP = False\n\n\ndef eval_interaction_utilities(spec, df, locals_d, trace_label, trace_rows):\n \"\"\"\n Compute the utilities for a single-alternative spec evaluated in\n the context of df\n\n We could compute the utilities for interaction datasets just as we\n do for simple_simulate specs with multiple alternative columns by\n calling eval_variables and then computing the utilities by\n matrix-multiplication of eval results with the utility coefficients in the\n spec alternative columns.\n\n But interaction simulate computes the utilities of each alternative in\n the context of a separate row in interaction dataset df, and so there\n is only one alternative in spec. This turns out to be quite a bit faster\n (in this special case) than the pandas dot function.\n\n For efficiency, we combine eval_variables and multiplication of\n coefficients into a single step, so we don't have to create a separate\n column for each partial utility. 
Instead, we simply multiply the eval\n result by a single alternative coefficient and sum the partial utilities.\n\n\n spec : dataframe\n one row per spec expression and one col with utility coefficient\n\n df : dataframe\n cross join (cartesian product) of choosers with alternatives\n combines columns of choosers and alternatives\n len(df) == len(choosers) * len(alternatives)\n index values (non-unique) are index values from alternatives df\n\n interaction_utilities : dataframe\n the utility of each alternative is sum of the partial utilities\n determined by the various spec expressions and their\n corresponding coefficients yielding a dataframe with\n len(interaction_df) rows and one utility column having the same\n index as interaction_df (non-unique values from alternatives df)\n\n Returns\n -------\n utilities : pandas.DataFrame\n Will have the index of `df` and a single column of utilities\n\n \"\"\"\n assert(len(spec.columns) == 1)\n\n # avoid altering caller's passed-in locals_d parameter\n # (they may be looping)\n locals_d = locals_d.copy() if locals_d is not None else {}\n locals_d.update(locals())\n\n def to_series(x):\n if np.isscalar(x):\n return pd.Series([x] * len(df), index=df.index)\n return x\n\n if trace_rows is not None and trace_rows.any():\n # # convert to numpy array so we can slice ndarrays as well as series\n # trace_rows = np.asanyarray(trace_rows)\n assert type(trace_rows) == np.ndarray\n trace_eval_results = []\n else:\n trace_eval_results = None\n\n check_for_variability = tracing.check_for_variability()\n\n # need to be able to identify which variables causes an error, which keeps\n # this from being expressed more parsimoniously\n\n utilities = pd.DataFrame({'utility': 0.0}, index=df.index)\n no_variability = has_missing_vals = 0\n\n for expr, coefficient in zip(spec.index, spec.iloc[:, 0]):\n try:\n\n if expr.startswith('@'):\n v = to_series(eval(expr[1:], globals(), locals_d))\n else:\n v = df.eval(expr)\n\n if check_for_variability and v.std() == 0:\n print(\"%s: no variability (%s) in: %s\" % (\n trace_label, v.iloc[0], expr))\n no_variability += 1\n\n # FIXME - how likely is this to happen?\n # Not sure it is really a problem?\n if check_for_variability and np.count_nonzero(\n v.isnull().values) > 0:\n print(\"%s: missing values in: %s\" % (trace_label, expr))\n has_missing_vals += 1\n\n utilities.utility += (v * coefficient).astype('float')\n\n if trace_eval_results is not None:\n trace_eval_results.append((expr,\n v[trace_rows]))\n trace_eval_results.append((\n 'partial utility (coefficient = %s)' % coefficient,\n v[trace_rows] * coefficient))\n\n except Exception as err:\n print(\"Variable evaluation failed for: %s\" % str(expr))\n raise err\n\n if no_variability > 0:\n print(\"%s: %s columns have no variability\" % (\n trace_label, no_variability))\n\n if has_missing_vals > 0:\n print(\"%s: %s columns have missing values\" % (\n trace_label, has_missing_vals))\n\n if trace_eval_results is not None:\n\n trace_eval_results.append(('total utility',\n utilities.utility[trace_rows]))\n\n trace_eval_results = pd.DataFrame.from_items(trace_eval_results)\n trace_eval_results.index = df[trace_rows].index\n\n # add df columns to trace_results\n trace_eval_results = pd.concat(\n [df[trace_rows], trace_eval_results], axis=1)\n\n return utilities, trace_eval_results\n\n\ndef _interaction_simulate(\n choosers, alternatives, spec,\n skims=None, locals_d=None, sample_size=None,\n trace_label=None, trace_choice_name=None):\n \"\"\"\n Run a MNL simulation in the 
situation in which alternatives must\n be merged with choosers because there are interaction terms or\n because alternatives are being sampled.\n\n Parameters are same as for public function interaction_simulate\n\n spec : dataframe\n one row per spec expression and one col with utility coefficient\n\n interaction_df : dataframe\n cross join (cartesian product) of choosers with alternatives\n combines columns of choosers and alternatives\n len(df) == len(choosers) * len(alternatives)\n index values (non-unique) are index values from alternatives df\n\n interaction_utilities : dataframe\n the utility of each alternative is sum of the partial utilities\n determined by the various spec expressions and their\n corresponding coefficients yielding a dataframe with\n len(interaction_df) rows and one utility column having the same\n index as interaction_df (non-unique values from alternatives df)\n\n utilities : dataframe\n dot product of model_design.dot(spec)\n yields utility value for element in the cross product of choosers\n and alternatives this is then reshaped as a dataframe with one\n row per chooser and one column per alternative\n\n probs : dataframe\n utilities exponentiated and converted to probabilities\n same shape as utilities, one row per chooser and one column\n for alternative\n\n positions : series\n choices among alternatives with the chosen alternative represented\n as the integer index of the selected alternative column in probs\n\n choices : series\n series with the alternative chosen for each chooser\n the index is same as choosers\n and the series value is the alternative df index of chosen alternative\n\n Returns\n -------\n ret : pandas.Series\n A series where index should match the index of the choosers DataFrame\n and values will match the index of the alternatives DataFrame -\n choices are simulated in the standard Monte Carlo fashion\n \"\"\"\n\n trace_label = tracing.extend_trace_label(\n trace_label, 'interaction_simulate')\n have_trace_targets = trace_label and tracing.has_trace_targets(choosers)\n\n if have_trace_targets:\n tracing.trace_df(choosers, tracing.extend_trace_label(\n trace_label, 'choosers'))\n tracing.trace_df(alternatives, tracing.extend_trace_label(\n trace_label, 'alternatives'), slicer='NONE', transpose=False)\n\n if len(spec.columns) > 1:\n raise RuntimeError('spec must have only one column')\n\n sample_size = sample_size or len(alternatives)\n\n if sample_size > len(alternatives):\n print(\"clipping sample size %s to len(alternatives) %s\" % (\n sample_size, len(alternatives)))\n sample_size = min(sample_size, len(alternatives))\n\n # if using skims, copy index into the dataframe, so it will be\n # available as the \"destination\" for the skims dereference below\n if skims:\n alternatives[alternatives.index.name] = alternatives.index\n\n # cross join choosers and alternatives (cartesian product)\n # for every chooser, there will be a row for each alternative\n # index values (non-unique) are from alternatives df\n interaction_df = logit.interaction_dataset(\n choosers, alternatives, sample_size)\n\n if skims:\n asim_utils.add_skims(interaction_df, skims)\n\n # evaluate expressions from the spec multiply by coefficients and sum\n # spec is df with one row per spec expression and one col with\n # utility coefficient column names of model_design match spec index values\n # utilities has utility value for element in the cross product of\n # choosers and alternatives interaction_utilities is a df with one\n # utility column and one row per row in 
model_design\n if have_trace_targets:\n trace_rows, trace_ids \\\n = tracing.interaction_trace_rows(\n interaction_df, choosers, sample_size)\n\n tracing.trace_df(\n interaction_df[trace_rows], tracing.extend_trace_label(\n trace_label, 'interaction_df'), slicer='NONE', transpose=False)\n else:\n trace_rows = trace_ids = None\n\n interaction_utilities, trace_eval_results \\\n = eval_interaction_utilities(\n spec, interaction_df, locals_d, trace_label, trace_rows)\n\n if have_trace_targets:\n tracing.trace_interaction_eval_results(\n trace_eval_results, trace_ids, tracing.extend_trace_label(\n trace_label, 'eval'))\n\n tracing.trace_df(\n interaction_utilities[trace_rows], tracing.extend_trace_label(\n trace_label, 'interaction_utilities'), slicer='NONE',\n transpose=False)\n\n # reshape utilities (one utility column and one row per\n # row in model_design) to a dataframe with one row per chooser and\n # one column per alternative\n utilities = pd.DataFrame(\n interaction_utilities.as_matrix().reshape(len(choosers), sample_size),\n index=choosers.index)\n\n if have_trace_targets:\n tracing.trace_df(\n utilities, tracing.extend_trace_label(trace_label, 'utilities'),\n column_labels=['alternative', 'utility'])\n\n tracing.dump_df(DUMP, utilities, trace_label, 'utilities')\n\n # convert to probabilities (utilities exponentiated and\n # normalized to probs) probs is same shape as utilities,\n # one row per chooser and one column for alternative\n probs = logit.utils_to_probs(\n utilities, trace_label=trace_label, trace_choosers=choosers)\n\n if have_trace_targets:\n tracing.trace_df(\n probs, tracing.extend_trace_label(trace_label, 'probs'),\n column_labels=['alternative', 'probability'])\n\n # make choices\n # positions is series with the chosen alternative represented\n # as a column index in probs which is an integer between zero and\n # num alternatives in the alternative sample\n positions, rands = logit.make_choices(\n probs, trace_label=trace_label, trace_choosers=choosers)\n\n # need to get from an integer offset into the alternative sample\n # to the alternative index that is, we want the index value of the\n # row that is offset by <position> rows into the tranche of this\n # choosers alternatives created by cross join of alternatives and choosers\n\n # offsets is the offset into model_design df of first row of\n # chooser alternatives\n offsets = np.arange(len(positions)) * sample_size\n # resulting pandas Int64Index has one element per chooser row and is\n # in same order as choosers\n choices = interaction_utilities.index.take(positions + offsets)\n\n # create a series with index from choosers and the\n # index of the chosen alternative\n choices = pd.Series(choices, index=choosers.index)\n\n if have_trace_targets:\n tracing.trace_df(\n choices, tracing.extend_trace_label(trace_label, 'choices'),\n columns=[None, trace_choice_name])\n tracing.trace_df(\n rands, tracing.extend_trace_label(trace_label, 'rands'),\n columns=[None, 'rand'])\n\n return choices\n\n\ndef interaction_simulate(\n choosers, alternatives, spec,\n skims=None, locals_d=None, sample_size=None, chunk_size=0,\n trace_label=None, trace_choice_name=None):\n\n \"\"\"\n Run a simulation in the situation in which alternatives must\n be merged with choosers because there are interaction terms or\n because alternatives are being sampled.\n\n optionally (if chunk_size > 0) iterates over choosers in chunk_size chunks\n\n Parameters\n ----------\n choosers : pandas.DataFrame\n DataFrame of choosers\n alternatives : 
pandas.DataFrame\n DataFrame of alternatives - will be merged with choosers, currently\n without sampling\n spec : pandas.DataFrame\n A Pandas DataFrame that gives the specification of the variables to\n compute and the coefficients for each variable.\n Variable specifications must be in the table index and the\n table should have only one column of coefficients.\n skims : Skims object\n The skims object is used to contain multiple matrices of\n origin-destination impedances. Make sure to also add it to the\n locals_d below in order to access it in expressions. The *only* job\n of this method in regards to skims is to call set_df with the\n dataframe that comes back from interacting choosers with\n alternatives. See the skims module for more documentation on how\n the skims object is intended to be used.\n locals_d : Dict\n This is a dictionary of local variables that will be the environment\n for an evaluation of an expression that begins with @\n sample_size : int, optional\n Sample alternatives with sample of given size. By default is None,\n which does not sample alternatives.\n chunk_size : int\n if chunk_size > 0 iterates over choosers in chunk_size chunks\n trace_label: str\n This is the label to be used for trace log file\n entries and dump file names when household tracing enabled.\n No tracing occurs if label is empty or None.\n trace_choice_name: str\n This is the column label to be used in trace file csv dump of choices\n\n Returns\n -------\n choices : pandas.Series\n A series where index should match the index of the choosers DataFrame\n and values will match the index of the alternatives DataFrame -\n choices are simulated in the standard Monte Carlo fashion\n \"\"\"\n\n assert len(choosers) > 0\n\n rows_per_chunk = asim_utils.num_chunk_rows_for_chunk_size(\n chunk_size, choosers, alternatives)\n\n print(\"interaction_simulate chunk_size %s num_choosers %s\" % (\n chunk_size, len(choosers.index)))\n\n result_list = []\n for i, num_chunks, chooser_chunk in asim_utils.chunked_choosers(\n choosers, rows_per_chunk):\n\n print(\"Running chunk %s of %s size %d\" % (\n i, num_chunks, len(chooser_chunk)))\n\n choices = _interaction_simulate(\n chooser_chunk, alternatives, spec, skims, locals_d, sample_size,\n tracing.extend_trace_label(trace_label, 'chunk_%s' % i),\n trace_choice_name)\n\n result_list.append(choices)\n\n # FIXME: this will require 2X RAM\n # if necessary, could append to hdf5 store on disk:\n # http://pandas.pydata.org/pandas-docs/stable/io.html#id2\n if len(result_list) > 1:\n choices = pd.concat(result_list)\n\n assert len(choices.index == len(choosers.index))\n\n return choices\n", "id": "9071568", "language": "Python", "matching_score": 6.416699409484863, "max_stars_count": 0, "path": "bayarea_urbansim/baus/interaction_simulate.py" }, { "content": "from math import ceil\nimport os\nimport numpy as np\nimport pandas as pd\nfrom skim import SkimDictWrapper, SkimStackWrapper\nimport logit\nimport tracing\nimport asim_utils\n\n\ndef num_chunk_rows_for_chunk_size(chunk_size, choosers, alternatives=None,\n by_chunk_id=False):\n\n # FIXME - chunk size should take number of chooser and alternative\n # columns into account\n # FIXME - that is, chunk size should represent memory footprint\n # (rows X columns) not just rows\n\n if by_chunk_id:\n num_choosers = choosers['chunk_id'].max() + 1\n else:\n num_choosers = len(choosers.index)\n\n # if not chunking, then return num_choosers\n if chunk_size == 0:\n return num_choosers\n\n row_size = len(choosers.columns)\n\n if 
alternatives is not None:\n alt_row_size = len(alternatives.columns)\n row_size = row_size * alt_row_size\n\n if by_chunk_id:\n # scale row_size by average number of chooser rows per chunk_id\n rows_per_chunk_id = len(choosers.index) / float(num_choosers)\n row_size = int(rows_per_chunk_id * row_size)\n\n # closest number of chooser rows to achieve chunk_size\n rows_per_chunk = int(round(chunk_size / float(row_size)))\n rows_per_chunk = max(rows_per_chunk, 1)\n\n print(\n \"num_chunk_rows_for_chunk_size %s row_size %s rows_per_chunk %s \"\n \"num_choosers %s chunks %s\" % (\n chunk_size, row_size, rows_per_chunk, num_choosers,\n int(ceil(num_choosers / float(rows_per_chunk)))))\n\n return rows_per_chunk\n\n\ndef chunked_choosers(choosers, rows_per_chunk):\n # generator to iterate over chooses in chunk_size chunks\n num_choosers = len(choosers.index)\n num_chunks = (num_choosers // rows_per_chunk) + (\n num_choosers % rows_per_chunk > 0)\n\n i = offset = 0\n while offset < num_choosers:\n yield i + 1, num_chunks, choosers[offset: offset + rows_per_chunk]\n offset += rows_per_chunk\n i += 1\n\n\ndef chunked_choosers_and_alts(choosers, alternatives, rows_per_chunk):\n \"\"\"\n like chunked_choosers, but also chunks alternatives\n for use with sampled alternatives which will have\n different alternatives (and numbers of alts)\n\n There may be up to sample_size (or as few as one) alternatives\n for each chooser because alternatives may have been sampled more\n than once, but pick_count for those alternatives will always sum\n to sample_size.\n\n When we chunk the choosers, we need to take care chunking the\n alternatives as there are varying numbers of them for each chooser.\n Since alternatives appear in the same order as choosers, we can use\n cumulative pick_counts to identify boundaries of sets of alternatives\n\n Parameters\n ----------\n choosers\n alternatives : pandas DataFrame\n sample alternatives including pick_count column in same\n order as choosers\n rows_per_chunk : int\n\n Yields\n -------\n i : int\n one-based index of current chunk\n num_chunks : int\n total number of chunks that will be yielded\n choosers : pandas DataFrame slice\n chunk of choosers\n alternatives : pandas DataFrame slice\n chunk of alternatives for chooser chunk\n \"\"\"\n\n assert 'cum_pick_count' not in alternatives.columns\n alternatives['cum_pick_count'] = alternatives['pick_count'].cumsum()\n\n # currently no convenient way to remember sample_size across steps\n pick_count = alternatives.cum_pick_count.iat[-1]\n sample_size = pick_count / len(choosers.index)\n assert pick_count % sample_size == 0\n\n # generator to iterate over choosers and alternatives in chunk_size chunks\n num_choosers = len(choosers.index)\n num_chunks = (num_choosers // rows_per_chunk) + \\\n (num_choosers % rows_per_chunk > 0)\n\n alt_chunk_size = rows_per_chunk * sample_size\n\n # array of indices of starts of alt chunks\n alt_chunk_end = np.where(alternatives[\n 'cum_pick_count'] % alt_chunk_size == 0)[0] + 1\n # plus index of end of array for any final partial chunk\n alt_chunk_end = np.append(alt_chunk_end, [len(alternatives.index)])\n\n i = offset = alt_offset = 0\n while offset < num_choosers:\n\n alt_end = alt_chunk_end[i]\n\n chooser_chunk = choosers[offset: offset + rows_per_chunk]\n alternative_chunk = alternatives[alt_offset: alt_end]\n\n assert len(chooser_chunk.index) == len(\n np.unique(alternative_chunk.index.values))\n\n yield i + 1, num_chunks, chooser_chunk, alternative_chunk\n\n i += 1\n offset += 
rows_per_chunk\n alt_offset = alt_end\n\n\ndef hh_chunked_choosers(choosers, rows_per_chunk):\n # generator to iterate over choosers in chunk_size chunks\n # like chunked_choosers but based on chunk_id field\n # rather than dataframe length\n # the presumption is that choosers has multiple rows with the\n # same chunk_id that all have to be included in the same chunk)\n # FIXME - we pathologically know name of chunk_id col in households table\n\n num_choosers = choosers['chunk_id'].max() + 1\n num_chunks = (num_choosers // rows_per_chunk) + \\\n (num_choosers % rows_per_chunk > 0)\n\n i = offset = 0\n while offset < num_choosers:\n chooser_chunk = choosers[choosers['chunk_id'].between(\n offset, offset + rows_per_chunk - 1)]\n yield i + 1, num_chunks, chooser_chunk\n offset += rows_per_chunk\n i += 1\n\n\ndef random_rows(df, n):\n\n # only sample if df has more than n rows\n if len(df.index) > n:\n prng = asim_utils.get_rn_generator().get_global_rng()\n return df.take(prng.choice(len(df), size=n, replace=False))\n\n else:\n return df\n\n\ndef read_model_spec(fpath, fname,\n description_name=\"Description\",\n expression_name=\"Expression\"):\n \"\"\"\n Read a CSV model specification into a Pandas DataFrame or Series.\n\n The CSV is expected to have columns for component descriptions\n and expressions, plus one or more alternatives.\n\n The CSV is required to have a header with column names. For example:\n\n Description,Expression,alt0,alt1,alt2\n\n Parameters\n ----------\n fpath : str\n path to directory containing file.\n fname : str\n Name of a CSV spec file\n description_name : str, optional\n Name of the column in `fname` that contains the component description.\n expression_name : str, optional\n Name of the column in `fname` that contains the component expression.\n\n Returns\n -------\n spec : pandas.DataFrame\n The description column is dropped from the returned data and the\n expression values are set as the table index.\n \"\"\"\n\n with open(os.path.join(fpath, fname)) as f:\n spec = pd.read_csv(f, comment='#')\n\n spec = spec.dropna(subset=[expression_name])\n\n # don't need description and set the expression to the index\n if description_name in spec.columns:\n spec = spec.drop(description_name, axis=1)\n\n spec = spec.set_index(expression_name).fillna(0)\n\n return spec\n\n\ndef eval_variables(exprs, df, locals_d=None, target_type=np.float64):\n \"\"\"\n Evaluate a set of variable expressions from a spec in the context\n of a given data table.\n\n There are two kinds of supported expressions: \"simple\" expressions are\n evaluated in the context of the DataFrame using DataFrame.eval.\n This is the default type of expression.\n\n Python expressions are evaluated in the context of this function using\n Python's eval function. 
Because we use Python's eval this type of\n expression supports more complex operations than a simple expression.\n Python expressions are denoted by beginning with the @ character.\n Users should take care that these expressions must result in\n a Pandas Series.\n\n Parameters\n ----------\n exprs : sequence of str\n df : pandas.DataFrame\n locals_d : Dict\n This is a dictionary of local variables that will be the environment\n for an evaluation of an expression that begins with @\n target_type: dtype or None\n type to coerce results or None if no coercion desired\n\n Returns\n -------\n variables : pandas.DataFrame\n Will have the index of `df` and columns of eval results of `exprs`.\n \"\"\"\n\n # avoid altering caller's passed-in locals_d parameter\n # (they may be looping)\n locals_d = locals_d.copy() if locals_d is not None else {}\n locals_d.update(locals())\n\n def to_series(x):\n if np.isscalar(x):\n return pd.Series([x] * len(df), index=df.index)\n return x\n\n lll = []\n # need to be able to identify which variables causes an error, which keeps\n # this from being expressed more parsimoniously\n for expr in exprs:\n try:\n if expr.startswith('@'):\n expr_values = to_series(eval(expr[1:], globals(), locals_d))\n else:\n expr_values = df.eval(expr)\n lll.append((expr, expr_values))\n except Exception as err:\n print(\"Variable evaluation failed for: %s\" % str(expr))\n raise err\n\n values = pd.DataFrame.from_items(lll)\n\n # FIXME - for performance, it is essential that spec and expression_values\n # FIXME - not contain booleans when dotted with spec values\n # FIXME - or the arrays will be converted to dtype=object within dot()\n if target_type is not None:\n values = values.astype(target_type)\n\n return values\n\n\ndef compute_utilities(expression_values, spec):\n\n # matrix product of spec expression_values with utility\n # coefficients of alternatives sums the partial utilities\n # (represented by each spec row) of the alternatives resulting in\n # a dataframe with one row per chooser and one column per alternative\n # pandas.dot depends on column names of expression_values matching\n # spec index values\n\n # FIXME - for performance, it is essential that spec and expression_values\n # FIXME - not contain booleans when dotted with spec values\n # FIXME - or the arrays will be converted to dtype=object within dot()\n\n spec = spec.astype(np.float64)\n\n utilities = expression_values.dot(spec)\n\n return utilities\n\n\ndef add_skims(df, skims):\n \"\"\"\n Add the dataframe to the SkimDictWrapper\n object so that it can be dereferenced\n using the parameters of the skims object.\n\n Parameters\n ----------\n df : pandas.DataFrame\n Table to which to add skim data as new columns.\n `df` is modified in-place.\n skims : SkimDictWrapper object\n The skims object is used to contain multiple matrices of\n origin-destination impedances. Make sure to also add it to the\n locals_d below in order to access it in expressions. The *only* job\n of this method in regards to skims is to call set_df with the\n dataframe that comes back from interacting choosers with\n alternatives. 
See the skims module for more documentation on how\n the skims object is intended to be used.\n \"\"\"\n if not isinstance(skims, list):\n assert isinstance(skims, SkimDictWrapper) or isinstance(\n skims, SkimStackWrapper)\n skims.set_df(df)\n else:\n for skim in skims:\n assert isinstance(skim, SkimDictWrapper) or isinstance(\n skim, SkimStackWrapper)\n skim.set_df(df)\n\n\ndef _check_for_variability(expression_values, trace_label):\n \"\"\"\n This is an internal method which checks for variability in each\n expression - under the assumption that you probably wouldn't be using a\n variable (in live simulations) if it had no variability. This is a\n warning to the user that they might have constructed the variable\n incorrectly. It samples 1000 rows in order to not hurt performance -\n it's likely that if 1000 rows have no variability, the whole dataframe\n will have no variability.\n \"\"\"\n\n if trace_label is None:\n trace_label = '_check_for_variability'\n\n lll = min(1000, len(expression_values))\n\n sample = random_rows(expression_values, lll)\n\n no_variability = has_missing_vals = 0\n for i in range(len(sample.columns)):\n v = sample.iloc[:, i]\n if v.min() == v.max():\n col_name = sample.columns[i]\n print(\"%s: no variability (%s) in: %s\" % (\n trace_label, v.iloc[0], col_name))\n no_variability += 1\n # FIXME - how could this happen? Not sure it is really a problem?\n if np.count_nonzero(v.isnull().values) > 0:\n col_name = sample.columns[i]\n print(\"%s: missing values in: %s\" % (\n trace_label, v.iloc[0], col_name))\n has_missing_vals += 1\n\n if no_variability > 0:\n print(\"%s: %s columns have no variability\" % (\n trace_label, no_variability))\n\n if has_missing_vals > 0:\n print(\"%s: %s columns have missing values\" % (\n trace_label, has_missing_vals))\n\n\ndef compute_nested_exp_utilities(raw_utilities, nest_spec):\n \"\"\"\n compute exponentiated nest utilities based on nesting coefficients\n\n For nest nodes this is the exponentiated logsum of alternatives\n adjusted by nesting coefficient\n\n leaf <- exp( raw_utility )\n nest <- exp( ln(sum of exponentiated raw_utility of\n leaves) * nest_coefficient)\n\n Parameters\n ----------\n raw_utilities : pandas.DataFrame\n dataframe with the raw alternative utilities of all leaves\n (what in non-nested logit would be the utilities of all the\n alternatives)\n nest_spec : dict\n Nest tree dict from the model spec yaml file\n\n Returns\n -------\n nested_utilities : pandas.DataFrame\n Will have the index of `raw_utilities` and columns for\n exponentiated leaf and node utilities\n \"\"\"\n nested_utilities = pd.DataFrame(index=raw_utilities.index)\n\n for nest in logit.each_nest(nest_spec, post_order=True):\n\n name = nest.name\n\n if nest.is_leaf:\n # leaf_utility = raw_utility / nest.product_of_coefficients\n nested_utilities[name] = raw_utilities[name].astype(\n float) / nest.product_of_coefficients\n\n else:\n # nest node\n # the alternative nested_utilities will already have\n # been computed due to post_order\n # this will RuntimeWarning: divide by zero encountered in log\n # if all nest alternative utilities are zero\n # but the resulting inf will become 0 when exp is applied below\n nested_utilities[name] = \\\n nest.coefficient * np.log(\n nested_utilities[nest.alternatives].sum(axis=1))\n\n # exponentiate the utility\n nested_utilities[name] = np.exp(nested_utilities[name])\n\n return nested_utilities\n\n\ndef compute_nested_probabilities(nested_exp_utilities, nest_spec, trace_label):\n \"\"\"\n compute nested 
probabilities for nest leafs and nodes\n probability for nest alternatives is simply the alternatives's\n local (to nest) probability\n computed in the same way as the probability of non-nested alternatives\n in multinomial logit\n i.e. the fractional share of the sum of the exponentiated utility of\n itself and its siblings\n except in nested logit, its sib group is restricted to the nest\n\n Parameters\n ----------\n nested_exp_utilities : pandas.DataFrame\n dataframe with the exponentiated nested utilities of all leaves\n and nodes\n nest_spec : dict\n Nest tree dict from the model spec yaml file\n Returns\n -------\n nested_probabilities : pandas.DataFrame\n Will have the index of `nested_exp_utilities` and columns for leaf\n and node probabilities\n \"\"\"\n\n nested_probabilities = pd.DataFrame(index=nested_exp_utilities.index)\n\n for nest in logit.each_nest(nest_spec, type='node', post_order=False):\n\n probs = logit.utils_to_probs(nested_exp_utilities[nest.alternatives],\n trace_label=trace_label,\n exponentiated=True,\n allow_zero_probs=True)\n\n nested_probabilities = pd.concat([nested_probabilities, probs], axis=1)\n\n return nested_probabilities\n\n\ndef compute_base_probabilities(nested_probabilities, nests):\n \"\"\"\n compute base probabilities for nest leaves\n Base probabilities will be the nest-adjusted probabilities of all leaves\n This flattens or normalizes all the nested probabilities so that they\n have the proper global\n relative values (the leaf probabilities sum to 1 for each row.)\n\n Parameters\n ----------\n nested_probabilities : pandas.DataFrame\n dataframe with the nested probabilities for nest leafs and nodes\n nest_spec : dict\n Nest tree dict from the model spec yaml file\n Returns\n -------\n base_probabilities : pandas.DataFrame\n Will have the index of `nested_probabilities` and columns for\n leaf base probabilities\n \"\"\"\n\n base_probabilities = pd.DataFrame(index=nested_probabilities.index)\n\n for nest in logit.each_nest(nests, type='leaf', post_order=False):\n\n # skip root: it has a prob of 1 but we didn't compute a nested\n # probability column for it\n ancestors = nest.ancestors[1:]\n\n base_probabilities[nest.name] = nested_probabilities[\n ancestors].prod(axis=1)\n\n return base_probabilities\n\n\ndef eval_mnl(choosers, spec, locals_d,\n trace_label=None, trace_choice_name=None):\n \"\"\"\n Run a simulation for when the model spec does not involve alternative\n specific data, e.g. there are no interactions with alternative\n properties and no need to sample from alternatives.\n\n Each row in spec computes a partial utility for each alternative,\n by providing a spec expression (often a boolean 0-1 trigger)\n and a column of utility coefficients for each alternative.\n\n We compute the utility of each alternative by matrix-multiplication\n of eval results\n with the utility coefficients in the spec alternative columns\n yielding one row per chooser and one column per alternative\n\n Parameters\n ----------\n choosers : pandas.DataFrame\n spec : pandas.DataFrame\n A table of variable specifications and coefficient values.\n Variable expressions should be in the table index and the table\n should have a column for each alternative.\n locals_d : Dict or None\n This is a dictionary of local variables that will be the environment\n for an evaluation of an expression that begins with @\n trace_label: str\n This is the label to be used for trace log file entries and\n dump file names\n when household tracing enabled. 
No tracing occurs if label is\n empty or None.\n trace_choice_name: str\n This is the column label to be used in trace file csv dump of choices\n\n Returns\n -------\n choices : pandas.Series\n Index will be that of `choosers`, values will match the columns\n of `spec`.\n \"\"\"\n\n trace_label = tracing.extend_trace_label(trace_label, 'mnl')\n check_for_variability = tracing.check_for_variability()\n\n t0 = tracing.print_elapsed_time()\n\n expression_values = eval_variables(spec.index, choosers, locals_d)\n t0 = tracing.print_elapsed_time(\"eval_variables\", t0, debug=True)\n\n if check_for_variability:\n _check_for_variability(expression_values, trace_label)\n\n # matrix product of spec expression_values with utility coefficients\n # of alternatives sums the partial utilities (represented by each spec\n # row) of the alternatives resulting in a dataframe with one row per\n # chooser and one column per alternative pandas.dot depends on column\n # names of expression_values matching spec index values\n\n utilities = compute_utilities(expression_values, spec)\n t0 = tracing.print_elapsed_time(\"expression_values.dot\", t0, debug=True)\n\n probs = logit.utils_to_probs(\n utilities, trace_label=trace_label, trace_choosers=choosers)\n t0 = tracing.print_elapsed_time(\"logit.utils_to_probs\", t0, debug=True)\n\n choices, rands = logit.make_choices(\n probs, trace_label=trace_label, trace_choosers=choosers)\n t0 = tracing.print_elapsed_time(\"logit.make_choices\", t0, debug=True)\n\n if trace_label:\n\n tracing.trace_df(choosers, '%s.choosers' % trace_label)\n tracing.trace_df(utilities, '%s.utilities' % trace_label,\n column_labels=['alternative', 'utility'])\n tracing.trace_df(probs, '%s.probs' % trace_label,\n column_labels=['alternative', 'probability'])\n tracing.trace_df(choices, '%s.choices' % trace_label,\n columns=[None, trace_choice_name])\n tracing.trace_df(rands, '%s.rands' % trace_label,\n columns=[None, 'rand'])\n tracing.trace_df(\n expression_values, '%s.expression_values' % trace_label,\n column_labels=['expression', None])\n\n return choices\n\n\ndef eval_nl(choosers, spec, nest_spec, locals_d,\n trace_label=None, trace_choice_name=None):\n \"\"\"\n Run a nested-logit simulation for when the model spec does not\n involve alternative\n specific data, e.g. there are no interactions with alternative\n properties and no need to sample from alternatives.\n\n Parameters\n ----------\n choosers : pandas.DataFrame\n spec : pandas.DataFrame\n A table of variable specifications and coefficient values.\n Variable expressions should be in the table index and the table\n should have a column for each alternative.\n nest_spec:\n dictionary specifying nesting structure and nesting coefficients\n (from the model spec yaml file)\n locals_d : Dict or None\n This is a dictionary of local variables that will be the environment\n for an evaluation of an expression that begins with @\n trace_label: str\n This is the label to be used for trace log file entries\n and dump file names\n when household tracing enabled. 
No tracing occurs if\n label is empty or None.\n trace_choice_name: str\n This is the column label to be used in trace file csv dump of choices\n\n Returns\n -------\n choices : pandas.Series\n Index will be that of `choosers`, values will match the columns\n of `spec`.\n \"\"\"\n\n trace_label = tracing.extend_trace_label(trace_label, 'nl')\n check_for_variability = tracing.check_for_variability()\n\n t0 = tracing.print_elapsed_time()\n\n # column names of expression_values match spec index values\n expression_values = eval_variables(spec.index, choosers, locals_d)\n t0 = tracing.print_elapsed_time(\"eval_variables\", t0, debug=True)\n\n if check_for_variability:\n _check_for_variability(expression_values, trace_label)\n t0 = tracing.print_elapsed_time(\"_check_for_variability\", t0, debug=True)\n\n # raw utilities of all the leaves\n raw_utilities = compute_utilities(expression_values, spec)\n t0 = tracing.print_elapsed_time(\"expression_values.dot\", t0, debug=True)\n\n # exponentiated utilities of leaves and nests\n nested_exp_utilities = compute_nested_exp_utilities(\n raw_utilities, nest_spec)\n t0 = tracing.print_elapsed_time(\n \"compute_nested_exp_utilities\", t0, debug=True)\n\n # probabilities of alternatives relative to siblings sharing the same nest\n nested_probabilities = compute_nested_probabilities(\n nested_exp_utilities, nest_spec, trace_label=trace_label)\n t0 = tracing.print_elapsed_time(\n \"compute_nested_probabilities\", t0, debug=True)\n\n # global (flattened) leaf probabilities based on relative nest coefficients\n base_probabilities = compute_base_probabilities(\n nested_probabilities, nest_spec)\n t0 = tracing.print_elapsed_time(\n \"compute_base_probabilities\", t0, debug=True)\n\n # note base_probabilities could all be zero since we allowed all probs\n # for nests to be zero check here to print a clear message but\n # make_choices will raise error if probs don't sum to 1\n BAD_PROB_THRESHOLD = 0.001\n no_choices = \\\n base_probabilities.sum(axis=1).sub(\n np.ones(len(base_probabilities.index))).abs() \\\n > BAD_PROB_THRESHOLD * np.ones(len(base_probabilities.index))\n\n if no_choices.any():\n logit.report_bad_choices(\n no_choices, base_probabilities,\n tracing.extend_trace_label(trace_label, 'eval_nl'),\n tag='bad_probs',\n msg=\"base_probabilities all zero\")\n\n t0 = tracing.print_elapsed_time(\"report_bad_choices\", t0, debug=True)\n\n choices, rands = logit.make_choices(\n base_probabilities, trace_label, trace_choosers=choosers)\n t0 = tracing.print_elapsed_time(\"logit.make_choices\", t0, debug=True)\n\n if trace_label:\n tracing.trace_df(choosers, '%s.choosers' % trace_label)\n tracing.trace_df(raw_utilities, '%s.raw_utilities' % trace_label,\n column_labels=['alternative', 'utility'])\n tracing.trace_df(\n nested_exp_utilities, '%s.nested_exp_utilities' % trace_label,\n column_labels=['alternative', 'utility'])\n tracing.trace_df(\n nested_probabilities, '%s.nested_probabilities' % trace_label,\n column_labels=['alternative', 'probability'])\n tracing.trace_df(\n base_probabilities, '%s.base_probabilities' % trace_label,\n column_labels=['alternative', 'probability'])\n tracing.trace_df(\n choices, '%s.choices' % trace_label,\n columns=[None, trace_choice_name])\n tracing.trace_df(\n rands, '%s.rands' % trace_label,\n columns=[None, 'rand'])\n tracing.trace_df(\n expression_values, '%s.expression_values' % trace_label,\n column_labels=['expression', None])\n\n return choices\n\n\ndef _simple_simulate(choosers, spec, nest_spec, skims=None, 
locals_d=None,\n trace_label=None, trace_choice_name=None):\n \"\"\"\n Run an MNL or NL simulation for when the model\n spec does not involve alternative specific data,\n e.g. there are no interactions with alternative\n properties and no need to sample from alternatives.\n\n Parameters\n ----------\n choosers : pandas.DataFrame\n spec : pandas.DataFrame\n A table of variable specifications and coefficient values.\n Variable expressions should be in the table index and the table\n should have a column for each alternative.\n nest_spec:\n for nested logit (nl): dictionary specifying nesting structure\n and nesting coefficients\n for multinomial logit (mnl): None\n skims : Skims object\n The skims object is used to contain multiple matrices of\n origin-destination impedances. Make sure to also add it to the\n locals_d below in order to access it in expressions. The *only* job\n of this method in regards to skims is to call set_df with the\n dataframe that comes back from interacting choosers with\n alternatives. See the skims module for more documentation on how\n the skims object is intended to be used.\n locals_d : Dict\n This is a dictionary of local variables that will be the environment\n for an evaluation of an expression that begins with @\n trace_label: str\n This is the label to be used for trace log file entries\n and dump file names when household tracing enabled. No\n tracing occurs if label is empty or None.\n trace_choice_name: str\n This is the column label to be used in trace file csv dump of choices\n\n Returns\n -------\n choices : pandas.Series\n Index will be that of `choosers`, values will match the columns\n of `spec`.\n \"\"\"\n if skims:\n add_skims(choosers, skims)\n\n trace_label = tracing.extend_trace_label(trace_label, 'simple_simulate')\n\n if nest_spec is None:\n choices = eval_mnl(choosers, spec, locals_d,\n trace_label=trace_label,\n trace_choice_name=trace_choice_name)\n else:\n choices = eval_nl(choosers, spec, nest_spec, locals_d,\n trace_label=trace_label,\n trace_choice_name=trace_choice_name)\n\n return choices\n\n\ndef simple_simulate(choosers, spec, nest_spec, skims=None, locals_d=None,\n chunk_size=0, trace_label=None, trace_choice_name=None):\n \"\"\"\n Run an MNL or NL simulation for when the model spec does not\n involve alternative specific data, e.g. 
there are no interactions\n with alternative properties and no need to sample from alternatives.\n \"\"\"\n\n assert len(choosers) > 0\n\n num_chunk_rows = num_chunk_rows_for_chunk_size(chunk_size, choosers)\n\n print(\"simple_simulate num_chunk_rows %s num_choosers %s\" % (\n num_chunk_rows, len(choosers.index)))\n\n result_list = []\n # segment by person type and pick the right spec for each person type\n for i, num_chunks, chooser_chunk in chunked_choosers(\n choosers, num_chunk_rows):\n\n print(\"Running chunk %s of %s size %d\" % (\n i, num_chunks, len(chooser_chunk)))\n\n choices = _simple_simulate(\n chooser_chunk, spec, nest_spec,\n skims, locals_d,\n tracing.extend_trace_label(trace_label, 'chunk_%s' % i),\n trace_choice_name)\n\n result_list.append(choices)\n\n if len(result_list) > 1:\n choices = pd.concat(result_list)\n\n assert len(choices.index == len(choosers.index))\n\n return choices\n\n\ndef eval_mnl_logsums(choosers, spec, locals_d, trace_label=None):\n \"\"\"\n like eval_nl except return logsums instead of making choices\n\n Returns\n -------\n logsums : pandas.Series\n Index will be that of `choosers`, values will be\n logsum across spec column values\n \"\"\"\n\n trace_label = tracing.extend_trace_label(trace_label, 'mnl')\n check_for_variability = tracing.check_for_variability()\n\n print(\"running eval_mnl_logsums\")\n\n expression_values = eval_variables(spec.index, choosers, locals_d)\n\n if check_for_variability:\n _check_for_variability(expression_values, trace_label)\n\n # utility values\n utilities = compute_utilities(expression_values, spec)\n\n # logsum is log of exponentiated utilities summed across\n # columns of each chooser row\n utils_arr = utilities.as_matrix().astype('float')\n logsums = np.log(np.exp(utils_arr).sum(axis=1))\n logsums = pd.Series(logsums, index=choosers.index)\n\n if trace_label:\n # add logsum to utilities for tracing\n utilities['logsum'] = logsums\n\n tracing.trace_df(choosers, '%s.choosers' % trace_label)\n tracing.trace_df(utilities, '%s.utilities' % trace_label,\n column_labels=['alternative', 'utility'])\n tracing.trace_df(logsums, '%s.logsums' % trace_label,\n column_labels=['alternative', 'logsum'])\n tracing.trace_df(\n expression_values, '%s.expression_values' % trace_label,\n column_labels=['expression', None])\n\n return logsums\n\n\ndef eval_nl_logsums(choosers, spec, nest_spec, locals_d, trace_label=None):\n \"\"\"\n like eval_nl except return logsums instead of making choices\n\n Returns\n -------\n logsums : pandas.Series\n Index will be that of `choosers`, values will be nest logsum\n based on spec column values\n \"\"\"\n\n trace_label = tracing.extend_trace_label(trace_label, 'nl_logsums')\n check_for_variability = tracing.check_for_variability()\n\n # logger.debug(\"running eval_nl_logsums\")\n t0 = tracing.print_elapsed_time()\n\n # column names of expression_values match spec index values\n expression_values = eval_variables(spec.index, choosers, locals_d)\n t0 = tracing.print_elapsed_time(\"eval_variables\", t0, debug=True)\n\n if check_for_variability:\n _check_for_variability(expression_values, trace_label)\n t0 = tracing.print_elapsed_time(\n \"_check_for_variability\", t0, debug=True)\n\n # raw utilities of all the leaves\n raw_utilities = compute_utilities(expression_values, spec)\n t0 = tracing.print_elapsed_time(\"expression_values.dot\", t0, debug=True)\n\n # exponentiated utilities of leaves and nests\n nested_exp_utilities = compute_nested_exp_utilities(\n raw_utilities, nest_spec)\n t0 = 
tracing.print_elapsed_time(\n \"compute_nested_exp_utilities\", t0, debug=True)\n\n logsums = np.log(nested_exp_utilities.root)\n logsums = pd.Series(logsums, index=choosers.index)\n t0 = tracing.print_elapsed_time(\"logsums\", t0, debug=True)\n\n if trace_label:\n # add logsum to nested_exp_utilities for tracing\n nested_exp_utilities['logsum'] = logsums\n\n tracing.trace_df(choosers, '%s.choosers' % trace_label)\n tracing.trace_df(raw_utilities, '%s.raw_utilities' % trace_label,\n column_labels=['alternative', 'utility'])\n tracing.trace_df(\n nested_exp_utilities, '%s.nested_exp_utilities' % trace_label,\n column_labels=['alternative', 'utility'])\n tracing.trace_df(logsums, '%s.logsums' % trace_label,\n column_labels=['alternative', 'logsum'])\n\n return logsums\n\n\ndef _simple_simulate_logsums(choosers, spec, nest_spec,\n skims=None, locals_d=None, trace_label=None):\n \"\"\"\n like simple_simulate except return logsums instead of making choices\n\n Returns\n -------\n logsums : pandas.Series\n Index will be that of `choosers`, values will be nest\n logsum based on spec column values\n \"\"\"\n if skims:\n add_skims(choosers, skims)\n\n trace_label = tracing.extend_trace_label(\n trace_label, 'simple_simulate_logsums')\n\n if nest_spec is None:\n logsums = eval_mnl_logsums(\n choosers, spec, locals_d, trace_label=trace_label)\n else:\n logsums = eval_nl_logsums(\n choosers, spec, nest_spec, locals_d, trace_label=trace_label)\n\n return logsums\n\n\ndef simple_simulate_logsums(choosers, spec, nest_spec,\n skims=None, locals_d=None, chunk_size=0,\n trace_label=None):\n \"\"\"\n like simple_simulate except return logsums instead of making choices\n\n Returns\n -------\n logsums : pandas.Series\n Index will be that of `choosers`, values will be nest\n logsum based on spec column values\n \"\"\"\n\n assert len(choosers) > 0\n\n num_chunk_rows = num_chunk_rows_for_chunk_size(chunk_size, choosers)\n\n print(\n \"simple_simulate_logsums chunk_size\"\n \" %s num_choosers %s, num_chunk_rows %s\" %\n (chunk_size, len(choosers.index), num_chunk_rows))\n\n result_list = []\n # segment by person type and pick the right spec for each person type\n for i, num_chunks, chooser_chunk in chunked_choosers(\n choosers, num_chunk_rows):\n\n print(\"Running chunk %s of %s size %d\" % (\n i, num_chunks, len(chooser_chunk)))\n\n logsums = _simple_simulate_logsums(\n chooser_chunk, spec, nest_spec,\n skims, locals_d,\n tracing.extend_trace_label(trace_label, 'chunk_%s' % i))\n\n result_list.append(logsums)\n\n if len(result_list) > 1:\n logsums = pd.concat(result_list)\n\n assert len(logsums.index == len(choosers.index))\n\n return logsums\n", "id": "7873153", "language": "Python", "matching_score": 4.51664400100708, "max_stars_count": 0, "path": "bayarea_urbansim/baus/asim_simulate.py" }, { "content": "from __future__ import division\nimport numpy as np\nimport pandas as pd\nimport tracing\nimport asim_utils\n\n\ndef report_bad_choices(bad_row_map, df, trace_label, msg, trace_choosers=None):\n \"\"\"\n\n Parameters\n ----------\n bad_row_map\n df : pandas.DataFrame\n utils or probs dataframe\n msg : str\n message describing the type of bad choice that necessitates\n error being thrown\n trace_choosers : pandas.dataframe\n the choosers df (for interaction_simulate) to facilitate the reporting\n of hh_id because we can't deduce hh_id from the interaction_dataset\n which is indexed on index values from alternatives df\n\n Returns\n -------\n raises RuntimeError\n \"\"\"\n MAX_DUMP = 1000\n MAX_PRINT = 10\n\n 
msg_with_count = \"%s for %s rows\" % (msg, bad_row_map.sum())\n print(msg_with_count)\n\n if trace_label:\n print(\"dumping %s\" % trace_label)\n tracing.write_csv(df[bad_row_map][:MAX_DUMP],\n file_name=trace_label,\n transpose=False)\n\n # log the indexes of the first MAX_DUMP offending rows\n for idx in df.index[bad_row_map][:MAX_PRINT].values:\n\n if trace_choosers is None:\n hh_id = tracing.hh_id_for_chooser(idx, df)\n else:\n hh_id = tracing.hh_id_for_chooser(idx, trace_choosers)\n\n row_msg = \"%s : %s in: %s = %s (hh_id = %s)\" % (\n trace_label, msg, df.index.name, idx, hh_id)\n print(row_msg)\n\n raise RuntimeError(msg_with_count)\n\n\ndef utils_to_probs(utils, trace_label=None, exponentiated=False,\n allow_zero_probs=False, trace_choosers=None):\n \"\"\"\n Convert a table of utilities to probabilities.\n\n Parameters\n ----------\n utils : pandas.DataFrame\n Rows should be choosers and columns should be alternatives.\n\n trace_label : str\n label for tracing bad utility or probability values\n\n exponentiated : bool\n True if utilities have already been exponentiated\n\n allow_zero_probs : bool\n if True value rows in which all utility alts are EXP_UTIL_MIN\n will result in rows in probs to have all zero probability\n (and not sum to 1.0). This is for the benefit of calculating\n probabilities of nested logit nests\n\n trace_choosers : pandas.dataframe\n the choosers df (for interaction_simulate) to facilitate the\n reporting of hh_id by report_bad_choices because it can't deduce\n hh_id from the interaction_dataset which is indexed on index values\n from alternatives df\n\n Returns\n -------\n probs : pandas.DataFrame\n Will have the same index and columns as `utils`.\n\n \"\"\"\n trace_label = tracing.extend_trace_label(trace_label, 'utils_to_probs')\n\n utils_arr = utils.as_matrix().astype('float')\n if not exponentiated:\n utils_arr = np.exp(utils_arr)\n\n EXP_UTIL_MIN = 1e-300\n EXP_UTIL_MAX = np.inf\n np.clip(utils_arr, EXP_UTIL_MIN, EXP_UTIL_MAX, out=utils_arr)\n\n # FIXME\n utils_arr = np.where(utils_arr == EXP_UTIL_MIN, 0.0, utils_arr)\n\n arr_sum = utils_arr.sum(axis=1)\n\n zero_probs = (arr_sum == 0.0)\n if zero_probs.any() and not allow_zero_probs:\n\n report_bad_choices(\n zero_probs, utils, tracing.extend_trace_label(\n trace_label, 'zero_prob_utils'),\n msg=\"all probabilities are zero\", trace_choosers=trace_choosers)\n\n inf_utils = np.isinf(arr_sum)\n if inf_utils.any():\n report_bad_choices(\n inf_utils, utils,\n tracing.extend_trace_label(trace_label, 'inf_exp_utils'),\n msg=\"infinite exponentiated utilities\",\n trace_choosers=trace_choosers)\n\n # if allow_zero_probs, this may cause a RuntimeWarning: invalid value\n # encountered in divide\n np.divide(utils_arr, arr_sum.reshape(len(utils_arr), 1), out=utils_arr)\n\n PROB_MIN = 0.0\n PROB_MAX = 1.0\n\n # if allow_zero_probs, this will cause EXP_UTIL_MIN util rows to have all\n # zero probabilities\n utils_arr[np.isnan(utils_arr)] = PROB_MIN\n\n np.clip(utils_arr, PROB_MIN, PROB_MAX, out=utils_arr)\n\n probs = pd.DataFrame(utils_arr, columns=utils.columns, index=utils.index)\n\n return probs\n\n\ndef make_choices(probs, trace_label=None, trace_choosers=None):\n \"\"\"\n Make choices for each chooser from among a set of alternatives.\n\n Parameters\n ----------\n probs : pandas.DataFrame\n Rows for choosers and columns for the alternatives from which they\n are choosing. Values are expected to be valid probabilities across\n each row, e.g. 
they should sum to 1.\n\n trace_choosers : pandas.dataframe\n the choosers df (for interaction_simulate) to facilitate the\n reporting of hh_id by report_bad_choices because it can't deduce\n hh_id from the interaction_dataset which is indexed on index values\n from alternatives df\n\n Returns\n -------\n choices : pandas.Series\n Maps chooser IDs (from `probs` index) to a choice, where the choice\n is an index into the columns of `probs`.\n\n rands : pandas.Series\n The random numbers used to make the choices (for debugging, tracing)\n\n \"\"\"\n trace_label = tracing.extend_trace_label(trace_label, 'make_choices')\n\n # probs should sum to 1 across each row\n\n BAD_PROB_THRESHOLD = 0.001\n bad_probs = \\\n probs.sum(axis=1).sub(np.ones(len(probs.index))).abs() \\\n > BAD_PROB_THRESHOLD * np.ones(len(probs.index))\n\n if bad_probs.any():\n\n report_bad_choices(\n bad_probs, probs,\n tracing.extend_trace_label(trace_label, 'bad_probs'),\n msg=\"probabilities do not add up to 1\",\n trace_choosers=trace_choosers)\n\n rands = asim_utils.get_rn_generator().random_for_df(probs)\n\n probs_arr = probs.as_matrix().cumsum(axis=1) - rands\n\n # rows, cols = np.where(probs_arr > 0)\n # choices = [s.iat[0] for _, s in pd.Series(cols).groupby(rows)]\n choices = np.argmax(probs_arr > 0.0, axis=1)\n\n choices = pd.Series(choices, index=probs.index)\n\n rands = pd.Series(np.asanyarray(rands).flatten(), index=probs.index)\n\n return choices, rands\n\n\ndef interaction_dataset(choosers, alternatives, sample_size=None):\n \"\"\"\n Combine choosers and alternatives into one table for the purposes\n of creating interaction variables and/or sampling alternatives.\n\n Parameters\n ----------\n choosers : pandas.DataFrame\n alternatives : pandas.DataFrame\n sample_size : int, optional\n If sampling from alternatives for each chooser, this is\n how many to sample.\n\n Returns\n -------\n alts_sample : pandas.DataFrame\n Merged choosers and alternatives with data repeated either\n len(alternatives) or `sample_size` times.\n\n \"\"\"\n if not choosers.index.is_unique:\n raise RuntimeError(\n \"ERROR: choosers index is not unique, \"\n \"sample will not work correctly\")\n if not alternatives.index.is_unique:\n raise RuntimeError(\n \"ERROR: alternatives index is not unique, \"\n \"sample will not work correctly\")\n\n numchoosers = len(choosers)\n numalts = len(alternatives)\n sample_size = sample_size or numalts\n\n # FIXME - is this faster or just dumb?\n alts_idx = np.arange(numalts)\n\n if sample_size < numalts:\n sample = asim_utils.get_rn_generator().choice_for_df(\n choosers, alts_idx, sample_size, replace=False)\n else:\n sample = np.tile(alts_idx, numchoosers)\n\n alts_sample = alternatives.take(sample)\n alts_sample['chooser_idx'] = np.repeat(choosers.index.values, sample_size)\n\n alts_sample = pd.merge(\n alts_sample, choosers, left_on='chooser_idx', right_index=True,\n suffixes=('', '_r'))\n\n return alts_sample\n\n\nclass Nest(object):\n \"\"\"\n Data for a nest-logit node or leaf\n\n This object is passed on yield when iterate over nest nodes\n (branch or leaf). 
The nested logit design is stored in a yaml file as a\n tree of dict objects, but using an object to pass the nest data makes the\n code a little more readable\n\n An example nest specification is in the example tour mode choice model\n yaml configuration file - example/configs/tour_mode_choice.yaml.\n \"\"\"\n\n def __init__(self, name=None, level=0):\n self.name = name\n self.level = level\n self.product_of_coefficients = 1\n self.ancestors = []\n self.alternatives = None\n self.coefficient = 0\n\n @property\n def is_leaf(self):\n return (self.alternatives is None)\n\n @property\n def type(self):\n return 'leaf' if self.is_leaf else 'node'\n\n @classmethod\n def nest_types(cls):\n return ['leaf', 'node']\n\n\ndef _each_nest(spec, parent_nest, post_order):\n \"\"\"\n Iterate over each nest or leaf node in the tree (of subtree)\n\n This internal routine is called by each_nest, which presents a\n slightly higer level interface\n\n Parameters\n ----------\n spec : dict\n Nest spec dict tree (or subtree when recursing) from the\n model spec yaml file\n parent_nest : Nest\n nest of parent node (passed to accumulate level, ancestors,\n and product_of_coefficients)\n post_order : Bool\n Should we iterate over the nodes of the tree in post-order\n or pre-order? (post-order means we yield the alternatives\n sub-tree before current node.)\n\n Yields\n -------\n spec_node : dict\n Nest tree spec dict for this node subtree\n nest : Nest\n Nest object with info about the current node (nest or leaf)\n \"\"\"\n pre_order = not post_order\n level = parent_nest.level + 1\n\n if isinstance(spec, dict):\n name = spec['name']\n coefficient = spec['coefficient']\n alternatives = [\n a['name'] if isinstance(a, dict)\n else a for a in spec['alternatives']]\n\n nest = Nest(name=name)\n nest.level = parent_nest.level + 1\n nest.coefficient = coefficient\n nest.product_of_coefficients = \\\n parent_nest.product_of_coefficients * coefficient\n nest.alternatives = alternatives\n nest.ancestors = parent_nest.ancestors + [name]\n\n if pre_order:\n yield spec, nest\n\n # recursively iterate the list of alternatives\n for alternative in spec['alternatives']:\n for sub_node, sub_nest in _each_nest(\n alternative, nest, post_order):\n yield sub_node, sub_nest\n\n if post_order:\n yield spec, nest\n\n elif isinstance(spec, str):\n name = spec\n\n nest = Nest(name=name)\n nest.level = parent_nest.level + 1\n nest.product_of_coefficients = parent_nest.product_of_coefficients\n nest.ancestors = parent_nest.ancestors + [name]\n\n yield spec, nest\n\n\ndef each_nest(nest_spec, type=None, post_order=False):\n \"\"\"\n Iterate over each nest or leaf node in the tree (of subtree)\n\n Parameters\n ----------\n nest_spec : dict\n Nest tree dict from the model spec yaml file\n type : str\n Nest class type to yield\n None yields all nests\n 'leaf' yields only leaf nodes\n 'branch' yields only branch nodes\n post_order : Bool\n Should we iterate over the nodes of the tree in\n post-order or pre-order? 
(post-order means we yield\n the alternatives sub-tree before current node.)\n\n Yields\n -------\n nest : Nest\n Nest object with info about the current node (nest or leaf)\n \"\"\"\n if type is not None and type not in Nest.nest_types():\n raise RuntimeError(\n \"Unknown nest type '%s' in call to each_nest\" % type)\n\n for node, nest in _each_nest(\n nest_spec, parent_nest=Nest(), post_order=post_order):\n if type is None or (type == nest.type):\n yield nest\n", "id": "9148551", "language": "Python", "matching_score": 3.8369739055633545, "max_stars_count": 0, "path": "bayarea_urbansim/baus/logit.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os.path\n\nimport numpy as np\nimport pandas as pd\nimport orca\n\nimport pandas.util.testing as pdt\nimport pytest\n\nfrom ..simulate import eval_variables\nfrom .. import logit\n\n\[email protected](scope='module')\ndef data_dir():\n return os.path.join(os.path.dirname(__file__), 'data')\n\n\ndef add_canonical_dirs():\n\n configs_dir = os.path.join(os.path.dirname(__file__), 'configs')\n orca.add_injectable(\"configs_dir\", configs_dir)\n\n output_dir = os.path.join(os.path.dirname(__file__), 'output')\n orca.add_injectable(\"output_dir\", output_dir)\n\n\n# this is lifted straight from urbansim's test_mnl.py\[email protected](scope='module', params=[\n ('fish.csv',\n 'fish_choosers.csv',\n pd.DataFrame(\n [[-0.02047652], [0.95309824]], index=['price', 'catch'],\n columns=['Alt']),\n pd.DataFrame([\n [0.2849598, 0.2742482, 0.1605457, 0.2802463],\n [0.1498991, 0.4542377, 0.2600969, 0.1357664]],\n columns=['beach', 'boat', 'charter', 'pier']))])\ndef test_data(request):\n data, choosers, spec, probabilities = request.param\n return {\n 'data': data,\n 'choosers': choosers,\n 'spec': spec,\n 'probabilities': probabilities\n }\n\n\[email protected]\ndef choosers(test_data, data_dir):\n filen = os.path.join(data_dir, test_data['choosers'])\n return pd.read_csv(filen)\n\n\[email protected]\ndef spec(test_data):\n return test_data['spec']\n\n\[email protected]\ndef utilities(choosers, spec, test_data):\n vars = eval_variables(spec.index, choosers)\n utils = vars.dot(spec).astype('float')\n return pd.DataFrame(\n utils.as_matrix().reshape(test_data['probabilities'].shape),\n columns=test_data['probabilities'].columns)\n\n\ndef test_utils_to_probs(utilities, test_data):\n probs = logit.utils_to_probs(utilities, trace_label=None)\n pdt.assert_frame_equal(probs, test_data['probabilities'])\n\n\ndef test_utils_to_probs_raises():\n\n add_canonical_dirs()\n\n with pytest.raises(RuntimeError) as excinfo:\n logit.utils_to_probs(pd.DataFrame([[1, 2, np.inf, 3]]), trace_label=None)\n assert \"infinite exponentiated utilities\" in str(excinfo.value)\n\n with pytest.raises(RuntimeError) as excinfo:\n logit.utils_to_probs(pd.DataFrame([[-999, -999, -999, -999]]), trace_label=None)\n assert \"all probabilities are zero\" in str(excinfo.value)\n\n\ndef test_make_choices_only_one():\n probs = pd.DataFrame(\n [[1, 0, 0], [0, 1, 0]], columns=['a', 'b', 'c'], index=['x', 'y'])\n choices, rands = logit.make_choices(probs)\n\n pdt.assert_series_equal(\n choices,\n pd.Series([0, 1], index=['x', 'y']))\n\n\ndef test_make_choices_real_probs(utilities):\n probs = logit.utils_to_probs(utilities, trace_label=None)\n choices, rands = logit.make_choices(probs)\n\n pdt.assert_series_equal(\n choices,\n pd.Series([1, 2], index=[0, 1]))\n\n\[email protected](scope='module')\ndef interaction_choosers():\n return pd.DataFrame({\n 'attr': ['a', 'b', 'c', 
'b']},\n index=['w', 'x', 'y', 'z'])\n\n\[email protected](scope='module')\ndef interaction_alts():\n return pd.DataFrame({\n 'prop': [10, 20, 30, 40]},\n index=[1, 2, 3, 4])\n\n\ndef test_interaction_dataset_no_sample(interaction_choosers, interaction_alts):\n expected = pd.DataFrame({\n 'attr': ['a'] * 4 + ['b'] * 4 + ['c'] * 4 + ['b'] * 4,\n 'prop': [10, 20, 30, 40] * 4,\n 'chooser_idx': ['w'] * 4 + ['x'] * 4 + ['y'] * 4 + ['z'] * 4},\n index=[1, 2, 3, 4] * 4)\n\n interacted = logit.interaction_dataset(\n interaction_choosers, interaction_alts)\n\n interacted, expected = interacted.align(expected, axis=1)\n pdt.assert_frame_equal(interacted, expected)\n\n\ndef test_interaction_dataset_sampled(\n interaction_choosers, interaction_alts):\n expected = pd.DataFrame({\n 'attr': ['a'] * 2 + ['b'] * 2 + ['c'] * 2 + ['b'] * 2,\n 'prop': [30, 40, 10, 30, 40, 10, 20, 10],\n 'chooser_idx': ['w'] * 2 + ['x'] * 2 + ['y'] * 2 + ['z'] * 2},\n index=[3, 4, 1, 3, 4, 1, 2, 1])\n\n interacted = logit.interaction_dataset(\n interaction_choosers, interaction_alts, sample_size=2)\n\n interacted, expected = interacted.align(expected, axis=1)\n pdt.assert_frame_equal(interacted, expected)\n", "id": "6835661", "language": "Python", "matching_score": 3.562513828277588, "max_stars_count": 0, "path": "activitysim/activitysim/core/test/test_logit.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os.path\n\nimport numpy.testing as npt\nimport pandas as pd\nimport pandas.util.testing as pdt\nimport pytest\n\nimport orca\n\nfrom .. import simulate as asim\n\n\[email protected](scope='module')\ndef data_dir():\n return os.path.join(os.path.dirname(__file__), 'data')\n\n\[email protected](scope='module')\ndef spec_name(data_dir):\n return 'sample_spec.csv'\n\n\[email protected](scope='module')\ndef spec(data_dir, spec_name):\n return asim.read_model_spec(data_dir, spec_name,\n description_name='description',\n expression_name='expression')\n\n\[email protected](scope='module')\ndef data(data_dir):\n return pd.read_csv(os.path.join(data_dir, 'data.csv'))\n\n\ndef test_read_model_spec(data_dir, spec_name):\n\n spec = asim.read_model_spec(\n data_dir, spec_name,\n description_name='description', expression_name='expression')\n\n assert len(spec) == 4\n assert spec.index.name == 'expression'\n assert list(spec.columns) == ['alt0', 'alt1']\n npt.assert_array_equal(\n spec.as_matrix(),\n [[1.1, 11], [2.2, 22], [3.3, 33], [4.4, 44]])\n\n\ndef test_eval_variables(spec, data):\n\n result = asim.eval_variables(spec.index, data, target_type=None)\n\n expected_result = pd.DataFrame([\n [True, False, 4, 1],\n [False, True, 4, 1],\n [False, True, 5, 1]],\n index=data.index, columns=spec.index)\n\n pdt.assert_frame_equal(result, expected_result, check_names=False)\n\n result = asim.eval_variables(spec.index, data, target_type=float)\n\n expected_result = pd.DataFrame([\n [1.0, 0.0, 4.0, 1.0],\n [0.0, 1.0, 4.0, 1.0],\n [0.0, 1.0, 5.0, 1.0]],\n index=data.index, columns=spec.index)\n\n pdt.assert_frame_equal(result, expected_result, check_names=False)\n\n\ndef test_simple_simulate(data, spec):\n\n orca.add_injectable(\"check_for_variability\", False)\n\n choices = asim.simple_simulate(data, spec, nest_spec=None)\n expected = pd.Series([1, 1, 1], index=data.index)\n pdt.assert_series_equal(choices, expected)\n\n\ndef test_simple_simulate_chunked(data, spec):\n\n orca.add_injectable(\"check_for_variability\", False)\n\n choices = asim.simple_simulate(data, spec, nest_spec=None, chunk_size=2)\n expected = 
pd.Series([1, 1, 1], index=data.index)\n pdt.assert_series_equal(choices, expected)\n", "id": "10832486", "language": "Python", "matching_score": 1.512608289718628, "max_stars_count": 0, "path": "activitysim/activitysim/core/test/test_simulate.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\n\nimport pytest\nimport os\nimport pandas as pd\nimport pandas.util.testing as pdt\nfrom ..tour_frequency import process_non_mandatory_tours\n\n\ndef test_nmtf():\n\n non_mandatory_tour_frequency = pd.Series([0, 3, 2, 1])\n\n non_mandatory_tour_frequency_alts = pd.DataFrame(\n {\n \"escort\": [0, 0, 2, 0],\n \"shopping\": [1, 0, 0, 0],\n \"othmaint\": [0, 1, 0, 0]\n },\n index=[0, 1, 2, 3]\n )\n\n nmt = process_non_mandatory_tours(non_mandatory_tour_frequency,\n non_mandatory_tour_frequency_alts)\n\n idx = pd.Index([7, 23, 24, 37], name=\"tour_id\")\n\n pdt.assert_series_equal(\n nmt.person_id,\n pd.Series(\n [0, 2, 2, 3], index=idx, name='person_id'))\n\n pdt.assert_series_equal(\n nmt.tour_type,\n pd.Series(\n [\"shopping\", \"escort\", \"escort\", \"othmaint\"],\n index=idx, name='tour_type'))\n", "id": "12404685", "language": "Python", "matching_score": 3.3955605030059814, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/util/test/test_non_mandatory_tour_frequency.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\n\nimport pytest\nimport os\nimport pandas as pd\nimport pandas.util.testing as pdt\nfrom ..tour_frequency import process_mandatory_tours\n\n\ndef test_mtf():\n persons = pd.DataFrame({\n \"is_worker\": [True, True, False],\n \"mandatory_tour_frequency\": [\"work1\", \"work_and_school\", \"school2\"],\n \"school_taz\": [1, 2, 3],\n \"workplace_taz\": [10, 20, 30],\n }, index=[10, 20, 30])\n\n mandatory_tours = process_mandatory_tours(persons)\n\n idx = pd.Index([119, 229, 226, 335, 336], name=\"tour_id\")\n\n pdt.assert_series_equal(\n mandatory_tours.person_id,\n pd.Series([10, 20, 20, 30, 30], index=idx, name='person_id'))\n\n pdt.assert_series_equal(\n mandatory_tours.tour_type,\n pd.Series(['work', 'work', 'school', 'school', 'school'], index=idx, name='tour_type'))\n\n pdt.assert_series_equal(\n mandatory_tours.tour_num,\n pd.Series([1, 1, 2, 1, 2], index=idx, name='tour_num'))\n\n pdt.assert_series_equal(\n mandatory_tours.destination,\n pd.Series([10, 20, 2, 3, 3], index=idx, name='destination'))\n", "id": "697455", "language": "Python", "matching_score": 2.278362512588501, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/util/test/test_mandatory_tour_frequency.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport itertools\nimport numpy as np\nimport pandas as pd\n\n\ndef canonical_tours():\n \"\"\"\n create labels for every the possible tour by combining tour_type/tour_num.\n\n Returns\n -------\n list of canonical tour labels in alphabetical order\n \"\"\"\n # the problem is we don't know what the possible tour_types and their max tour_nums are\n\n # FIXME - should get this from alts table\n # alts = orca.get_table('non_mandatory_tour_frequency_alts').local\n # non_mandatory_tour_flavors = {c : alts[alts].max() for c in alts.columns.names}\n non_mandatory_tour_flavors = {'escort': 2, 'shopping': 1, 'othmaint': 1, 'othdiscr': 1,\n 'eatout': 1, 'social': 1}\n\n # this logic is hardwired in process_mandatory_tours()\n mandatory_tour_flavors = {'work': 2, 'school': 2}\n\n tour_flavors = dict(non_mandatory_tour_flavors, **mandatory_tour_flavors)\n\n sub_channels = [tour_type + 
str(tour_num)\n for tour_type, max_count in tour_flavors.iteritems()\n for tour_num in range(1, max_count + 1)]\n\n sub_channels.sort()\n return sub_channels\n\n\ndef set_tour_index(tours):\n \"\"\"\n\n Parameters\n ----------\n tours : DataFrame\n Tours dataframe to reindex.\n The new index values are stable based on the person_id, tour_type, and tour_num.\n The existing index is ignored and replaced.\n\n Having a stable (predictable) index value\n It also simplifies attaching random number streams to tours that are stable\n (even across simulations)\n\n Returns\n -------\n\n \"\"\"\n\n possible_tours = canonical_tours()\n possible_tours_count = len(possible_tours)\n\n # concat tour_type + tour_num\n tours['tour_id'] = tours.tour_type + tours.tour_num.map(str)\n\n # map recognized strings to ints\n tours.tour_id = tours.tour_id.replace(to_replace=possible_tours,\n value=range(possible_tours_count))\n # convert to numeric - shouldn't be any NaNs - this will raise error if there are\n tours.tour_id = pd.to_numeric(tours.tour_id, errors='coerce').astype(int)\n\n tours.tour_id = (tours.person_id * possible_tours_count) + tours.tour_id\n\n if len(tours.tour_id) > len(tours.tour_id.unique()):\n print \"\\ntours.tour_id not unique\\n\", tours\n\n tours.set_index('tour_id', inplace=True, verify_integrity=True)\n\n\ndef process_mandatory_tours(persons):\n \"\"\"\n This method processes the mandatory_tour_frequency column that comes out of\n the model of the same name and turns into a DataFrame that represents the\n mandatory tours that were generated\n\n Parameters\n ----------\n persons : DataFrame\n Persons is a DataFrame which has a column call\n mandatory_tour_frequency (which came out of the mandatory tour\n frequency model) and a column is_worker which indicates the person's\n worker status. The only valid values of the mandatory_tour_frequency\n column to take are \"work1\", \"work2\", \"school1\", \"school2\" and\n \"work_and_school\"\n\n Returns\n -------\n tours : DataFrame\n An example of a tours DataFrame is supplied as a comment in the\n source code - it has an index which is a tour identifier, a person_id\n column, a tour_type column which is \"work\" or \"school\" and a tour_num\n column which is set to 1 or 2 depending whether it is the first or\n second mandatory tour made by the person. 
The logic for whether the\n work or school tour comes first given a \"work_and_school\" choice\n depends on the is_worker column and was copied from the original\n implementation.\n \"\"\"\n\n tours = []\n # this is probably easier to do in non-vectorized fashion like this\n for key, row in persons.iterrows():\n\n mtour = row.mandatory_tour_frequency\n is_worker = row.is_worker\n work_taz = row.workplace_taz\n school_taz = row.school_taz\n\n # this logic came from the CTRAMP model - I copied it as best as I\n # could from the previous code - basically we need to know which\n # tours are the first tour and which are subsequent, and work /\n # school depends on the status of the person (is_worker variable)\n\n # 1 work trip\n if mtour == \"work1\":\n tours += [(key, \"work\", 1, work_taz)]\n # 2 work trips\n elif mtour == \"work2\":\n tours += [(key, \"work\", 1, work_taz), (key, \"work\", 2, work_taz)]\n # 1 school trip\n elif mtour == \"school1\":\n tours += [(key, \"school\", 1, school_taz)]\n # 2 school trips\n elif mtour == \"school2\":\n tours += [(key, \"school\", 1, school_taz), (key, \"school\", 2, school_taz)]\n # 1 work and 1 school trip\n elif mtour == \"work_and_school\":\n if is_worker:\n # is worker, work trip goes first\n tours += [(key, \"work\", 1, work_taz), (key, \"school\", 2, school_taz)]\n else:\n # is student, work trip goes second\n tours += [(key, \"school\", 1, school_taz), (key, \"work\", 2, work_taz)]\n else:\n assert 0\n\n \"\"\"\n Pretty basic at this point - trip table looks like this so far\n person_id tour_type tour_num destination\n tour_id\n 0 4419 work 1 <work_taz>\n 1 4419 school 2 <school_taz>\n 4 4650 school 1 <school_taz>\n 5 10001 school 1 <school_taz>\n 6 10001 work 2 <work_taz>\n \"\"\"\n\n df = pd.DataFrame(tours, columns=[\"person_id\", \"tour_type\", \"tour_num\", \"destination\"])\n\n set_tour_index(df)\n\n return df\n\n\ndef process_non_mandatory_tours(non_mandatory_tour_frequency,\n non_mandatory_tour_frequency_alts):\n \"\"\"\n This method processes the non_mandatory_tour_frequency column that comes\n out of the model of the same name and turns into a DataFrame that\n represents the non mandatory tours that were generated\n\n Parameters\n ----------\n non_mandatory_tour_frequency: Series\n A series which has person id as the index and the chosen alternative\n index as the value\n non_mandatory_tour_frequency_alts: DataFrame\n A DataFrame which has as a unique index which relates to the values\n in the series above typically includes columns which are named for trip\n purposes with values which are counts for that trip purpose. Example\n trip purposes include escort, shopping, othmaint, othdiscr, eatout,\n social, etc. 
A row would be an alternative which might be to take\n one shopping trip and zero trips of other purposes, etc.\n\n Returns\n -------\n tours : DataFrame\n An example of a tours DataFrame is supplied as a comment in the\n source code - it has an index which is a unique tour identifier,\n a person_id column, and a tour type column which comes from the\n column names of the alternatives DataFrame supplied above.\n \"\"\"\n\n nmtf = non_mandatory_tour_frequency\n\n # get the actual alternatives for each person - have to go back to the\n # non_mandatory_tour_frequency_alts dataframe to get this - the choice\n # above just stored the index values for the chosen alts\n tours = non_mandatory_tour_frequency_alts.loc[nmtf]\n\n # assign person ids to the index\n tours.index = nmtf.index\n\n # reformat with the columns given below\n tours = tours.stack().reset_index()\n tours.columns = [\"person_id\", \"tour_type\", \"tour_count\"]\n\n # map non-zero tour_counts to a list of ranges [1,2,1] -> [[0], [0, 1], [0]]\n tour_nums = map(range, tours.tour_count[tours.tour_count > 0].values)\n # flatten (more baroque but faster than np.hstack)\n tour_nums = np.array(list(itertools.chain.from_iterable(tour_nums))) + 1\n\n # now do a repeat and a take, so if you have two trips of given type you\n # now have two rows, and zero trips yields zero rows\n tours = tours.take(np.repeat(tours.index.values, tours.tour_count.values))\n\n tours['tour_num'] = tour_nums\n\n # make index unique\n set_tour_index(tours)\n\n \"\"\"\n Pretty basic at this point - trip table looks like this so far\n person_id tour_type tour_num\n tour_id\n 0 4419 escort 1\n 1 4419 escort 2\n 2 4419 othmaint 1\n 3 4419 eatout 1\n 4 4419 social 1\n 5 10001 escort 1\n 6 10001 escort 2\n \"\"\"\n return tours\n", "id": "3612920", "language": "Python", "matching_score": 3.5242371559143066, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/util/tour_frequency.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport numpy as np\nimport orca\nimport pandas as pd\n\nfrom activitysim.core.simulate import read_model_spec\nfrom activitysim.core.interaction_simulate import interaction_simulate\n\nfrom activitysim.core import tracing\nfrom activitysim.core.tracing import print_elapsed_time\nfrom activitysim.core import pipeline\nfrom activitysim.core import config\n\nfrom activitysim.core.util import reindex\n\nfrom .util.tour_frequency import process_non_mandatory_tours\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]()\ndef non_mandatory_tour_frequency_settings(configs_dir):\n return config.read_model_settings(configs_dir, 'non_mandatory_tour_frequency.yaml')\n\n\[email protected]()\ndef non_mandatory_tour_frequency_spec(configs_dir):\n return read_model_spec(configs_dir, 'non_mandatory_tour_frequency.csv')\n\n\[email protected]()\ndef non_mandatory_tour_frequency_alts(configs_dir):\n f = os.path.join(configs_dir, 'non_mandatory_tour_frequency_alternatives.csv')\n return pd.read_csv(f)\n\n\[email protected](\"non_mandatory_tour_frequency_alts\")\ndef tot_tours(non_mandatory_tour_frequency_alts):\n return non_mandatory_tour_frequency_alts.local.sum(axis=1)\n\n\[email protected]()\ndef non_mandatory_tour_frequency(persons_merged,\n non_mandatory_tour_frequency_alts,\n non_mandatory_tour_frequency_spec,\n non_mandatory_tour_frequency_settings,\n chunk_size,\n trace_hh_id):\n\n \"\"\"\n This model predicts the frequency of making non-mandatory trips\n (alternatives for this model 
come from a separate csv file which is\n configured by the user) - these trips include escort, shopping, othmaint,\n othdiscr, eatout, and social trips in various combination.\n \"\"\"\n\n t0 = print_elapsed_time()\n\n choosers = persons_merged.to_frame()\n alts = non_mandatory_tour_frequency_alts.to_frame()\n\n # filter based on results of CDAP\n choosers = choosers[choosers.cdap_activity.isin(['M', 'N'])]\n\n logger.info(\"Running non_mandatory_tour_frequency with %d persons\" % len(choosers))\n\n constants = config.get_model_constants(non_mandatory_tour_frequency_settings)\n\n choices_list = []\n # segment by person type and pick the right spec for each person type\n for name, segment in choosers.groupby('ptype_cat'):\n\n logger.info(\"Running segment '%s' of size %d\" % (name, len(segment)))\n\n choices = interaction_simulate(\n segment,\n alts,\n # notice that we pick the column for the segment for each segment we run\n spec=non_mandatory_tour_frequency_spec[[name]],\n locals_d=constants,\n chunk_size=chunk_size,\n trace_label=trace_hh_id and 'non_mandatory_tour_frequency.%s' % name,\n trace_choice_name='non_mandatory_tour_frequency')\n\n choices_list.append(choices)\n\n t0 = print_elapsed_time(\"non_mandatory_tour_frequency.%s\" % name, t0)\n\n # FIXME - force garbage collection\n # mem = memory_info()\n # logger.info('memory_info ptype %s, %s' % (name, mem))\n\n choices = pd.concat(choices_list)\n\n # FIXME - no need to reindex?\n orca.add_column(\"persons\", \"non_mandatory_tour_frequency\", choices)\n\n create_non_mandatory_tours_table()\n\n pipeline.add_dependent_columns(\"persons\", \"persons_nmtf\")\n\n if trace_hh_id:\n trace_columns = ['non_mandatory_tour_frequency']\n tracing.trace_df(orca.get_table('persons_merged').to_frame(),\n label=\"non_mandatory_tour_frequency\",\n columns=trace_columns,\n warn_if_empty=True)\n\n\"\"\"\nWe have now generated non-mandatory tours, but they are attributes of the\nperson table - this function creates a \"tours\" table which\nhas one row per tour that has been generated (and the person id it is\nassociated with)\n\"\"\"\n\n\ndef create_non_mandatory_tours_table():\n\n persons = orca.get_table('persons')\n non_mandatory_tour_frequency_alts = orca.get_table('non_mandatory_tour_frequency_alts')\n\n df = process_non_mandatory_tours(\n persons.non_mandatory_tour_frequency.dropna(),\n non_mandatory_tour_frequency_alts.local\n )\n\n orca.add_table(\"non_mandatory_tours\", df)\n tracing.register_traceable_table('non_mandatory_tours', df)\n pipeline.get_rn_generator().add_channel(df, 'tours')\n\n\n\"\"\"\nThis is where I'm currently putting computed columns for non_mandatory_tours\n- there's an argument this should go in the tables directory in tours.py\n\"\"\"\n\n\[email protected](\"non_mandatory_tours\")\ndef destination_in_cbd(non_mandatory_tours, land_use, settings):\n # protection until filled in by destination choice model\n if \"destination\" not in non_mandatory_tours.columns:\n return pd.Series(False, index=non_mandatory_tours.index)\n\n s = reindex(land_use.area_type, non_mandatory_tours.destination)\n return s < settings['cbd_threshold']\n", "id": "405368", "language": "Python", "matching_score": 4.87798547744751, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/non_mandatory_tour_frequency.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport orca\nimport pandas as pd\nimport yaml\n\nfrom activitysim.core import simulate as asim\nfrom activitysim.core import 
tracing\nfrom activitysim.core import pipeline\nfrom activitysim.core import config\n\nfrom .util.tour_frequency import process_mandatory_tours\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]()\ndef mandatory_tour_frequency_spec(configs_dir):\n return asim.read_model_spec(configs_dir, 'mandatory_tour_frequency.csv')\n\n\[email protected]()\ndef mandatory_tour_frequency_settings(configs_dir):\n return config.read_model_settings(configs_dir, 'mandatory_tour_frequency.yaml')\n\n\[email protected]()\ndef mandatory_tour_frequency(persons_merged,\n mandatory_tour_frequency_spec,\n mandatory_tour_frequency_settings,\n trace_hh_id):\n \"\"\"\n This model predicts the frequency of making mandatory trips (see the\n alternatives above) - these trips include work and school in some combination.\n \"\"\"\n\n choosers = persons_merged.to_frame()\n # filter based on results of CDAP\n choosers = choosers[choosers.cdap_activity == 'M']\n logger.info(\"Running mandatory_tour_frequency with %d persons\" % len(choosers))\n\n nest_spec = config.get_logit_model_settings(mandatory_tour_frequency_settings)\n constants = config.get_model_constants(mandatory_tour_frequency_settings)\n\n choices = asim.simple_simulate(\n choosers,\n spec=mandatory_tour_frequency_spec,\n nest_spec=nest_spec,\n locals_d=constants,\n trace_label=trace_hh_id and 'mandatory_tour_frequency',\n trace_choice_name='mandatory_tour_frequency')\n\n # convert indexes to alternative names\n choices = pd.Series(\n mandatory_tour_frequency_spec.columns[choices.values],\n index=choices.index).reindex(persons_merged.local.index)\n\n tracing.print_summary('mandatory_tour_frequency', choices, value_counts=True)\n\n orca.add_column(\"persons\", \"mandatory_tour_frequency\", choices)\n pipeline.add_dependent_columns(\"persons\", \"persons_mtf\")\n\n create_mandatory_tours_table()\n\n # FIXME - test prng repeatability\n r = pipeline.get_rn_generator().random_for_df(choices)\n orca.add_column(\"persons\", \"mtf_rand\", [item for sublist in r for item in sublist])\n\n if trace_hh_id:\n trace_columns = ['mandatory_tour_frequency']\n tracing.trace_df(orca.get_table('persons_merged').to_frame(),\n label=\"mandatory_tour_frequency\",\n columns=trace_columns,\n warn_if_empty=True)\n\n\n\"\"\"\nThis reprocesses the choice of index of the mandatory tour frequency\nalternatives into an actual dataframe of tours. 
Ending format is\nthe same as got non_mandatory_tours except trip types are \"work\" and \"school\"\n\"\"\"\n\n\ndef create_mandatory_tours_table():\n\n persons = orca.get_table('persons')\n\n persons = persons.to_frame(columns=[\"mandatory_tour_frequency\",\n \"is_worker\", \"school_taz\", \"workplace_taz\"])\n persons = persons[~persons.mandatory_tour_frequency.isnull()]\n df = process_mandatory_tours(persons)\n\n orca.add_table(\"mandatory_tours\", df)\n tracing.register_traceable_table('mandatory_tours', df)\n pipeline.get_rn_generator().add_channel(df, 'tours')\n\n\n# broadcast mandatory_tours on to persons using the person_id foreign key\norca.broadcast('persons', 'mandatory_tours',\n cast_index=True, onto_on='person_id')\norca.broadcast('persons_merged', 'mandatory_tours',\n cast_index=True, onto_on='person_id')\n", "id": "5896241", "language": "Python", "matching_score": 3.0299911499023438, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/mandatory_tour_frequency.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\nimport os\n\nimport orca\nimport pandas as pd\n\nfrom activitysim.core import simulate as asim\nfrom activitysim.core import tracing\nfrom activitysim.core import pipeline\nfrom activitysim.core import config\n\n\nfrom .util.cdap import run_cdap\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]()\ndef cdap_settings(configs_dir):\n \"\"\"\n canonical model settings file to permit definition of local constants for by\n cdap_indiv_spec and cdap_fixed_relative_proportions\n \"\"\"\n return config.read_model_settings(configs_dir, 'cdap.yaml')\n\n\[email protected]()\ndef cdap_indiv_spec(configs_dir):\n \"\"\"\n spec to compute the activity utilities for each individual hh member\n with no interactions with other household members taken into account\n \"\"\"\n return asim.read_model_spec(configs_dir, 'cdap_indiv_and_hhsize1.csv')\n\n\[email protected]()\ndef cdap_interaction_coefficients(configs_dir):\n \"\"\"\n Rules and coefficients for generating interaction specs for different household sizes\n \"\"\"\n f = os.path.join(configs_dir, 'cdap_interaction_coefficients.csv')\n return pd.read_csv(f, comment='#')\n\n\[email protected]()\ndef cdap_fixed_relative_proportions(configs_dir):\n \"\"\"\n spec to compute/specify the relative proportions of each activity (M, N, H)\n that should be used to choose activities for additional household members\n not handled by CDAP\n\n This spec is handled much like an activitysim logit utility spec,\n EXCEPT that the values computed are relative proportions, not utilities\n (i.e. values are not exponentiated before being normalized to probabilities summing to 1.0)\n \"\"\"\n return asim.read_model_spec(configs_dir, 'cdap_fixed_relative_proportions.csv')\n\n\[email protected]()\ndef cdap_simulate(persons_merged,\n cdap_settings,\n cdap_indiv_spec,\n cdap_interaction_coefficients,\n cdap_fixed_relative_proportions,\n chunk_size, trace_hh_id):\n \"\"\"\n CDAP stands for Coordinated Daily Activity Pattern, which is a choice of\n high-level activity pattern for each person, in a coordinated way with other\n members of a person's household.\n\n Because Python requires vectorization of computation, there are some specialized\n routines in the cdap directory of activitysim for this purpose. 
This module\n simply applies those utilities using the simulation framework.\n \"\"\"\n\n persons_df = persons_merged.to_frame()\n\n constants = config.get_model_constants(cdap_settings)\n\n logger.info(\"Running cdap_simulate with %d persons\" % len(persons_df.index))\n\n choices = run_cdap(persons=persons_df,\n cdap_indiv_spec=cdap_indiv_spec,\n cdap_interaction_coefficients=cdap_interaction_coefficients,\n cdap_fixed_relative_proportions=cdap_fixed_relative_proportions,\n locals_d=constants,\n chunk_size=chunk_size,\n trace_hh_id=trace_hh_id,\n trace_label='cdap')\n\n tracing.print_summary('cdap_activity', choices.cdap_activity, value_counts=True)\n\n print pd.crosstab(persons_df.ptype, choices.cdap_activity, margins=True)\n\n choices = choices.reindex(persons_merged.index)\n orca.add_column(\"persons\", \"cdap_activity\", choices.cdap_activity)\n orca.add_column(\"persons\", \"cdap_rank\", choices.cdap_rank)\n\n pipeline.add_dependent_columns(\"persons\", \"persons_cdap\")\n pipeline.add_dependent_columns(\"households\", \"households_cdap\")\n\n if trace_hh_id:\n\n tracing.trace_df(orca.get_table('persons_merged').to_frame(),\n label=\"cdap\",\n columns=['ptype', 'cdap_rank', 'cdap_activity'],\n warn_if_empty=True)\n", "id": "12678088", "language": "Python", "matching_score": 2.1238858699798584, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/cdap.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\n\nimport orca\n\nfrom activitysim.core import simulate as asim\nfrom activitysim.core import tracing\nfrom activitysim.core import pipeline\nfrom activitysim.core import config\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]()\ndef auto_ownership_spec(configs_dir):\n return asim.read_model_spec(configs_dir, 'auto_ownership.csv')\n\n\[email protected]()\ndef auto_ownership_settings(configs_dir):\n return config.read_model_settings(configs_dir, 'auto_ownership.yaml')\n\n\[email protected]()\ndef auto_ownership_simulate(households_merged,\n auto_ownership_spec,\n auto_ownership_settings,\n trace_hh_id):\n \"\"\"\n Auto ownership is a standard model which predicts how many cars a household\n with given characteristics owns\n \"\"\"\n\n logger.info(\"Running auto_ownership_simulate with %d households\" % len(households_merged))\n\n nest_spec = config.get_logit_model_settings(auto_ownership_settings)\n constants = config.get_model_constants(auto_ownership_settings)\n\n choices = asim.simple_simulate(\n choosers=households_merged.to_frame(),\n spec=auto_ownership_spec,\n nest_spec=nest_spec,\n locals_d=constants,\n trace_label=trace_hh_id and 'auto_ownership',\n trace_choice_name='auto_ownership')\n\n tracing.print_summary('auto_ownership', choices, value_counts=True)\n\n orca.add_column('households', 'auto_ownership', choices)\n\n pipeline.add_dependent_columns('households', 'households_autoown')\n\n if trace_hh_id:\n trace_columns = ['auto_ownership'] + orca.get_table('households_autoown').columns\n tracing.trace_df(orca.get_table('households').to_frame(),\n label='auto_ownership',\n columns=trace_columns,\n warn_if_empty=True)\n", "id": "5384963", "language": "Python", "matching_score": 1.9958103895187378, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/auto_ownership.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport yaml\n\nimport logging\nimport orca\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef setting(key, default=None):\n\n settings = 
orca.get_injectable('settings')\n\n return settings.get(key, default)\n\n\ndef read_model_settings(configs_dir, file_name):\n settings = None\n file_path = os.path.join(configs_dir, file_name)\n if os.path.isfile(file_path):\n with open(file_path) as f:\n settings = yaml.load(f)\n\n if settings is None:\n settings = {}\n\n return settings\n\n\ndef get_model_constants(model_settings):\n \"\"\"\n Read constants from model settings file\n\n Returns\n -------\n constants : dict\n dictionary of constants to add to locals for use by expressions in model spec\n \"\"\"\n return model_settings.get('CONSTANTS', {})\n\n\ndef get_logit_model_settings(model_settings):\n \"\"\"\n Read nest spec (for nested logit) from model settings file\n\n Returns\n -------\n nests : dict\n dictionary specifying nesting structure and nesting coefficients\n\n constants : dict\n dictionary of constants to add to locals for use by expressions in model spec\n \"\"\"\n nests = None\n\n if model_settings is not None:\n\n # default to MNL\n logit_type = model_settings.get('LOGIT_TYPE', 'MNL')\n\n if logit_type not in ['NL', 'MNL']:\n logging.error(\"Unrecognized logit type '%s'\" % logit_type)\n raise RuntimeError(\"Unrecognized logit type '%s'\" % logit_type)\n\n if logit_type == 'NL':\n nests = model_settings.get('NESTS', None)\n if nests is None:\n logger.error(\"No NEST found in model spec for NL model type\")\n raise RuntimeError(\"No NEST found in model spec for NL model type\")\n\n return nests\n", "id": "1640341", "language": "Python", "matching_score": 2.1763436794281006, "max_stars_count": 0, "path": "activitysim/activitysim/core/config.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport orca\nimport pandas as pd\nimport yaml\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]()\ndef configs_dir():\n if not os.path.exists('configs'):\n raise RuntimeError(\"configs_dir: directory does not exist\")\n return 'configs'\n\n\[email protected]()\ndef data_dir():\n if not os.path.exists('data'):\n raise RuntimeError(\"data_dir: directory does not exist\")\n return 'data'\n\n\[email protected]()\ndef output_dir():\n if not os.path.exists('output'):\n raise RuntimeError(\"output_dir: directory does not exist\")\n return 'output'\n\n\[email protected]()\ndef extensions_dir():\n if not os.path.exists('extensions'):\n raise RuntimeError(\"output_dir: directory does not exist\")\n return 'extensions'\n\n\[email protected]()\ndef settings(configs_dir):\n with open(os.path.join(configs_dir, 'settings.yaml')) as f:\n return yaml.load(f)\n\n\[email protected](cache=True)\ndef pipeline_path(output_dir, settings):\n \"\"\"\n Orca injectable to return the path to the pipeline hdf5 file based on output_dir and settings\n \"\"\"\n pipeline_file_name = settings.get('pipeline', 'pipeline.h5')\n pipeline_file_path = os.path.join(output_dir, pipeline_file_name)\n return pipeline_file_path\n", "id": "6722229", "language": "Python", "matching_score": 2.087054967880249, "max_stars_count": 0, "path": "activitysim/activitysim/core/inject_defaults.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport tempfile\n\nimport numpy as np\nimport orca\nimport pytest\nimport yaml\n\n# orca injectables complicate matters because the decorators are executed at module load time\n# and since py.test collects modules and loads them at the start of a run\n# if a test method does something that has a lasting side-effect, then that side effect\n# will carry 
over not just to subsequent test functions, but to subsequently called modules\n# for instance, columns added with add_column will remain attached to orca tables\n# pytest-xdist allows us to run py.test with the --boxed option which runs every function\n# with a brand new python interpreter\n\n# Also note that the following import statement has the side-effect of registering injectables:\nfrom .. import __init__\n\n\ndef test_misc():\n\n orca.clear_cache()\n\n with pytest.raises(RuntimeError) as excinfo:\n orca.get_injectable(\"configs_dir\")\n assert \"directory does not exist\" in str(excinfo.value)\n\n with pytest.raises(RuntimeError) as excinfo:\n orca.get_injectable(\"data_dir\")\n assert \"directory does not exist\" in str(excinfo.value)\n\n with pytest.raises(RuntimeError) as excinfo:\n orca.get_injectable(\"output_dir\")\n assert \"directory does not exist\" in str(excinfo.value)\n\n configs_dir = os.path.join(os.path.dirname(__file__), 'configs_test_misc')\n orca.add_injectable(\"configs_dir\", configs_dir)\n\n settings = orca.get_injectable(\"settings\")\n assert isinstance(settings, dict)\n\n assert orca.get_injectable(\"trace_person_ids\") == []\n\n assert orca.get_injectable(\"trace_tour_ids\") == []\n\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n orca.add_injectable(\"data_dir\", data_dir)\n\n with pytest.raises(RuntimeError) as excinfo:\n orca.get_injectable(\"store\")\n assert \"store file name not specified in settings\" in str(excinfo.value)\n\n settings = {'store': 'bogus.h5'}\n orca.add_injectable(\"settings\", settings)\n with pytest.raises(RuntimeError) as excinfo:\n orca.get_injectable(\"store\")\n assert \"store file not found\" in str(excinfo.value)\n\n # these should be None until overridden\n assert orca.get_injectable(\"hh_index_name\") is None\n assert orca.get_injectable(\"persons_index_name\") is None\n\n # default values if not specified in settings\n assert orca.get_injectable(\"chunk_size\") == 0\n", "id": "5016304", "language": "Python", "matching_score": 3.183468818664551, "max_stars_count": 0, "path": "activitysim/activitysim/abm/test/test_misc.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport tempfile\n\nimport numpy as np\nimport orca\nimport pandas as pd\nimport pandas.util.testing as pdt\nimport pytest\nimport yaml\nimport openmatrix as omx\n\nfrom .. import __init__\nfrom ..tables import size_terms\nfrom . 
import extensions\n\nfrom activitysim.core import tracing\nfrom activitysim.core import pipeline\n\n# set the max households for all tests (this is to limit memory use on travis)\nHOUSEHOLDS_SAMPLE_SIZE = 100\nHH_ID = 961042\n\nSKIP_FULL_RUN = True\nSKIP_FULL_RUN = False\n\n\ndef inject_settings(configs_dir, households_sample_size, chunk_size=None,\n trace_hh_id=None, trace_od=None, check_for_variability=None):\n\n with open(os.path.join(configs_dir, 'settings.yaml')) as f:\n settings = yaml.load(f)\n settings['households_sample_size'] = households_sample_size\n if chunk_size is not None:\n settings['chunk_size'] = chunk_size\n if trace_hh_id is not None:\n settings['trace_hh_id'] = trace_hh_id\n if trace_od is not None:\n settings['trace_od'] = trace_od\n if check_for_variability is not None:\n settings['check_for_variability'] = check_for_variability\n\n orca.add_injectable(\"settings\", settings)\n\n\ndef test_rng_access():\n\n configs_dir = os.path.join(os.path.dirname(__file__), 'configs')\n orca.add_injectable(\"configs_dir\", configs_dir)\n\n output_dir = os.path.join(os.path.dirname(__file__), 'output')\n orca.add_injectable(\"output_dir\", output_dir)\n\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n orca.add_injectable(\"data_dir\", data_dir)\n\n inject_settings(configs_dir, households_sample_size=HOUSEHOLDS_SAMPLE_SIZE)\n\n orca.clear_cache()\n\n pipeline.set_rn_generator_base_seed(0)\n\n pipeline.start_pipeline()\n\n with pytest.raises(RuntimeError) as excinfo:\n pipeline.set_rn_generator_base_seed(0)\n assert \"call set_rn_generator_base_seed before the first step\" in str(excinfo.value)\n\n rng = pipeline.get_rn_generator()\n\n\ndef test_mini_pipeline_run():\n\n configs_dir = os.path.join(os.path.dirname(__file__), 'configs')\n orca.add_injectable(\"configs_dir\", configs_dir)\n\n output_dir = os.path.join(os.path.dirname(__file__), 'output')\n orca.add_injectable(\"output_dir\", output_dir)\n\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n orca.add_injectable(\"data_dir\", data_dir)\n\n inject_settings(configs_dir, households_sample_size=HOUSEHOLDS_SAMPLE_SIZE)\n\n orca.clear_cache()\n\n # assert len(orca.get_table(\"households\").index) == HOUSEHOLDS_SAMPLE_SIZE\n\n _MODELS = [\n 'compute_accessibility',\n 'school_location_sample',\n 'school_location_logsums',\n 'school_location_simulate',\n 'workplace_location_sample',\n 'workplace_location_logsums',\n 'workplace_location_simulate',\n 'auto_ownership_simulate'\n ]\n\n pipeline.run(models=_MODELS, resume_after=None)\n\n auto_choice = pipeline.get_table(\"households\").auto_ownership\n\n # regression test: these are among the first 10 households in households table\n hh_ids = [582398, 93277, 2601277]\n choices = [0, 1, 2]\n expected_choice = pd.Series(choices, index=pd.Index(hh_ids, name=\"HHID\"),\n name='auto_ownership')\n\n print \"auto_choice\\n\", auto_choice.head(10)\n pdt.assert_series_equal(auto_choice[hh_ids], expected_choice)\n\n pipeline.run_model('cdap_simulate')\n pipeline.run_model('mandatory_tour_frequency')\n\n mtf_choice = pipeline.get_table(\"persons\").mandatory_tour_frequency\n\n per_ids = [23712, 93277, 328095]\n choices = ['work1', 'work_and_school', 'school1']\n expected_choice = pd.Series(choices, index=pd.Index(per_ids, name='PERID'),\n name='mandatory_tour_frequency')\n\n print \"mtf_choice\\n\", mtf_choice.head(20)\n pdt.assert_series_equal(mtf_choice[per_ids], expected_choice)\n\n # try to get a non-existant table\n with pytest.raises(RuntimeError) as excinfo:\n 
pipeline.get_table(\"bogus\")\n assert \"not in checkpointed tables\" in str(excinfo.value)\n\n # try to get an existing table from a non-existant checkpoint\n with pytest.raises(RuntimeError) as excinfo:\n pipeline.get_table(\"households\", checkpoint_name=\"bogus\")\n assert \"not in checkpoints\" in str(excinfo.value)\n\n pipeline.close()\n\n orca.clear_cache()\n\n\ndef test_mini_pipeline_run2():\n\n # the important thing here is that we should get\n # exactly the same results as for test_mini_pipeline_run\n # when we restart pipeline\n\n configs_dir = os.path.join(os.path.dirname(__file__), 'configs')\n orca.add_injectable(\"configs_dir\", configs_dir)\n\n output_dir = os.path.join(os.path.dirname(__file__), 'output')\n orca.add_injectable(\"output_dir\", output_dir)\n\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n orca.add_injectable(\"data_dir\", data_dir)\n\n inject_settings(configs_dir, households_sample_size=HOUSEHOLDS_SAMPLE_SIZE)\n\n orca.clear_cache()\n\n # should be able to get this BEFORE pipeline is opened\n checkpoints_df = pipeline.get_checkpoints()\n prev_checkpoint_count = len(checkpoints_df.index)\n assert prev_checkpoint_count == 11\n\n pipeline.start_pipeline('auto_ownership_simulate')\n\n auto_choice = pipeline.get_table(\"households\").auto_ownership\n\n # regression test: these are the same as in test_mini_pipeline_run1\n hh_ids = [582398, 93277, 2601277]\n choices = [0, 1, 2]\n expected_choice = pd.Series(choices, index=pd.Index(hh_ids, name=\"HHID\"),\n name='auto_ownership')\n\n print \"auto_choice\\n\", auto_choice.head(4)\n pdt.assert_series_equal(auto_choice[hh_ids], expected_choice)\n\n # try to run a model already in pipeline\n with pytest.raises(RuntimeError) as excinfo:\n pipeline.run_model('auto_ownership_simulate')\n assert \"run model 'auto_ownership_simulate' more than once\" in str(excinfo.value)\n\n # and these new ones\n pipeline.run_model('cdap_simulate')\n pipeline.run_model('mandatory_tour_frequency')\n\n mtf_choice = pipeline.get_table(\"persons\").mandatory_tour_frequency\n\n per_ids = [23712, 93277, 328095]\n choices = ['work1', 'work_and_school', 'school1']\n expected_choice = pd.Series(choices, index=pd.Index(per_ids, name='PERID'),\n name='mandatory_tour_frequency')\n\n print \"mtf_choice\\n\", mtf_choice.head(20)\n pdt.assert_series_equal(mtf_choice[per_ids], expected_choice)\n\n # should be able to get this before pipeline is closed (from existing open store)\n assert orca.get_injectable('pipeline_store') is not None\n checkpoints_df = pipeline.get_checkpoints()\n assert len(checkpoints_df.index) == prev_checkpoint_count\n\n pipeline.close()\n\n # should also be able to get this after pipeline is closed (open and close)\n assert orca.get_injectable('pipeline_store') is None\n checkpoints_df = pipeline.get_checkpoints()\n assert len(checkpoints_df.index) == prev_checkpoint_count\n\n orca.clear_cache()\n\n\ndef full_run(resume_after=None, chunk_size=0,\n households_sample_size=HOUSEHOLDS_SAMPLE_SIZE,\n trace_hh_id=None, trace_od=None, check_for_variability=None):\n\n configs_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'example', 'configs')\n orca.add_injectable(\"configs_dir\", configs_dir)\n\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n orca.add_injectable(\"data_dir\", data_dir)\n\n output_dir = os.path.join(os.path.dirname(__file__), 'output')\n orca.add_injectable(\"output_dir\", output_dir)\n\n inject_settings(configs_dir,\n households_sample_size=households_sample_size,\n 
chunk_size=chunk_size,\n trace_hh_id=trace_hh_id,\n trace_od=trace_od,\n check_for_variability=check_for_variability)\n\n orca.clear_cache()\n\n tracing.config_logger()\n\n # assert orca.get_injectable(\"chunk_size\") == chunk_size\n\n _MODELS = [\n 'compute_accessibility',\n 'school_location_sample',\n 'school_location_logsums',\n 'school_location_simulate',\n 'workplace_location_sample',\n 'workplace_location_logsums',\n 'workplace_location_simulate',\n 'auto_ownership_simulate',\n 'cdap_simulate',\n 'mandatory_tour_frequency',\n 'mandatory_scheduling',\n 'non_mandatory_tour_frequency',\n 'destination_choice',\n 'non_mandatory_scheduling',\n 'tour_mode_choice_simulate',\n 'create_simple_trips',\n 'trip_mode_choice_simulate'\n ]\n\n pipeline.run(models=_MODELS, resume_after=resume_after)\n\n tours = pipeline.get_table('tours')\n tour_count = len(tours.index)\n\n pipeline.close()\n\n orca.clear_cache()\n\n return tour_count\n\n\ndef get_trace_csv(file_name):\n\n output_dir = os.path.join(os.path.dirname(__file__), 'output')\n\n df = pd.read_csv(os.path.join(output_dir, file_name))\n\n # label value_1 value_2 value_3 value_4\n # 0 tour_id 38 201 39 40\n # 1 mode DRIVE_LOC DRIVE_COM DRIVE_LOC DRIVE_LOC\n # 2 person_id 1888694 1888695 1888695 1888696\n # 3 tour_type work othmaint work school\n # 4 tour_num 1 1 1 1\n\n # transpose df and rename columns\n labels = df.label.values\n df = df.transpose()[1:]\n df.columns = labels\n\n return df\n\n\nEXPECT_PERSON_IDS = ['1888694', '1888695', '1888696', '1888696']\nEXPECT_TOUR_TYPES = ['work', 'work', 'othdiscr', 'social']\nEXPECT_MODES = ['DRIVE_LOC', 'DRIVE_LOC', 'DRIVEALONEPAY', 'DRIVEALONEPAY']\nEXPECT_TOUR_COUNT = 155\n\n\ndef test_full_run1():\n\n if SKIP_FULL_RUN:\n return\n\n tour_count = full_run(trace_hh_id=HH_ID, check_for_variability=True,\n households_sample_size=HOUSEHOLDS_SAMPLE_SIZE)\n\n assert(tour_count == EXPECT_TOUR_COUNT)\n\n mode_df = get_trace_csv('tour_mode_choice.mode.csv')\n mode_df.sort_values(by=['person_id', 'tour_type', 'tour_num'], inplace=True)\n\n print mode_df\n # tour_id mode person_id tour_type tour_num\n # value_2 20775643 DRIVE_LOC 1888694 work 1\n # value_3 20775644 DRIVE_LOC 1888694 work 2\n # value_4 20775650 DRIVE_LOC 1888695 school 1\n # value_5 20775651 DRIVE_LOC 1888695 school 2\n # value_1 20775660 DRIVEALONEPAY 1888696 othmaint 1\n\n assert (mode_df.person_id.values == EXPECT_PERSON_IDS).all()\n assert (mode_df.tour_type.values == EXPECT_TOUR_TYPES).all()\n assert (mode_df['mode'].values == EXPECT_MODES).all()\n\n\ndef test_full_run2():\n\n # resume_after should successfully load tours table and replicate results\n\n if SKIP_FULL_RUN:\n return\n\n tour_count = full_run(resume_after='non_mandatory_scheduling', trace_hh_id=HH_ID)\n\n assert(tour_count == EXPECT_TOUR_COUNT)\n\n mode_df = get_trace_csv('tour_mode_choice.mode.csv')\n mode_df.sort_values(by=['person_id', 'tour_type', 'tour_num'], inplace=True)\n\n assert (mode_df.person_id.values == EXPECT_PERSON_IDS).all()\n assert (mode_df.tour_type.values == EXPECT_TOUR_TYPES).all()\n assert (mode_df['mode'].values == EXPECT_MODES).all()\n\n\ndef test_full_run_with_chunks():\n\n # should get the same result with different chunk size\n\n if SKIP_FULL_RUN:\n return\n\n tour_count = full_run(trace_hh_id=HH_ID,\n households_sample_size=HOUSEHOLDS_SAMPLE_SIZE,\n chunk_size=10000)\n\n assert(tour_count == EXPECT_TOUR_COUNT)\n\n mode_df = get_trace_csv('tour_mode_choice.mode.csv')\n mode_df.sort_values(by=['person_id', 'tour_type', 'tour_num'], inplace=True)\n\n 
assert (mode_df.person_id.values == EXPECT_PERSON_IDS).all()\n assert (mode_df.tour_type.values == EXPECT_TOUR_TYPES).all()\n assert (mode_df['mode'].values == EXPECT_MODES).all()\n\n\ndef test_full_run_stability():\n\n # hh should get the same result with different sample size\n\n if SKIP_FULL_RUN:\n return\n\n tour_count = full_run(trace_hh_id=HH_ID,\n households_sample_size=HOUSEHOLDS_SAMPLE_SIZE+10)\n\n mode_df = get_trace_csv('tour_mode_choice.mode.csv')\n mode_df.sort_values(by=['person_id', 'tour_type', 'tour_num'], inplace=True)\n\n print mode_df\n\n assert (mode_df.person_id.values == EXPECT_PERSON_IDS).any()\n assert (mode_df.tour_type.values == EXPECT_TOUR_TYPES).any()\n assert (mode_df['mode'].values == EXPECT_MODES).any()\n", "id": "11026199", "language": "Python", "matching_score": 5.61209774017334, "max_stars_count": 0, "path": "activitysim/activitysim/abm/test/test_pipeline.py" }, { "content": "import orca\nfrom activitysim import abm\nfrom activitysim.core import tracing\nimport pandas as pd\nimport numpy as np\nimport os\n\nfrom activitysim.core.tracing import print_elapsed_time\n\nfrom activitysim.core import pipeline\nimport extensions\n\n\n# comment out the line below to default base seed to 0 random seed\n# so that run results are reproducible\n# pipeline.set_rn_generator_base_seed(seed=None)\n\n\ntracing.config_logger()\n\nt0 = print_elapsed_time()\n\n_MODELS = [\n 'compute_accessibility',\n 'school_location_sample',\n 'school_location_logsums',\n 'school_location_simulate',\n 'workplace_location_sample', # ~3 hours\n 'workplace_location_logsums',\n 'workplace_location_simulate',\n 'auto_ownership_simulate',\n 'cdap_simulate',\n 'mandatory_tour_frequency',\n 'mandatory_scheduling',\n 'non_mandatory_tour_frequency',\n 'destination_choice',\n 'non_mandatory_scheduling',\n 'tour_mode_choice_simulate',\n 'create_simple_trips',\n 'trip_mode_choice_simulate'\n]\n\n\n# If you provide a resume_after argument to pipeline.run\n# the pipeline manager will attempt to load checkpointed tables from the checkpoint store\n# and resume pipeline processing on the next submodel step after the specified checkpoint\nresume_after = None\n#resume_after = 'school_location_logsums'\n\npipeline.run(models=_MODELS, resume_after=resume_after)\n\nprint \"\\n#### run completed\"\n\n# retrieve the state of a checkpointed table after a specific model was run\ndf = pipeline.get_table(table_name=\"persons\", checkpoint_name=\"school_location_simulate\")\nprint \"\\npersons table columns after school_location_simulate:\", df.columns.values\n\n# get_table without checkpoint_name returns the latest version of the table\ndf = pipeline.get_table(\"tours\")\nprint \"\\ntour_type value counts\\n\", df.tour_type.value_counts()\n\n# get_table for a computed (non-checkpointed, internal, orca) table\n# return the most recent value of a (non-checkpointed, internal) computed table\ndf = pipeline.get_table(\"persons_merged\")\ndf = df[['household_id', 'age', 'auPkTotal', 'roundtrip_auto_time_to_work']]\nprint \"\\npersons_merged selected columns\\n\", df.head(20)\n\n# write final versions of all checkpointed dataframes to CSV files to review results\nfor table_name in pipeline.checkpointed_tables():\n file_name = \"final_%s_table.csv\" % table_name\n file_path = os.path.join(orca.get_injectable(\"output_dir\"), file_name)\n pipeline.get_table(table_name).to_csv(file_path)\n\n# tables will no longer be available after pipeline is closed\npipeline.close()\n\n# write checkpoints (this can be called whether of not 
pipeline is open)\nfile_path = os.path.join(orca.get_injectable(\"output_dir\"), \"checkpoints.csv\")\npipeline.get_checkpoints().to_csv(file_path)\n\nt0 = print_elapsed_time(\"all models\", t0)\n", "id": "6802512", "language": "Python", "matching_score": 3.696425676345825, "max_stars_count": 0, "path": "activitysim/example/simulation.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport accessibility\nimport auto_ownership\nimport destination\nimport mandatory_tour_frequency\nimport non_mandatory_tour_frequency\nimport mandatory_scheduling\nimport non_mandatory_scheduling\nimport school_location\nimport workplace_location\nimport mode\nimport cdap\nimport create_trips\n", "id": "3488557", "language": "Python", "matching_score": 1.8628937005996704, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/__init__.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport orca\nimport pandas as pd\n\nfrom activitysim.core import simulate as asim\nfrom activitysim.core import tracing\nfrom activitysim.core import config\n\nfrom .util.vectorize_tour_scheduling import vectorize_tour_scheduling\n\n\nlogger = logging.getLogger(__name__)\n\n\n@orca.table()\ndef tdd_non_mandatory_spec(configs_dir):\n    return asim.read_model_spec(configs_dir, 'tour_departure_and_duration_nonmandatory.csv')\n\n\n@orca.injectable()\ndef non_mandatory_scheduling_settings(configs_dir):\n    return config.read_model_settings(configs_dir, 'non_mandatory_scheduling.yaml')\n\n\n@orca.step()\ndef non_mandatory_scheduling(non_mandatory_tours_merged,\n                             tdd_alts,\n                             tdd_non_mandatory_spec,\n                             non_mandatory_scheduling_settings,\n                             chunk_size,\n                             trace_hh_id):\n    \"\"\"\n    This model predicts the departure time and duration of each activity for\n    non-mandatory tours\n    \"\"\"\n\n    tours = non_mandatory_tours_merged.to_frame()\n\n    logger.info(\"Running non_mandatory_scheduling with %d tours\" % len(tours))\n\n    constants = config.get_model_constants(non_mandatory_scheduling_settings)\n\n    spec = tdd_non_mandatory_spec.to_frame()\n    alts = tdd_alts.to_frame()\n\n    choices = vectorize_tour_scheduling(tours, alts, spec,\n                                        constants=constants,\n                                        chunk_size=chunk_size,\n                                        trace_label='non_mandatory_scheduling')\n\n    tracing.print_summary('non_mandatory_scheduling tour_departure_and_duration',\n                          choices, describe=True)\n\n    orca.add_column(\n        \"non_mandatory_tours\", \"tour_departure_and_duration\", choices)\n\n    if trace_hh_id:\n        tracing.trace_df(orca.get_table('non_mandatory_tours').to_frame(),\n                         label=\"non_mandatory_tours\",\n                         slicer='person_id',\n                         index_label='tour_id',\n                         columns=None,\n                         warn_if_empty=True)\n", "id": "6022352", "language": "Python", "matching_score": 4.654559135437012, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/non_mandatory_scheduling.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport orca\nimport pandas as pd\n\nfrom activitysim.core import simulate as asim\nfrom activitysim.core import tracing\nfrom activitysim.core import config\n\nfrom .util.vectorize_tour_scheduling import vectorize_tour_scheduling\n\nlogger = logging.getLogger(__name__)\n\n\n@orca.injectable()\ndef mandatory_scheduling_settings(configs_dir):\n    return config.read_model_settings(configs_dir, 'mandatory_scheduling.yaml')\n\n\n@orca.table()\ndef tdd_alts(configs_dir):\n    # right now this file just contains the start and end hour\n    f = os.path.join(configs_dir, 'tour_departure_and_duration_alternatives.csv')\n    return pd.read_csv(f)\n
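\n# Each row of tdd_alts is one candidate (start hour, end hour) combination, so\n# columns added to this table (like the computed duration below) are available\n# to the scheduling spec expressions that score each alternative.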
\n\n# used to have duration in the actual alternative csv file,\n# but this is probably better as a computed column like this\n@orca.column(\"tdd_alts\")\ndef duration(tdd_alts):\n    return tdd_alts.end - tdd_alts.start\n\n\n@orca.table()\ndef tdd_work_spec(configs_dir):\n    return asim.read_model_spec(configs_dir, 'tour_departure_and_duration_work.csv')\n\n\n@orca.table()\ndef tdd_school_spec(configs_dir):\n    return asim.read_model_spec(configs_dir, 'tour_departure_and_duration_school.csv')\n\n\n# I think it's easier to do this in one model so you can merge the two\n# resulting series together right away\n@orca.step()\ndef mandatory_scheduling(mandatory_tours_merged,\n                         tdd_alts,\n                         tdd_school_spec,\n                         tdd_work_spec,\n                         mandatory_scheduling_settings,\n                         chunk_size,\n                         trace_hh_id):\n    \"\"\"\n    This model predicts the departure time and duration of each activity for\n    mandatory tours\n    \"\"\"\n\n    tours = mandatory_tours_merged.to_frame()\n    alts = tdd_alts.to_frame()\n\n    constants = config.get_model_constants(mandatory_scheduling_settings)\n\n    school_spec = tdd_school_spec.to_frame()\n    school_tours = tours[tours.tour_type == \"school\"]\n\n    logger.info(\"Running mandatory_scheduling school_tours with %d tours\" % len(school_tours))\n\n    school_choices = vectorize_tour_scheduling(\n        school_tours, alts, school_spec,\n        constants=constants,\n        chunk_size=chunk_size,\n        trace_label='mandatory_scheduling.school')\n\n    work_spec = tdd_work_spec.to_frame()\n    work_tours = tours[tours.tour_type == \"work\"]\n\n    logger.info(\"Running %d work tour scheduling choices\" % len(work_tours))\n\n    work_choices = vectorize_tour_scheduling(\n        work_tours, alts, work_spec,\n        constants=constants,\n        chunk_size=chunk_size,\n        trace_label='mandatory_scheduling.work')\n\n    choices = pd.concat([school_choices, work_choices])\n\n    tracing.print_summary('mandatory_scheduling tour_departure_and_duration',\n                          choices, describe=True)\n\n    orca.add_column(\n        \"mandatory_tours\", \"tour_departure_and_duration\", choices)\n\n    if trace_hh_id:\n        tracing.trace_df(orca.get_table('mandatory_tours').to_frame(),\n                         label=\"mandatory_tours\",\n                         slicer='person_id',\n                         index_label='tour',\n                         columns=None,\n                         warn_if_empty=True)\n", "id": "11259168", "language": "Python", "matching_score": 2.4819412231445312, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/mandatory_scheduling.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\n\nimport orca\nimport numpy as np\nimport pandas as pd\n\nfrom activitysim.core.util import reindex\n\n\nlogger = logging.getLogger(__name__)\n\n\n@orca.table()\ndef tours(non_mandatory_tours, mandatory_tours, tdd_alts):\n\n    non_mandatory_df = non_mandatory_tours.local\n    mandatory_df = mandatory_tours.local\n\n    # don't expect indexes to overlap\n    assert len(non_mandatory_df.index.intersection(mandatory_df.index)) == 0\n\n    # expect non-overlapping indexes (so the tripids dont change)\n    assert len(np.intersect1d(non_mandatory_df.index, mandatory_df.index, assume_unique=True)) == 0\n\n    tours = pd.concat([non_mandatory_tours.to_frame(),\n                       mandatory_tours.to_frame()],\n                      ignore_index=False)\n\n    # go ahead here and add the start, end, and duration here for future use\n    chosen_tours = tdd_alts.to_frame().loc[tours.tour_departure_and_duration]\n    chosen_tours.index = tours.index\n\n    df = pd.concat([tours, chosen_tours], axis=1)\n    assert df.index.name == 'tour_id'\n\n    # replace table function with dataframe\n    orca.add_table('tours', df)\n\n    return df\n
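\n\n# At this point the combined tours table holds both mandatory and non-mandatory\n# tours indexed by tour_id, with the chosen alternative's start, end and duration\n# attached so downstream models don't have to look them up again.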
\n\n@orca.table()\ndef mandatory_tours_merged(mandatory_tours, persons_merged):\n    return orca.merge_tables(mandatory_tours.name,\n                             [mandatory_tours, persons_merged])\n\n\n@orca.table()\ndef non_mandatory_tours_merged(non_mandatory_tours, persons_merged):\n    tours = non_mandatory_tours\n    return orca.merge_tables(tours.name, tables=[\n        tours, persons_merged])\n\n\n@orca.table()\ndef tours_merged(tours, persons_merged):\n    return orca.merge_tables(tours.name, tables=[\n        tours, persons_merged])\n\n\n# broadcast persons onto the tours tables using the person_id\norca.broadcast('persons', 'non_mandatory_tours',\n               cast_index=True, onto_on='person_id')\norca.broadcast('persons_merged', 'non_mandatory_tours',\n               cast_index=True, onto_on='person_id')\norca.broadcast('persons_merged', 'tours', cast_index=True, onto_on='person_id')\n\n\n@orca.column(\"tours\")\ndef sov_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef hov2_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef hov2toll_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef hov3_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef sovtoll_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef drive_local_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef drive_lrf_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef drive_express_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef drive_heavyrail_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef drive_commuter_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef walk_local_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef walk_lrf_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef walk_commuter_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef walk_express_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef walk_heavyrail_available(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef is_joint(tours):\n    # FIXME\n    return pd.Series(False, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef is_not_joint(tours):\n    # FIXME\n    return pd.Series(True, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef number_of_participants(tours):\n    # FIXME\n    return pd.Series(1, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef work_tour_is_drive(tours):\n    # FIXME\n    # FIXME note that there's something about whether this is a subtour?\n    # FIXME though I'm not sure how it can be a subtour in the tour mode choice\n    return pd.Series(0, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef terminal_time(tours):\n    # FIXME\n    return pd.Series(0, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef origin_walk_time(tours):\n    # FIXME\n    return pd.Series(0, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef 
destination_walk_time(tours):\n    # FIXME\n    return pd.Series(0, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef daily_parking_cost(tours):\n    # FIXME - this is a computation based on the tour destination\n    return pd.Series(0, index=tours.index)\n\n\n@orca.column(\"tours\")\ndef dest_density_index(tours, land_use):\n    return reindex(land_use.density_index,\n                   tours.destination)\n\n\n@orca.column(\"tours\")\ndef dest_topology(tours, land_use):\n    return reindex(land_use.TOPOLOGY,\n                   tours.destination)\n\n\n@orca.column(\"tours\")\ndef out_period(tours, settings):\n    cats = pd.cut(tours.end,\n                  settings['time_periods']['hours'],\n                  labels=settings['time_periods']['labels'])\n    # cut returns labelled categories but we convert to str\n    return cats.astype(str)\n\n\n@orca.column(\"tours\")\ndef in_period(tours, settings):\n    cats = pd.cut(tours.start,\n                  settings['time_periods']['hours'],\n                  labels=settings['time_periods']['labels'])\n    # cut returns labelled categories but we convert to str\n    return cats.astype(str)\n", "id": "11492934", "language": "Python", "matching_score": 3.9447286128997803, "max_stars_count": 0, "path": "activitysim/activitysim/abm/tables/tours.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport logging\n\nimport orca\nimport pandas as pd\n\nlogger = logging.getLogger(__name__)\n\n\n@orca.column(\"trips\")\ndef start_period(trips, settings):\n    cats = pd.cut(trips.start_trip,\n                  settings['time_periods']['hours'],\n                  labels=settings['time_periods']['labels'])\n    # cut returns labelled categories but we convert to str\n    return cats.astype(str)\n\n\n@orca.table()\ndef trips_merged(trips, tours):\n    return orca.merge_tables(trips.name, tables=[\n        trips, tours])\n\norca.broadcast('tours', 'trips', cast_index=True, onto_on='tour_id')\n", "id": "6758554", "language": "Python", "matching_score": 1.4219855070114136, "max_stars_count": 0, "path": "activitysim/activitysim/abm/tables/trips.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\nimport os\nimport logging\n\nimport orca\nimport pandas as pd\nimport numpy as np\n\nfrom activitysim.core.util import reindex\n\nfrom activitysim.core import tracing\nfrom activitysim.core import pipeline\n\nlogger = logging.getLogger(__name__)\n\n\n@orca.step()\ndef create_simple_trips(tours, households, persons, trace_hh_id):\n    \"\"\"\n    Create a simple trip table\n    \"\"\"\n\n    logger.info(\"Running simple trips table creation with %d tours\" % len(tours.index))\n\n    tours_df = tours.to_frame()\n\n    # we now have a tour_id column\n    tours_df.reset_index(inplace=True)\n\n    tours_df['household_id'] = reindex(persons.household_id, tours_df.person_id)\n    tours_df['TAZ'] = reindex(households.TAZ, tours_df.household_id)\n\n    # create inbound and outbound records\n    trips = pd.concat([tours_df, tours_df], ignore_index=True)\n\n    # first half are outbound, second half are inbound\n    trips['INBOUND'] = np.repeat([False, True], len(trips.index)/2)\n\n    # TRIPID for outbound trips = 1, inbound_trips = 2\n    trips['trip_num'] = np.repeat([1, 2], len(trips.index)/2)\n\n    # set key fields from tour fields: 'TAZ','destination','start','end'\n    trips['OTAZ'] = trips.TAZ\n    trips['OTAZ'][trips.INBOUND] = trips.destination[trips.INBOUND]\n\n    trips['DTAZ'] = trips.destination\n    trips['DTAZ'][trips.INBOUND] = trips.TAZ[trips.INBOUND]\n\n    trips['start_trip'] = trips.start\n    trips['start_trip'][trips.INBOUND] = trips.end[trips.INBOUND]\n\n    trips['end_trip'] = trips.end\n    trips['end_trip'][trips.INBOUND] = trips.start[trips.INBOUND]\n
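\n    # Each tour yields exactly two trips, so trip ids can be derived from tour\n    # ids below: e.g. tour_id 100 -> trip_id 200 for the outbound leg (trip_num 1)\n    # and trip_id 201 for the return leg (trip_num 2).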
\n    # create a stable (predictable) index based on tour_id and trip_num\n    possible_trips_count = 2\n    trips['trip_id'] = (trips.tour_id * possible_trips_count) + (trips.trip_num - 1)\n    trips.set_index('trip_id', inplace=True, verify_integrity=True)\n\n    trip_columns = ['tour_id', 'INBOUND', 'trip_num', 'OTAZ', 'DTAZ', 'start_trip', 'end_trip']\n    trips = trips[trip_columns]\n\n    orca.add_table(\"trips\", trips)\n\n    tracing.register_traceable_table('trips', trips)\n    pipeline.get_rn_generator().add_channel(trips, 'trips')\n\n    if trace_hh_id:\n        tracing.trace_df(trips,\n                         label=\"trips\",\n                         warn_if_empty=True)\n", "id": "1527705", "language": "Python", "matching_score": 1.7124360799789429, "max_stars_count": 0, "path": "activitysim/activitysim/abm/models/create_trips.py" }, { "content": "import pandas as pd\ndf = pd.read_csv('../output/final_trips_table.csv')\ndf = df[df['trip_mode'].isin(['DRIVEALONEFREE','DRIVE_LRF','SHARED2PAY','SHARED3FREE','SHARED2FREE','DRIVE_COM','DRIVEALONEPAY','DRIVE_LOC','DRIVE_HVY','DRIVE_EXP'])]\ndf = df[(df['end_trip'] < 21) & (df['start_trip'] > 17)]\ndemand = df[['trip_id','OTAZ','DTAZ']].groupby(['OTAZ','DTAZ'], as_index=False).agg('count')\ndemand.rename(columns={'trip_id':'trips'}, inplace=True)\ndemand.to_csv('../output/pm_peak.csv', index=False)\n\n\n", "id": "4680772", "language": "Python", "matching_score": 0.18365874886512756, "max_stars_count": 0, "path": "activitysim/example/extensions/demand_gen.py" }, { "content": "import orca\nfrom urbansim.utils import misc\n\nimport pandas as pd\nimport numpy as np\n\n\n#########################\n# ZONES VARIABLES #\n#########################\n\n# these are primarily used for calculating skim-based\n# accessibilities\n\n@orca.column('zones', cache=True)\ndef total_jobs(jobs, zones):\n    return jobs.zone_id_work.groupby(\n        jobs.zone_id_work).count().reindex(zones.index).fillna(0)\n\n\n@orca.column('zones', cache=True)\ndef sum_residential_units(parcels, buildings, zones):\n    s = buildings.residential_units.groupby(\n        buildings.parcel_id).sum().groupby(parcels.zone_id).sum()\n    return s.reindex(zones.index).fillna(0)\n\n\n@orca.column('zones', cache=True)\ndef sum_persons(households, buildings, parcels, zones):\n    s = households.persons.groupby(\n        households.building_id).sum().groupby(\n        buildings.parcel_id).sum().groupby(parcels.zone_id).sum()\n    return s.reindex(zones.index).fillna(0)\n\n\n@orca.column('zones', cache=True)\ndef sum_income(households, buildings, parcels, zones):\n    s = households.income.groupby(\n        households.building_id).sum().groupby(\n        buildings.parcel_id).sum().groupby(parcels.zone_id).sum()\n    return s.reindex(zones.index).fillna(0)\n\n\n@orca.column('zones', cache=True)\ndef avg_income(households, buildings, parcels, zones):\n    s = households.income.groupby(\n        households.building_id).mean().groupby(\n        buildings.parcel_id).mean().groupby(parcels.zone_id).mean()\n    return s.reindex(zones.index).fillna(0)\n\n\n############################\n# small drive network vars #\n############################\n\n\n@orca.column('parcels')\ndef node_id_small(parcels, netsmall):\n    idssmall_parcel = netsmall.get_node_ids(parcels.x, parcels.y)\n    return idssmall_parcel\n\n\n@orca.column('rentals')\ndef node_id_small(rentals, netsmall):\n    idssmall_rentals = netsmall.get_node_ids(\n        rentals.longitude, rentals.latitude)\n    return idssmall_rentals\n\n\n@orca.column('buildings')\ndef node_id_small(parcels, buildings):\n    return misc.reindex(parcels.node_id_small, buildings.parcel_id)\n
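\n\n# Only parcels and rentals are geocoded to a network node directly via\n# get_node_ids above; the tables below inherit the parcel's node id through\n# their foreign keys (building -> parcel, unit -> building, household -> unit,\n# person -> household, job -> building) using misc.reindex.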
\n\n@orca.column('units')\ndef node_id_small(buildings, units):\n    return misc.reindex(buildings.node_id_small, units.building_id)\n\n\n@orca.column('households')\ndef node_id_small(units, households):\n    return misc.reindex(units.node_id_small, households.unit_id)\n\n\n@orca.column('persons')\ndef node_id_small(households, persons):\n    return misc.reindex(households.node_id_small, persons.household_id)\n\n\n@orca.column('jobs')\ndef node_id_small(buildings, jobs):\n    return misc.reindex(buildings.node_id_small, jobs.building_id)\n\n\n###########################\n# walk network vars #\n###########################\n@orca.column('parcels')\ndef node_id_walk(parcels, netwalk):\n    idswalk_parcel = netwalk.get_node_ids(parcels.x, parcels.y)\n    return idswalk_parcel\n\n\n@orca.column('rentals')\ndef node_id_walk(rentals, netwalk):\n    idswalk_rentals = netwalk.get_node_ids(rentals.longitude, rentals.latitude)\n    return idswalk_rentals\n\n\n@orca.column('buildings')\ndef node_id_walk(parcels, buildings):\n    return misc.reindex(parcels.node_id_walk, buildings.parcel_id)\n\n\n@orca.column('units')\ndef node_id_walk(buildings, units):\n    return misc.reindex(buildings.node_id_walk, units.building_id)\n\n\n@orca.column('households')\ndef node_id_walk(units, households):\n    return misc.reindex(units.node_id_walk, households.unit_id)\n\n\n@orca.column('persons')\ndef node_id_walk(households, persons):\n    return misc.reindex(households.node_id_walk, persons.household_id)\n\n\n@orca.column('jobs')\ndef node_id_walk(buildings, jobs):\n    return misc.reindex(buildings.node_id_walk, jobs.building_id)\n\n\n##########################\n# small network aggregation\n##########################\n\ndef fill_median(Series): # replace inf and NaN with median\n    return Series.replace([np.inf, -np.inf],np.nan).fillna(Series.median())\n\n@orca.column('nodessmall')\ndef pop_jobs_ratio_10000(nodessmall):\n    return fill_median(nodessmall['pop_10000'] / nodessmall['jobs_10000'])\n    \n@orca.column('nodessmall')\ndef pop_jobs_ratio_25000(nodessmall):\n    return fill_median(nodessmall['pop_25000'] / nodessmall['jobs_25000'])    \n\n##########################\n# walk network aggregation\n##########################\n\n@orca.column('nodeswalk')\ndef prop_children_500_walk(nodeswalk):\n    return fill_median((nodeswalk['children_500_walk'] > 0).astype(int) / nodeswalk['hh_500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_singles_500_walk (nodeswalk):\n    return fill_median(nodeswalk['singles_500_walk'] / nodeswalk['hh_500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_elderly_500_walk (nodeswalk):\n    return fill_median(nodeswalk['elderly_hh_500_walk'] / nodeswalk['hh_500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_black_500_walk (nodeswalk):\n    return fill_median(nodeswalk['pop_black_500_walk'] / nodeswalk['pop_500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_white_500_walk (nodeswalk):\n    return fill_median(nodeswalk['pop_white_500_walk'] / nodeswalk['pop_500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_asian_500_walk (nodeswalk):\n    return fill_median(nodeswalk['pop_asian_500_walk'] / nodeswalk['pop_500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_hisp_500_walk (nodeswalk):\n    return fill_median(nodeswalk['pop_hisp_500_walk'] / nodeswalk['pop_500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_rich_500_walk (nodeswalk):\n    return fill_median(nodeswalk['rich_500_walk'] / nodeswalk['pop_500_walk'])\n\n@orca.column('nodeswalk')\ndef 
prop_poor_500_walk (nodeswalk):\n    return fill_median(nodeswalk['poor_500_walk'] / nodeswalk['pop_500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_children_1500_walk (nodeswalk):\n    return fill_median((nodeswalk['children_1500_walk'] > 0).astype(int)/nodeswalk['hh_1500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_singles_1500_walk (nodeswalk):\n    return fill_median(nodeswalk['singles_1500_walk'] / nodeswalk['hh_1500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_elderly_1500_walk (nodeswalk):\n    return fill_median(nodeswalk['elderly_hh_1500_walk'] / nodeswalk['hh_1500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_black_1500_walk (nodeswalk):\n    return fill_median(nodeswalk['pop_black_1500_walk'] / nodeswalk['pop_1500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_white_1500_walk (nodeswalk):\n    return fill_median(nodeswalk['pop_white_1500_walk'] / nodeswalk['pop_1500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_asian_1500_walk (nodeswalk):\n    return fill_median(nodeswalk['pop_asian_1500_walk'] / nodeswalk['pop_1500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_hisp_1500_walk (nodeswalk):\n    return fill_median(nodeswalk['pop_hisp_1500_walk'] / nodeswalk['pop_1500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_rich_1500_walk (nodeswalk):\n    return fill_median(nodeswalk['rich_1500_walk'] / nodeswalk['pop_1500_walk'])\n\n@orca.column('nodeswalk')\ndef prop_poor_1500_walk (nodeswalk):\n    return fill_median(nodeswalk['poor_1500_walk'] / nodeswalk['pop_1500_walk'])\n\n@orca.column('nodeswalk')\ndef pop_jobs_ratio_1500_walk (nodeswalk):\n    return fill_median(nodeswalk['pop_1500_walk'] / (nodeswalk['jobs_500_walk']))\n\n@orca.column('nodeswalk')\ndef avg_hhs_500_walk (nodeswalk):\n    return fill_median(nodeswalk['pop_500_walk'] / (nodeswalk['hh_500_walk']))\n\n@orca.column('nodeswalk')\ndef avg_hhs_1500_walk (nodeswalk):\n    return fill_median(nodeswalk['pop_1500_walk'] / (nodeswalk['hh_1500_walk']))\n\n###########################\n# beam network vars #\n###########################\n# @orca.column('parcels')\n# def node_id_beam(parcels, netbeam):\n#     idsbeam_parcel = netbeam.get_node_ids(parcels.x, parcels.y)\n#     return idsbeam_parcel\n\n\n# @orca.column('rentals')\n# def node_id_beam(rentals, netbeam):\n#     idsbeam_rentals = netbeam.get_node_ids(\n#         rentals.longitude, rentals.latitude)\n#     return idsbeam_rentals\n\n\n# @orca.column('buildings')\n# def node_id_beam(parcels, buildings):\n#     return misc.reindex(parcels.node_id_beam, buildings.parcel_id)\n\n\n# @orca.column('jobs')\n# def node_id_beam(buildings, jobs):\n#     return misc.reindex(buildings.node_id_beam, jobs.building_id)\n\n\n###############################\n# WLCM dummy columns #\n###############################\n\n@orca.column('jobs')\ndef sector_retail(jobs):\n    return jobs['sector_id'].isin([44, 45]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_healthcare(jobs):\n    return jobs['sector_id'].isin([62]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_tech(jobs):\n    return jobs['sector_id'].isin([51, 54]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_food_and_hosp(jobs):\n    return jobs['sector_id'].isin([72]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_mfg(jobs):\n    return jobs['sector_id'].isin([31, 32, 33]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_edu_serv(jobs):\n    return jobs['sector_id'].isin([61]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_oth_serv(jobs):\n    return jobs['sector_id'].isin([81]).astype(int)\n
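\n\n# The sector dummies above and below appear to key off 2-digit NAICS codes\n# (44-45 retail trade, 62 health care, 51/54 information and professional\n# services, 23 construction, 92 public administration, 52-53 finance and real\n# estate, 42 wholesale, 48 transportation, 71 arts, 22 utilities).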
\n\n@orca.column('jobs')\ndef sector_constr(jobs):\n    return jobs['sector_id'].isin([23]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_gov(jobs):\n    return jobs['sector_id'].isin([92]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_fire(jobs):\n    return jobs['sector_id'].isin([52, 53]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_whlsale(jobs):\n    return jobs['sector_id'].isin([42]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_admin(jobs):\n    return jobs['sector_id'].isin([56]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_transport(jobs):\n    return jobs['sector_id'].isin([48]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_arts(jobs):\n    return jobs['sector_id'].isin([71]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_util(jobs):\n    return jobs['sector_id'].isin([22]).astype(int)\n\n\n# @orca.column('jobs')\n# def parcel_id(jobs, buildings):\n#     return misc.reindex(\n#         buildings.parcel_id, jobs.building_id)\n\n\n@orca.column('persons')\ndef no_higher_ed(persons):\n    return (persons['edu'] < 21).astype(int)\n\n\n@orca.column('persons')\ndef age_under_45(persons):\n    return (persons['age'] < 45).astype(int)\n\n\n@orca.column('households')\ndef hh_inc_under_25k(households):\n    return ((\n        households['income'] < 25000) & (\n        households['income'] > 10)).astype(int)\n\n\n@orca.column('households')\ndef hh_inc_25_to_75k(households):\n    return ((\n        households['income'] >= 25000) & (\n        households['income'] < 75000)).astype(int)\n\n\n@orca.column('households')\ndef hh_inc_75_to_200k(households):\n    return ((\n        households['income'] >= 75000) & (\n        households['income'] < 200000)).astype(int)\n\n\n# cols for WLCM interaction terms\n@orca.column('jobs')\ndef zone_id_work(jobs, buildings, parcels):\n    return misc.reindex(\n        orca.merge_tables(\n            buildings, [buildings, parcels], columns=['zone_id'])['zone_id'],\n        jobs.building_id).astype(float)\n\n\n@orca.column('persons')\ndef zone_id_home(persons, households, units, buildings, parcels):\n    return misc.reindex(\n        orca.merge_tables(\n            households, [households, units, buildings, parcels],\n            columns=['zone_id'])['zone_id'],\n        persons.household_id).astype(float)\n\n\n#########################################\n# Auto ownership dummy columns #\n#########################################\n\n\n# income bin dummies\n@orca.column('households')\ndef income_2(households):\n    return ((households['income']>= 0) & (households['income']<= 20000)).astype(int)\n\n\n@orca.column('households')\ndef income_4(households):\n    return ((households['income']> 20000) & (households['income']<= 40000)).astype(int)\n\n\n@orca.column('households')\ndef income_6(households):\n    return ((households['income']> 40000) & (households['income']<= 60000)).astype(int)\n\n\n@orca.column('households')\ndef income_8(households):\n    return ((households['income']> 60000) & (households['income']<= 80000)).astype(int)\n\n\n@orca.column('households')\ndef income_10(households):\n    return ((households['income']> 80000) & (households['income']<= 100000)).astype(int)\n\n\n@orca.column('households')\ndef income_12(households):\n    return ((households['income']> 100000) & (households['income']<= 120000)).astype(int)\n\n\n@orca.column('households')\ndef income_12p(households):\n    return (households['income']> 120000).astype(int)\n\n\n# tenure type dummies\n@orca.column('households')\ndef tenure_1(households):\n    return (households['tenure'] == 1).astype(int)\n\n\n@orca.column('households')\ndef tenure_2(households):\n    return 
(households['tenure'] == 2).astype(int)\n\n\n@orca.column('households')\ndef tenure_3(households):\n    return (households['tenure'] == 3).astype(int)\n\n\n@orca.column('households')\ndef tenure_4(households):\n    return (households['tenure'] == 4).astype(int)\n\n\n@orca.column('households')\ndef single_family_int(households):\n    return households['single_family'].astype(int)\n\n\n@orca.column('households')\ndef building_type_2(households):\n    return (households['building_type'] == 2).astype(int)\n\n\n# AM Peak Accessibility Vars\n\n@orca.column('parcels')\ndef average_income_20(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.average_income_20, parcels.block_id)\n\n\n@orca.column('parcels')\ndef above_jobs_20(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.above_jobs_20, parcels.block_id)\n\n\n@orca.column('parcels')\ndef above_jobs_40(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.above_jobs_40, parcels.block_id)\n\n\n@orca.column('parcels')\ndef above_jobs_60(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.above_jobs_60, parcels.block_id)\n\n\n@orca.column('parcels')\ndef below_jobs_20(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.below_jobs_20, parcels.block_id)\n\n\n@orca.column('parcels')\ndef below_jobs_40(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.below_jobs_40, parcels.block_id)\n\n\n@orca.column('parcels')\ndef below_jobs_60(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.below_jobs_60, parcels.block_id)\n\n\n@orca.column('parcels')\ndef employment_20(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.employment_20, parcels.block_id)\n\n\n@orca.column('parcels')\ndef employment_40(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.employment_40, parcels.block_id)\n\n\n@orca.column('parcels')\ndef employment_60(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.employment_60, parcels.block_id)\n\n\n@orca.column('parcels')\ndef population_20(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.population_20, parcels.block_id)\n\n\n@orca.column('parcels')\ndef population_40(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.population_40, parcels.block_id)\n\n\n@orca.column('parcels')\ndef population_60(parcels, access_indicators_ampeak):\n    return misc.reindex(\n        access_indicators_ampeak.population_60, parcels.block_id)\n\n###########################\n# TOD choice dummy vars #\n###########################\n@orca.column('households')\ndef hh_inc_150kplus(households):\n    return((\n        households['income'] > 150000) | (\n        households['income'] == 150000)).astype(int)\n\n@orca.column('persons')\ndef lessGED(persons):\n    return(persons['edu'] < 16).astype(int)\n\n@orca.column('persons')\ndef GED(persons):\n    return(persons['edu'].isin([16,17])).astype(int)\n\n@orca.column('persons')\ndef somebach(persons):\n    return(persons['edu'].isin([16,17])).astype(int)\n\n@orca.column('persons')\ndef Assoc(persons):\n    return(persons['edu'].isin([20])).astype(int)\n    \n@orca.column('persons')\ndef Bach(persons):\n    return(persons['edu'].isin([21])).astype(int)\n    \n
@orca.column('persons')\ndef female(persons):\n    return (persons['sex'] - 1)\n    \n@orca.column('persons')\ndef white(persons):\n    return(persons['race_id'].isin([1.0])).astype(int)\n\n@orca.column('persons')\ndef minority(persons):\n    return(persons['white'].isin([0.0])).astype(int)\n    \n@orca.column('persons')\ndef age_16less25(persons):\n    return((persons.age.between(16,25,inclusive = False)) | (persons.age==16)).astype(int)\n    \n@orca.column('households')\ndef hh_size_1per(households):\n    return(households.persons.isin([1.0])).astype(int)\n    \n@orca.column('jobs')\ndef finance(jobs):\n    return jobs['sector_id'].isin([52]).astype(int)\n    \n@orca.column('jobs')\ndef info(jobs):\n    return jobs['sector_id'].isin([51]).astype(int)\n    \n@orca.column('jobs')\ndef scitech(jobs):\n    return jobs['sector_id'].isin([54]).astype(int)\n", "id": "6246006", "language": "Python", "matching_score": 7.149755477905273, "max_stars_count": 1, "path": "activitysynth/scripts/variables.py" }, { "content": "import orca\nfrom urbansim.utils import misc\n\n\n#########################\n# ZONES VARIABLES #\n#########################\n\n# these are primarily used for calculating skim-based\n# accessibilities\n\n\n@orca.column('zones', cache=True)\ndef total_jobs(jobs, zones):\n    return jobs.zone_id_work.groupby(\n        jobs.zone_id_work).count().reindex(zones.index).fillna(0)\n\n\n@orca.column('zones')\ndef sum_residential_units(parcels, buildings, zones):\n    s = buildings.residential_units.groupby(\n        buildings.parcel_id).sum().groupby(parcels.zone_id).sum()\n    return s.reindex(zones.index).fillna(0)\n\n\n@orca.column('zones', cache=True)\ndef sum_persons(households, buildings, parcels, zones):\n    s = households.persons.groupby(\n        households.building_id).sum().groupby(\n        buildings.parcel_id).sum().groupby(parcels.zone_id).sum()\n    return s.reindex(zones.index).fillna(0)\n\n\n@orca.column('zones', cache=True)\ndef sum_income(households, buildings, parcels, zones):\n    s = households.income.groupby(\n        households.building_id).sum().groupby(\n        buildings.parcel_id).sum().groupby(parcels.zone_id).sum()\n    return s.reindex(zones.index).fillna(0)\n\n\n@orca.column('zones', cache=True)\ndef avg_income(households, buildings, parcels, zones):\n    s = households.income.groupby(\n        households.building_id).mean().groupby(\n        buildings.parcel_id).mean().groupby(parcels.zone_id).mean()\n    return s.reindex(zones.index).fillna(0)\n\n\n############################\n# small drive network vars #\n############################\n\n@orca.column('parcels')\ndef node_id_small(parcels, netsmall):\n    idssmall_parcel = netsmall.get_node_ids(parcels.x, parcels.y)\n    return idssmall_parcel\n\n\n@orca.column('rentals')\ndef node_id_small(rentals, netsmall):\n    idssmall_rentals = netsmall.get_node_ids(\n        rentals.longitude, rentals.latitude)\n    return idssmall_rentals\n\n\n@orca.column('buildings')\ndef node_id_small(parcels, buildings):\n    return misc.reindex(parcels.node_id_small, buildings.parcel_id)\n\n\n@orca.column('units')\ndef node_id_small(buildings, units):\n    return misc.reindex(buildings.node_id_small, units.building_id)\n\n\n@orca.column('households')\ndef node_id_small(units, households):\n    return misc.reindex(units.node_id_small, households.unit_id)\n\n\n@orca.column('persons')\ndef node_id_small(households, persons):\n    return misc.reindex(households.node_id_small, persons.household_id)\n\n\n@orca.column('jobs')\ndef node_id_small(buildings, jobs):\n    return misc.reindex(buildings.node_id_small, 
jobs.building_id)\n\n###########################\n# walk network vars #\n###########################\n@orca.column('parcels')\ndef node_id_walk(parcels, netwalk):\n    idswalk_parcel = netwalk.get_node_ids(parcels.x, parcels.y)\n    return idswalk_parcel\n\n\n@orca.column('rentals')\ndef node_id_walk(rentals, netwalk):\n    idswalk_rentals = netwalk.get_node_ids(rentals.longitude, rentals.latitude)\n    return idswalk_rentals\n\n\n@orca.column('buildings')\ndef node_id_walk(parcels, buildings):\n    return misc.reindex(parcels.node_id_walk, buildings.parcel_id)\n\n\n@orca.column('units')\ndef node_id_walk(buildings, units):\n    return misc.reindex(buildings.node_id_walk, units.building_id)\n\n\n@orca.column('households')\ndef node_id_walk(units, households):\n    return misc.reindex(units.node_id_walk, households.unit_id)\n\n\n@orca.column('persons')\ndef node_id_walk(households, persons):\n    return misc.reindex(households.node_id_walk, persons.household_id)\n\n\n@orca.column('jobs')\ndef node_id_walk(buildings, jobs):\n    return misc.reindex(buildings.node_id_walk, jobs.building_id)\n\n\n###############################\n# WLCM dummy columns #\n###############################\n\n@orca.column('jobs')\ndef sector_retail(jobs):\n    return jobs['sector_id'].isin([44, 45]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_healthcare(jobs):\n    return jobs['sector_id'].isin([62]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_tech(jobs):\n    return jobs['sector_id'].isin([51, 54]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_food_and_hosp(jobs):\n    return jobs['sector_id'].isin([72]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_mfg(jobs):\n    return jobs['sector_id'].isin([31, 32, 33]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_edu_serv(jobs):\n    return jobs['sector_id'].isin([61]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_oth_serv(jobs):\n    return jobs['sector_id'].isin([81]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_constr(jobs):\n    return jobs['sector_id'].isin([23]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_gov(jobs):\n    return jobs['sector_id'].isin([92]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_fire(jobs):\n    return jobs['sector_id'].isin([52, 53]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_whlsale(jobs):\n    return jobs['sector_id'].isin([42]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_admin(jobs):\n    return jobs['sector_id'].isin([56]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_transport(jobs):\n    return jobs['sector_id'].isin([48]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_arts(jobs):\n    return jobs['sector_id'].isin([71]).astype(int)\n\n\n@orca.column('jobs')\ndef sector_util(jobs):\n    return jobs['sector_id'].isin([22]).astype(int)\n\n\n@orca.column('jobs')\ndef parcel_id(jobs, buildings):\n    return misc.reindex(\n        buildings.parcel_id, jobs.building_id)\n\n\n@orca.column('persons')\ndef no_higher_ed(persons):\n    return (persons['edu'] < 21).astype(int)\n\n\n@orca.column('persons')\ndef age_under_45(persons):\n    return (persons['age'] < 45).astype(int)\n\n\n@orca.column('households')\ndef hh_inc_under_25k(households):\n    return ((\n        households['income'] < 25000) & (\n        households['income'] > 10)).astype(int)\n\n\n@orca.column('households')\ndef hh_inc_25_to_75k(households):\n    return ((\n        households['income'] >= 25000) & (\n        households['income'] < 75000)).astype(int)\n\n\n@orca.column('households')\ndef hh_inc_75_to_200k(households):\n    return 
((\n        households['income'] >= 75000) & (\n        households['income'] < 200000)).astype(int)\n\n\n# cols for WLCM interaction terms\n@orca.column('jobs')\ndef zone_id_work(jobs, parcels):\n    return misc.reindex(\n        parcels.zone_id, jobs.parcel_id)\n\n\n@orca.column('persons')\ndef zone_id_home(persons, households, units, buildings, parcels):\n    return misc.reindex(\n        orca.merge_tables(\n            households, [households, units, buildings, parcels],\n            columns=['zone_id'])['zone_id'],\n        persons.household_id).astype(float)\n\n\n#########################################\n# Auto ownership dummy columns #\n#########################################\n\n\n@orca.column('households')\ndef tenure_1(households):\n    return (households['tenure'] == 1).astype(int)\n\n@orca.column('households')\ndef tenure_2(households):\n    return (households['tenure'] == 2).astype(int)\n\n@orca.column('households')\ndef tenure_3(households):\n    return (households['tenure'] == 3).astype(int)\n\n@orca.column('households')\ndef tenure_4(households):\n    return (households['tenure'] == 4).astype(int)\n\n@orca.column('households')\ndef single_family_int(households):\n    return households['single_family'].astype(int)\n\n@orca.column('households')\ndef building_type_2(households):\n    return (households['building_type'] == 2).astype(int)\n\n\n###########################\n# TOD choice dummy vars #\n###########################\n@orca.column('households')\ndef hh_inc_150kplus(households):\n    return((\n        households['income'] > 150000) | (\n        households['income'] == 150000)).astype(int)\n\n@orca.column('persons')\ndef lessGED(persons):\n    return(persons['edu'] < 16).astype(int)\n\n@orca.column('persons')\ndef GED(persons):\n    return(persons['edu'].isin([16,17])).astype(int)\n\n@orca.column('persons')\ndef somebach(persons):\n    return(persons['edu'].isin([16,17])).astype(int)\n\n@orca.column('persons')\ndef Assoc(persons):\n    return(persons['edu'].isin([20])).astype(int)\n    \n@orca.column('persons')\ndef Bach(persons):\n    return(persons['edu'].isin([21])).astype(int)\n    \n@orca.column('persons')\ndef female(persons):\n    return (persons['sex'] - 1)\n    \n@orca.column('persons')\ndef white(persons):\n    return(persons['race_id'].isin([1.0])).astype(int)\n\n@orca.column('persons')\ndef minority(persons):\n    return(persons['white'].isin([0.0])).astype(int)\n    \n@orca.column('persons')\ndef age_16less25(persons):\n    return((persons.age.between(16,25,inclusive = False)) | (persons.age==16)).astype(int)\n    \n@orca.column('households')\ndef hh_size_1per(households):\n    return(households.persons.isin([1.0])).astype(int)\n    \n@orca.column('jobs')\ndef finance(jobs):\n    return jobs['sector_id'].isin([52]).astype(int)\n    \n@orca.column('jobs')\ndef info(jobs):\n    return jobs['sector_id'].isin([51]).astype(int)\n    \n@orca.column('jobs')\ndef scitech(jobs):\n    return jobs['sector_id'].isin([54]).astype(int)\n", "id": "7410605", "language": "Python", "matching_score": 3.0373332500457764, "max_stars_count": 1, "path": "spring-2019-models/scripts/variables.py" }, { "content": "import orca\nimport pandana as pdna\nimport pandas as pd\nimport scipy.stats as st\nimport numpy as np\nfrom datetime import datetime\nimport os\n\nfrom urbansim.utils import networks\nfrom urbansim_templates import modelmanager as mm\nfrom urbansim_templates.models import LargeMultinomialLogitStep\nfrom urbansim_templates.utils import update_column\n\nfrom activitysynth.scripts import utils\n\n\n# load existing model steps from the model manager\nmm.initialize()\n
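\n# mm.initialize() re-registers any template model steps previously saved to the\n# model manager's configs directory (the auto ownership, WLCM, TOD and mode\n# choice models fitted elsewhere), which is what lets the orca steps below fetch\n# them with mm.get_step(...); the steps themselves can then be run in sequence,\n# e.g. orca.run(['initialize_imputed_skims', 'wlcm_simulate']).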
\n\n@orca.step()\ndef initialize_imputed_skims(mtc_skims):\n\n    # if imputed skims exist, just load them\n    try:\n        df = orca.get_table('beam_skims_imputed').to_frame()\n\n    # otherwise, impute the raw skims\n    except FileNotFoundError:\n        print('No imputed skims found. Creating them now.')\n\n        try:\n            raw_skims = orca.get_table('beam_skims_raw')\n            df = utils.impute_missing_skims(mtc_skims, raw_skims)\n        except FileNotFoundError:\n            print(\n                \"Couldn't find raw skims either. Make sure there \"\n                \"is a file of skims present in the data directory.\")\n\n    orca.add_table('beam_skims_imputed', df, cache=True)\n\n\n@orca.step()\ndef skims_aggregations(beam_skims_imputed):\n\n    for impedance in ['gen_tt_WALK_TRANSIT', 'gen_tt_CAR']:\n\n        # each of these columns must be defined for the\n        # zones table since the skims are reported at\n        # the zone level. currently they get created in\n        # variables.py under the section commented as\n        # \"ZONES VARIABLES\"\n        for col in [\n                'total_jobs', 'sum_persons', 'sum_income',\n                'sum_residential_units']:\n            for tt in [15, 45]:\n                utils.register_skim_access_variable(\n                    col + '_{0}_'.format(impedance) + str(tt),\n                    col, impedance, tt, beam_skims_imputed)\n\n        for col in ['avg_income']:\n            for tt in [30]:\n                utils.register_skim_access_variable(\n                    col + '_{0}_'.format(impedance) + str(tt),\n                    col, impedance, tt, beam_skims_imputed, np.mean)\n\n\n@orca.step()\ndef test_manual_registration():\n    print(\"Model step is running\")\n\n\n@orca.step()\ndef initialize_network_small():\n    \"\"\"\n    This will be turned into a data loading template.\n    \"\"\"\n    @orca.injectable('netsmall', cache=True)\n    def build_networksmall(drive_nodes, drive_edges):\n        drive_nodes = drive_nodes.to_frame()\n        drive_edges = drive_edges.to_frame()\n        netsmall = pdna.Network(\n            drive_nodes.x, drive_nodes.y, drive_edges.u,\n            drive_edges.v, drive_edges[['length']],\n            twoway=False)\n        netsmall.precompute(25000)\n        return netsmall\n\n\n@orca.step()\ndef initialize_network_walk():\n    \"\"\"\n    This will be turned into a data loading template.\n\n    \"\"\"\n    @orca.injectable('netwalk', cache=True)\n    def build_networkwalk(walk_nodes, walk_edges):\n        walk_nodes = walk_nodes.to_frame()\n        walk_edges = walk_edges.to_frame()\n        netwalk = pdna.Network(\n            walk_nodes.x, walk_nodes.y, walk_edges.u,\n            walk_edges.v, walk_edges[['length']], twoway=True)\n        netwalk.precompute(2500)\n        return netwalk\n\n\n@orca.step()\ndef network_aggregations_small(netsmall):\n    \"\"\"\n    This will be turned into a network aggregation template.\n    \"\"\"\n    nodessmall = networks.from_yaml(\n        netsmall, 'network_aggregations_small.yaml')\n    nodessmall = nodessmall.fillna(0)\n\n    print(nodessmall.describe())\n    orca.add_table('nodessmall', nodessmall)\n\n\n@orca.step()\ndef network_aggregations_walk(netwalk):\n    \"\"\"\n    This will be turned into a network aggregation template.\n\n    \"\"\"\n    nodeswalk = networks.from_yaml(netwalk, 'network_aggregations_walk.yaml')\n    nodeswalk = nodeswalk.fillna(0)\n    print(nodeswalk.describe())\n    orca.add_table('nodeswalk', nodeswalk)\n\n\n@orca.step()\ndef network_aggregations_beam(netbeam):\n    \"\"\"\n    This will be turned into a network aggregation template.\n\n    \"\"\"\n\n    nodesbeam = networks.from_yaml(netbeam, 'network_aggregations_beam.yaml')\n    nodesbeam = nodesbeam.fillna(0)\n    print(nodesbeam.describe())\n    orca.add_table('nodesbeam', nodesbeam)\n
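\n\n# The aggregation steps above attach node-level accessibility variables (as\n# configured in the corresponding network_aggregations_*.yaml files) to the\n# nodessmall/nodeswalk/nodesbeam tables, which variables.py and the choice\n# models below draw from.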
\n\n@orca.step()\ndef wlcm_simulate(beam_skims_imputed):\n    \"\"\"\n    Generate workplace location choices for the synthetic pop. This is just\n    a temporary workaround until the model templates themselves can handle\n    interaction terms. Otherwise the model template would normally not need\n    an additional orca step wrapper such as is defined here.\n\n    \"\"\"\n    interaction_terms = beam_skims_imputed.to_frame().rename_axis(\n        ['zone_id_home', 'zone_id_work'])\n\n    m = mm.get_step('WLCM_gen_tt_simple')\n\n    m.run(chooser_batch_size=200000, interaction_terms=[interaction_terms])\n\n    orca.broadcast(\n        'jobs', 'persons', cast_index=True, onto_on='job_id')\n\n\n@orca.step()\ndef auto_ownership_simulate(households):\n    \"\"\"\n    Generate auto ownership choices for the synthetic pop households.\n    The categories are:\n    - 0: no vehicle\n    - 1: one vehicle\n    - 2: two vehicles\n    - 3: three or more vehicles\n    \"\"\"\n    m = mm.get_step('auto_ownership')\n\n    # remove filters, specify out tables\n    m.filters = None\n    m.tables = [\n        'households', 'units', 'buildings', 'parcels', 'nodessmall',\n        'nodeswalk']\n    m.out_tables = [\n        'households', 'units', 'buildings', 'parcels', 'nodessmall',\n        'nodeswalk']\n    m.run()\n\n\n@orca.step()\ndef primary_mode_choice_simulate(persons):\n    \"\"\"\n    Generate primary mode choices for the synthetic population.\n    The choices are:\n    - 0: drive alone\n    - 1: shared\n    - 2: walk-transit-walk\n    - 3: drive-transit-walk\n    - 4: walk-transit-drive\n    - 5: bike\n    - 6: walk\n    \"\"\"\n\n    @orca.table(cache=True)\n    def persons_CHTS_format(mtc_skims):\n        # use persons with jobs for persons\n        persons = orca.get_table('persons').to_frame()\n        persons.index.name = 'person_id'\n        persons.reset_index(inplace=True)\n        persons = persons[['person_id','sex','age','race_id','worker','edu','household_id','job_id', 'TOD']]\n\n        hh_df = orca.get_table('households').to_frame().reset_index()[['household_id','cars','tenure','income','persons','building_id']]\n        jobs_df = orca.get_table('jobs').to_frame().reset_index()[['job_id','building_id']]\n        buildings_df = orca.get_table('buildings').to_frame().reset_index()[['building_id','parcel_id']]\n        try:\n            parcels_df = orca.get_table('parcels').to_frame().reset_index()[['parcel_id','zone_id']]\n        except KeyError:\n            parcels_df = orca.get_table('parcels').to_frame().reset_index()[['primary_id','zone_id']]\n            parcels_df.rename(columns = {'primary_id':'parcel_id'}, inplace = True)\n\n        # rename columns/change values to match CHTS\n        persons.columns = ['person_id','GEND','AGE','RACE1','JOBS','EDUCA','household_id','job_id', 'TOD']\n        persons.RACE1 = persons.RACE1.map({1:1,2:2,3:3,4:3,5:3,6:4,7:5,8:97,9:97})\n        persons.EDUCA = persons.EDUCA.map({0:1,1:1,2:1,3:1,4:1,5:1,6:1,7:1,8:1,9:1,\n                                           10:1,11:1,12:1,13:1,14:1,15:1,16:2,17:2,18:3,19:3,\n                                           20:4,21:5,22:6,23:6,24:6})\n        persons.TOD = persons.TOD.map({2:'EA',3:'EA',12:'AM',14:'AM',22:'MD',23:'MD',24:'MD'})\n\n        # read skim\n        skim = orca.get_table('mtc_skims').to_frame()\n        \n        skim.columns = skim.columns.str.replace('_distance','_Distance') # capitalization issues\n        skim.columns = skim.columns.str.replace('_cost','_Cost')\n        \n        EA_skim = skim[['orig','dest']+list(skim.filter(like = 'EA').columns)]\n        EA_skim.columns = EA_skim.columns.str.replace('_EA','')\n        EA_skim['TOD'] = 'EA'\n        AM_skim = skim[['orig','dest']+list(skim.filter(like = 'AM').columns)]\n        AM_skim.columns = AM_skim.columns.str.replace('_AM','')\n        AM_skim['TOD'] = 'AM'\n        MD_skim = skim[['orig','dest']+list(skim.filter(like = 'MD').columns)]\n        MD_skim.columns = MD_skim.columns.str.replace('_MD','')\n        MD_skim['TOD'] = 'MD'\n\n        skim_combined = pd.concat([EA_skim,AM_skim,MD_skim])\n\n        MTC_acc 
= pd.read_csv('./data/MTC_TAZ_accessibility.csv')\n\n        # merge attributes onto persons\n        # want household as origin zone and job as destination zone.\n\n        hh_df = hh_df.merge(buildings_df, how = 'left', on = 'building_id').merge(parcels_df, how = 'left', on = 'parcel_id')\n        hh_df.rename(columns = {'zone_id':'orig'},inplace = True)\n\n        jobs_df = jobs_df.merge(buildings_df,how = 'left', on = 'building_id').merge(parcels_df, how = 'left', on = 'parcel_id')\n        jobs_df.rename(columns = {'zone_id':'dest'}, inplace = True)\n\n        persons = persons.merge(hh_df, how = 'left', on = 'household_id')\n        persons.drop(['building_id','parcel_id'],axis = 1,inplace = True)\n\n        persons = persons.merge(jobs_df, how = 'inner',on = 'job_id')\n        persons.drop(['building_id','parcel_id'],axis = 1,inplace = True)\n\n\n        persons = persons.merge(MTC_acc, how = 'left',left_on = 'orig', right_on = 'taz1454')\n        persons[MTC_acc.columns] = persons[MTC_acc.columns].fillna(0)\n\n        persons = persons.merge(skim_combined, how = 'left', on = ['orig','dest','TOD'])\n\n        \n        # rename the remaining attributes\n        persons['OWN'] = (persons['tenure']==1).astype(int)\n        persons.rename(columns = {'cars':'HHVEH','income':'INCOM','persons':'HHSIZ'},inplace = True)\n        return persons\n    \n    \n    m = mm.get_step('primary_mode_choice')\n    \n    # remove filters, specify out table, out column\n    m.filters = None\n    m.out_filters = None\n    m.tables = ['persons_CHTS_format']\n    m.out_tables = 'persons_CHTS_format'\n    m.out_column = 'primary_commute_mode'\n\n    m.run()\n\n\n@orca.step()\ndef TOD_choice_simulate(mtc_skims):\n    \"\"\"\n    Generate time of day period choices for the synthetic population\n    home-work and work-home trips.\n    \"\"\"\n    TOD_obs = orca.merge_tables('persons', ['persons', 'households', 'jobs'])\n    \n    # TOD_obs.dropna(inplace = True)\n    TOD_obs.reset_index(inplace=True)\n\n    mtc_skims = orca.get_table('mtc_skims').to_frame()\n    \n    TOD_obs = pd.merge(TOD_obs, mtc_skims, how = 'left', \n                       left_on=['zone_id_home', 'zone_id_work'], \n                       right_on=['orig', 'dest'])\n\n    TOD_obs = pd.merge(TOD_obs, mtc_skims, how = 'left',\n                       left_on=['zone_id_work','zone_id_home'], \n                       right_on=['orig', 'dest'], suffixes=('_HW', '_WH'))\n    \n    TOD_list = ['EA','AM','MD','PM','EV']\n\n    for tod1 in TOD_list:\n        for tod2 in TOD_list:\n            col_name = f'da_Time_{tod1}_{tod2}'\n            TOD_obs[col_name] = TOD_obs[f'da_Time_{tod1}_HW'] + TOD_obs[f'da_Time_{tod2}_WH']\n\n    # TOD_obs['TOD'] = None\n    \n    m = mm.get_step('TOD_choice')\n    \n    @orca.table(cache=True)\n    def tripsA():\n        return TOD_obs\n    \n    m.run()\n\n    results = orca.get_table('tripsA').to_frame().set_index('person_id')\n    persons = orca.get_table('persons').to_frame()\n\n    #####UPDATE COLUMN#######\n    update_column('persons', 'TOD', results['TOD'])\n    # persons = pd.merge(\n    #     persons, results[['TOD']], how='left',\n    #     left_index=True, right_index=True)\n    # orca.add_table('persons', persons)\n\n    \n@orca.step()\ndef TOD_distribution_simulate():\n    \"\"\"\n    Generate specific time of day choices for the synthetic population\n    home-work and work-home trips.\n    \n    \"\"\"\n    persons = orca.get_table('persons').to_frame()\n    \n    # TOD appears to be a two-digit code from TOD_choice_simulate: the first digit\n    # is the home-to-work departure period and the second the work-to-home period\n    # (0=EA, 1=AM, 2=MD, 3=PM, 4=EV), e.g. 12 = leave home in the AM peak and\n    # leave work in the midday. Start times below are drawn from distributions\n    # fitted separately for each combination.\n    trips02 = persons.loc[persons['TOD'].isin([2])]\n    trips03 = persons.loc[persons['TOD'].isin([3])]\n    trips12 = persons.loc[persons['TOD'].isin([12])]\n    trips13 = persons.loc[persons['TOD'].isin([13])]\n    trips14 = persons.loc[persons['TOD'].isin([14])]\n    trips22 = persons.loc[persons['TOD'].isin([22])]\n    trips23 = persons.loc[persons['TOD'].isin([23])]\n    trips24 = persons.loc[persons['TOD'].isin([24])]\n    \n    trips02['HW_ST'] = 
st.burr.rvs(size= len(trips02), \n c=104.46,d=0.03,loc=2.13,scale=3.72)\n trips02['WH_ST'] = st.argus.rvs(size= len(trips02), \n chi=3.02,loc=7.70,scale=7.66)\n\n trips03['HW_ST'] = st.genlogistic.rvs(size= len(trips03), c=0.08,loc=5.86,scale=0.05)\n trips03['WH_ST'] = st.bradford.rvs(size= len(trips03), c=8.91, loc=15.50, scale=3.01)\n\n trips12['HW_ST'] = st.vonmises_line.rvs(size= len(trips12), \n kappa=0.33,loc=7.48,scale=0.47)\n trips12['WH_ST'] = st.johnsonsb.rvs(size= len(trips12), \n a=-0.95, b=0.71, loc=8.69, scale=6.80)\n\n trips13['HW_ST'] = st.vonmises_line.rvs(size= len(trips13), \n kappa=0.46,loc=7.48,scale=0.47)\n trips13['WH_ST'] = st.vonmises_line.rvs(size= len(trips13), \n kappa=0.41, loc=16.99, scale=0.47)\n\n trips14['HW_ST'] = st.beta.rvs(size= len(trips14), a=1.58,b=1.14,loc=5.90,scale=3.07)\n trips14['WH_ST'] = st.pareto.rvs(size= len(trips14), b=19.93, loc=-0.36, scale=18.86)\n\n trips22['HW_ST'] = st.weibull_min.rvs(size= len(trips22), c=0.95,loc=9.00,scale=1.04)\n trips22['WH_ST'] = st.burr.rvs(size= len(trips22), \n c=263.97, d=0.03, loc=-1.00, scale=16.33)\n\n trips23['HW_ST'] = st.levy.rvs(size= len(trips23), loc=8.93,scale=0.30)\n trips23['WH_ST'] = st.triang.rvs(size= len(trips23), c=0.90, loc=15.17, scale=3.34)\n\n trips24['WH_ST'] = st.bradford.rvs(size= len(trips24), c=21.60, loc=18.50, scale=7.76)\n \n #make sure start times are within the correct period of day:\n while len(trips02.loc[(trips02['HW_ST'] < 3) | (trips02['HW_ST'] >= 6)]) > 0:\n trips02.loc[ (trips02['HW_ST'] < 3) | (trips02['HW_ST'] >= 6),\n 'HW_ST'] = st.burr.rvs(size= len(trips02.loc[(trips02['HW_ST'] < 3) |\n (trips02['HW_ST'] >= 6)]), \n c=104.46,d=0.03,loc=2.13,scale=3.72)\n\n while len(trips03.loc[(trips03['HW_ST'] < 3) | (trips03['HW_ST'] >= 6)]) > 0:\n trips03.loc[ (trips03['HW_ST'] < 3) | (trips03['HW_ST'] >= 6),\n 'HW_ST'] = st.genlogistic.rvs(size= len(trips03.loc[(trips03['HW_ST'] < 3) |\n (trips03['HW_ST'] >= 6)]), \n c=0.08,loc=5.86,scale=0.05)\n while len(trips12.loc[(trips12['HW_ST'] < 6) | (trips12['HW_ST'] >= 9)]) > 0:\n trips12.loc[ (trips12['HW_ST'] < 6) | (trips12['HW_ST'] >= 9),\n 'HW_ST'] = st.vonmises_line.rvs(size= len(trips12.loc[(trips12['HW_ST'] < 6) | \n (trips12['HW_ST'] >= 9)]), \n kappa=0.33,loc=7.48,scale=0.47)\n\n while len(trips13.loc[(trips13['HW_ST'] < 6) | (trips13['HW_ST'] >= 9)]) > 0:\n trips13.loc[ (trips13['HW_ST'] < 6) | (trips13['HW_ST'] >= 9),\n 'HW_ST'] = st.vonmises_line.rvs(size= len(trips13.loc[(trips13['HW_ST'] < 6) | \n (trips13['HW_ST'] >= 9)]), \n kappa=0.46,loc=7.48,scale=0.47)\n\n while len(trips14.loc[(trips14['HW_ST'] < 6) | (trips14['HW_ST'] >= 9)]) > 0:\n trips14.loc[ (trips14['HW_ST'] < 6) | (trips14['HW_ST'] >= 9),\n 'HW_ST'] = st.beta.rvs(size= len(trips14.loc[(trips14['HW_ST'] < 6) | \n (trips14['HW_ST'] >= 9)]), \n a=1.58,b=1.14,loc=5.90,scale=3.07)\n\n while len(trips22.loc[(trips22['HW_ST'] < 9) | (trips22['HW_ST'] >= 15.5)]) > 0:\n trips22.loc[ (trips22['HW_ST'] < 9) | (trips22['HW_ST'] >= 15.5),\n 'HW_ST'] = st.weibull_min.rvs(size= len(trips22.loc[(trips22['HW_ST'] < 9) | \n (trips22['HW_ST'] >= 15.5)]), \n c=0.95,loc=9.00,scale=1.04)\n\n while len(trips23.loc[(trips23['HW_ST'] < 9) | (trips23['HW_ST'] >= 15.5)]) > 0:\n trips23.loc[ (trips23['HW_ST'] < 9) | (trips23['HW_ST'] >= 15.5),\n 'HW_ST'] = st.levy.rvs(size= len(trips23.loc[(trips23['HW_ST'] < 9) | \n (trips23['HW_ST'] >= 15.5)]), \n loc=8.93,scale=0.30)\n \n while len(trips02.loc[(trips02['WH_ST'] < 9) | (trips02['WH_ST'] >= 15.5)]) > 0:\n trips02.loc[ 
(trips02['WH_ST'] < 9) | (trips02['WH_ST'] >= 15.5),\n 'WH_ST'] = st.argus.rvs(size= len(trips02.loc[(trips02['WH_ST'] < 9) | \n (trips02['WH_ST'] >= 15.5)]), \n chi=3.02,loc=7.70,scale=7.66)\n\n while len(trips03.loc[(trips03['WH_ST'] < 15.5) | (trips03['WH_ST'] >= 18.5)]) > 0:\n trips03.loc[ (trips03['WH_ST'] < 15.5) | (trips03['WH_ST'] >= 18.5),\n 'WH_ST'] = st.bradford.rvs(size= len(trips03.loc[(trips03['WH_ST'] < 15.5) | \n (trips03['WH_ST'] >= 18.5)]), \n c=8.91, loc=15.50, scale=3.01)\n\n while len(trips12.loc[(trips12['WH_ST'] < 9) | (trips12['WH_ST'] >= 15.5)]) > 0:\n trips12.loc[ (trips12['WH_ST'] < 9) | (trips12['WH_ST'] >= 15.5),\n 'WH_ST'] = st.johnsonsb.rvs(size= len(trips12.loc[(trips12['WH_ST'] < 9) | \n (trips12['WH_ST'] >= 15.5)]), \n a=-0.95, b=0.71, loc=8.69, scale=6.80)\n\n while len(trips13.loc[(trips13['WH_ST'] < 15.5) | (trips13['WH_ST'] >= 18.5)]) > 0:\n trips13.loc[ (trips13['WH_ST'] < 15.5) | (trips13['WH_ST'] >= 18.5),\n 'WH_ST'] = st.vonmises_line.rvs(size= len(\n trips13.loc[(trips13['WH_ST'] < 15.5) | (trips13['WH_ST'] >= 18.5)]), \n kappa=0.41, loc=16.99, scale=0.47)\n \n while len(trips14.loc[(trips14['WH_ST'] < 18.5) | (trips14['WH_ST'] >= 27)]) > 0:\n trips14.loc[ (trips14['WH_ST'] < 18.5) | (trips14['WH_ST'] >= 27),\n 'WH_ST'] = st.pareto.rvs(size= len(trips14.loc[(trips14['WH_ST'] < 18.5) | \n (trips14['WH_ST'] >= 27)]), \n b=19.93, loc=-0.36, scale=18.86)\n\n trips14.loc[ (trips14['WH_ST'] > 24),'WH_ST'] = trips14['WH_ST'] - 24\n \n while len(trips22.loc[(trips22['WH_ST'] < 9) | (trips22['WH_ST'] >= 15.5)]) > 0:\n trips22.loc[ (trips22['WH_ST'] < 9) | (trips22['WH_ST'] >= 15.5),\n 'WH_ST'] = st.burr.rvs(size= len(trips22.loc[(trips22['WH_ST'] < 9) | \n (trips22['WH_ST'] >= 15.5)]), \n c=263.97, d=0.03, loc=-1.00, scale=16.33)\n #make sure HW time is before WH time for people in period 22:\n while len(trips22.loc[(trips22['HW_ST'] >= trips22['WH_ST'])]) > 0:\n trips22.loc[ (trips22['HW_ST'] >= trips22['WH_ST']),\n 'WH_ST'] = st.burr.rvs(size= len(trips22.loc[(trips22['HW_ST'] >= \n trips22['WH_ST'])]), \n c=263.97, d=0.03, loc=-1.00, scale=16.33)\n\n trips22.loc[ (trips22['HW_ST'] >= trips22['WH_ST']),\n 'HW_ST'] = st.weibull_min.rvs(size= len(trips22.loc[(trips22['HW_ST'] >= \n trips22['WH_ST'])]), \n c=0.95,loc=9.00,scale=1.04)\n \n while len(trips23.loc[(trips23['WH_ST'] < 15.5) | (trips23['WH_ST'] >= 18.5)]) > 0:\n trips23.loc[ (trips23['WH_ST'] < 15.5) | (trips23['WH_ST'] >= 18.5),\n 'WH_ST'] = st.triang.rvs(size= len(trips23.loc[(trips23['WH_ST'] < 15.5) | \n (trips23['WH_ST'] >= 18.5)]), \n c=0.90, loc=15.17, scale=3.34)\n\n while len(trips24.loc[(trips24['WH_ST'] < 18.5) | (trips24['WH_ST'] >= 27)]) > 0:\n trips24.loc[ (trips24['WH_ST'] < 18.5) | (trips24['WH_ST'] >= 27),\n 'WH_ST'] = st.bradford.rvs(size= len(trips24.loc[(trips24['WH_ST'] < 18.5) | \n (trips24['WH_ST'] >= 27)]), \n c=21.60, loc=18.50, scale=7.76)\n \n trips24.loc[ (trips24['WH_ST'] > 24),'WH_ST'] = trips24['WH_ST'] - 24\n \n #set up separate HW distribution assignment for 9am-12pm and 12-3:29pm:\n trips24a = trips24.sample(int(round(len(trips24)*(241/377))))\n\n AM = trips24a.index.unique()\n\n trips24b = trips24[~trips24.index.isin(AM)] \n \n trips24a['HW_ST'] = st.bradford.rvs(size= len(trips24a), c=9.63, loc=9.00, scale=2.83)\n trips24b['HW_ST'] = st.exponweib.rvs(size= len(trips24b), \n a=0.05, c=21.50, loc=11.99, scale=3.23)\n \n while len(trips24a.loc[(trips24a['HW_ST'] < 9) | (trips24a['HW_ST'] >= 12)]) > 0:\n trips24a.loc[ (trips24a['HW_ST'] < 9) | (trips24a['HW_ST'] >= 
12),\n 'HW_ST'] = st.bradford.rvs(size= len(trips24a.loc[(trips24a['HW_ST'] < 9) | \n (trips24a['HW_ST'] >= 12)]), \n c=9.63, loc=9.00, scale=2.83)\n\n while len(trips24b.loc[(trips24b['HW_ST'] < 12) | (trips24b['HW_ST'] >= 15.5)]) > 0:\n trips24b.loc[ (trips24b['HW_ST'] < 12) | (trips24b['HW_ST'] >= 15.5),\n 'HW_ST'] = st.exponweib.rvs(size= len(trips24b.loc[(trips24b['HW_ST'] < 12) | \n (trips24b['HW_ST'] >= 15.5)]), \n a=0.05, c=21.50, loc=11.99, scale=3.23)\n\n cols = list(trips02.columns.values)\n\n frames = [\n trips02, trips03, trips12, trips13, trips14, trips22, trips23, trips24a, trips24b]\n\n TOD_obs2 = pd.concat(frames)\n\n TOD_obs2 = TOD_obs2[cols]\n\n for col in ['HW_ST', 'WH_ST']:\n update_column('persons', col, TOD_obs2[col])\n # persons = pd.merge(\n # persons, TOD_obs2[['HW_ST', 'WH_ST']], how='left',\n # left_index=True, right_index=True)\n # orca.add_table('persons', persons)\n\n\[email protected]()\ndef generate_activity_plans():\n\n time = str(datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))\n\n persons = orca.get_table('persons').to_frame().reset_index().rename(\n columns={'index': 'person_id'})\n\n job_coords = orca.merge_tables('jobs', ['jobs', 'buildings', 'parcels'])\n job_coords = job_coords[['x', 'y']]\n\n hh_coords = orca.merge_tables(\n 'households', ['households', 'units', 'buildings', 'parcels'])\n hh_coords = hh_coords[['x', 'y']]\n\n trips = persons[[\n 'person_id', 'household_id', 'job_id', 'HW_ST',\n 'WH_ST']].rename(\n columns={'HW_ST': 'Home', 'WH_ST': 'Work'})\n\n trip_data = trips.merge(\n hh_coords, left_on='household_id', right_index=True).merge(\n job_coords, left_on='job_id', right_index=True,\n suffixes=('_home', '_work'))\n trip_data = trip_data[[\n 'person_id', 'Home', 'Work', 'x_home', 'y_home', 'x_work',\n 'y_work']]\n\n melted = trip_data.melt(\n id_vars=['person_id', 'x_home', 'y_home', 'x_work', 'y_work'],\n var_name='activityType', value_name='endTime')\n melted['x'] = None\n melted['y'] = None\n melted.loc[melted['activityType'] == 'Home', 'x'] = melted.loc[\n melted['activityType'] == 'Home', 'x_home']\n melted.loc[melted['activityType'] == 'Home', 'y'] = melted.loc[\n melted['activityType'] == 'Home', 'y_home']\n melted.loc[melted['activityType'] == 'Work', 'x'] = melted.loc[\n melted['activityType'] == 'Work', 'x_work']\n melted.loc[melted['activityType'] == 'Work', 'y'] = melted.loc[\n melted['activityType'] == 'Work', 'y_work']\n\n plans = melted.sort_values(['person_id', 'endTime'])[[\n 'person_id', 'activityType', 'endTime', 'x',\n 'y']].reset_index(drop=True)\n plans['planElement'] = 'activity'\n plans['planElementIndex'] = plans.groupby('person_id').cumcount() * 2 + 1\n\n returnActivity = plans[plans['planElementIndex'] == 1]\n returnActivity.loc[:, 'planElementIndex'] = 5\n returnActivity.loc[:, 'endTime'] = None\n\n plans = plans.append(\n returnActivity, ignore_index=True).sort_values(\n ['person_id', 'planElementIndex'])\n\n legs = plans[plans['planElementIndex'].isin([1, 3])]\n legs.loc[:, 'planElementIndex'] = legs.loc[:, 'planElementIndex'] + 1\n legs.loc[:, 'activityType'] = ''\n legs.loc[:, 'endTime'] = None\n legs.loc[:, 'x'] = None\n legs.loc[:, 'y'] = None\n legs.loc[:, 'planElement'] = 'leg'\n\n plans = plans.append(legs, ignore_index=True).sort_values(\n ['person_id', 'planElementIndex']).rename(\n columns={'person_id': 'personId'}).reset_index(\n drop=True)\n plans = plans[[\n 'personId', 'planElement', 'planElementIndex', 'activityType',\n 'x', 'y', 'endTime']]\n plans['x']\n # plans.loc[plans['planElement'] == 
'activity', 'mode'] = ''\n orca.add_table('plans', plans, cache=True)\n", "id": "10414146", "language": "Python", "matching_score": 3.7717127799987793, "max_stars_count": 1, "path": "activitysynth/scripts/models.py" }, { "content": "import orca\nimport pandas as pd\nimport warnings\nimport urbansim_templates\nimport argparse\nimport s3fs\nfrom datetime import datetime\nimport os\n\nfrom activitysynth.scripts import models, datasources, variables\n\n\nwarnings.simplefilter('ignore')\n\n\n# default runtime args\nscenario = None\naccessibilities_mode = 'compute'\ndata_out = './output/model_data_output.h5'\noutput_store = False\ninput_file_format = 'csv'\nformattable_fname_dict = {\n 'parcels': 'parcels.{0}',\n 'buildings': 'buildings.{0}',\n 'jobs': 'jobs.{0}',\n 'establishments': 'establishments.{0}',\n 'households': 'households.{0}',\n 'persons': 'persons.{0}',\n 'rentals': 'craigslist.{0}',\n 'units': 'units.{0}',\n 'mtc_skims': 'mtc_skims.{0}',\n 'beam_skims_raw': 'beam_skims_raw.{0}',\n 'beam_skims_imputed': 'beam_skims_imputed.{0}',\n # the following nodes and edges .csv's will be phased out and\n # replaced by travel model skims entirely\n 'drive_nodes': 'drive_nodes.{0}',\n 'drive_edges': 'drive_edges.{0}',\n 'drive_access_vars': 'drive_net_vars.{0}',\n 'walk_nodes': 'walk_nodes.{0}',\n 'walk_edges': 'walk_edges.{0}',\n 'walk_access_vars': 'walk_net_vars.{0}',\n 'zones': 'zones.{0}',\n 'zone_access_vars': 'zones_w_access_vars.{0}',\n}\noutput_tables = [\n 'parcels', 'buildings', 'jobs', 'persons', 'households',\n 'establishments', 'rentals', 'units', 'zones', 'beam_skims_imputed',\n 'mtc_skims', 'plans', 'walk_edges', 'walk_nodes', 'drive_edges',\n 'drive_nodes']\n\n# default input data dir\ninput_data_dir = './data/'\n\n\ndef format_fname_dict(formattable_fname_dict, format='csv'):\n formatted_dict = {\n k: v.format('csv')\n for k, v in formattable_fname_dict.items()}\n return formatted_dict\n\n\nif __name__ == \"__main__\":\n\n # parse runtime arguments\n parser = argparse.ArgumentParser(description='Run ActivitySynth models.')\n # parser.add_argument(\n # \"--data-mode\", \"-d\", dest='data_mode', action=\"store\",\n # help=\"options: local, remote\")\n parser.add_argument(\n '--input-data-dir', '-i', action='store', dest='input_data_dir',\n help='full (pandas-compatible) path to input data directory')\n\n parser.add_argument(\n \"--input-file-format\", \"-f\", dest='input_file_format', action=\"store\",\n help=\"options: h5, csv, parquet\")\n\n parser.add_argument(\n '-o', action='store_true', dest='output_store',\n help='write output data tables to h5 data store')\n\n parser.add_argument(\n '-d', action='store_true', dest='data_out',\n help='full filepath for output data')\n\n parser.add_argument(\n \"--access-vars-mode\", \"-a\", help=\"option: compute, stored\",\n action=\"store\", dest='accessibilities_mode')\n\n options = parser.parse_args()\n\n if options.input_file_format:\n input_file_format = options.input_file_format\n orca.add_injectable('input_file_format', input_file_format)\n\n if options.output_store:\n if options.data_out:\n data_out = options.data_out\n if os.path.exists(data_out):\n os.remove(data_out)\n else:\n data_out = None\n\n if options.accessibilities_mode:\n accessibilities_mode = options.accessibilities_mode\n\n if options.input_data_dir:\n input_data_dir = options.input_data_dir\n\n input_data_dir = os.path.abspath(input_data_dir)\n orca.add_injectable('input_data_dir', input_data_dir)\n print('Reading input data from {0}'.format(input_data_dir))\n\n # 
h5 inputs not currently operational\n if input_file_format == 'h5':\n @orca.injectable('store', cache=True)\n def hdfstore():\n return pd.HDFStore(\n input_data_dir + \"model_data_output.h5\", mode='r')\n orca.add_injectable('input_fnames', None) # h5 mode has no fnames\n\n # data modes that store data as individual files\n elif input_file_format == 'parquet' or input_file_format == 'csv':\n orca.add_injectable('store', None)\n input_fnames = format_fname_dict(\n formattable_fname_dict, input_file_format)\n orca.add_injectable('input_fnames', input_fnames)\n\n else:\n raise ValueError(\n 'Must specifiy a valid input file format. Valid options '\n 'include \"csv\", \"h5\", and \"parquet\".')\n\n # initialize networks\n model_steps = [\n 'initialize_network_small',\n 'initialize_network_walk',\n 'initialize_imputed_skims'\n ]\n\n orca.run(model_steps)\n\n # create and save access vars if not run before\n if accessibilities_mode == 'compute':\n model_steps = [\n 'network_aggregations_small',\n 'network_aggregations_walk',\n 'skims_aggregations']\n orca.run(model_steps)\n\n orca.get_table('nodeswalk').to_frame().to_csv(\n os.path.join(input_data_dir, input_fnames['walk_access_vars']))\n orca.get_table('nodessmall').to_frame().to_csv(\n os.path.join(input_data_dir, input_fnames['drive_access_vars']))\n\n # skims aggregations step writes straight to the zones\n # table but we store the updated zones table separately\n orca.get_table('zones').to_frame().to_csv(\n os.path.join(input_data_dir, input_fnames['zone_access_vars']))\n\n elif accessibilities_mode == 'stored':\n walk_net_vars = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['walk_access_vars']),\n index_col='osmid')\n drive_net_vars = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['drive_access_vars']),\n index_col='osmid')\n zones = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['zone_access_vars']),\n index_col='zone_id', dtype={'zone_id': int})\n orca.add_table('nodeswalk', walk_net_vars)\n orca.add_table('nodessmall', drive_net_vars)\n\n # if stored replace the existing zones table with the\n # one containing zone-level accessibility variables\n orca.add_table('zones', zones)\n\n model_steps = [\n 'wlcm_simulate', 'TOD_choice_simulate',\n 'TOD_distribution_simulate',\n 'auto_ownership_simulate', 'primary_mode_choice_simulate',\n 'generate_activity_plans']\n\n orca.run(\n model_steps,\n data_out=data_out,\n out_base_tables=[],\n out_base_local=True,\n out_run_tables=output_tables,\n out_run_local=True)\n", "id": "4863838", "language": "Python", "matching_score": 3.3536975383758545, "max_stars_count": 1, "path": "activitysynth/run.py" }, { "content": "import orca\nimport pandas as pd\nimport numpy as np\nimport os\n# data documentation: https://berkeley.app.box.com/notes/282712547032\n\n\[email protected]('parcels', cache=True)\ndef parcels(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['parcels']))\n elif input_file_format == 'h5':\n df = store['parcels']\n elif input_file_format == 'csv':\n try:\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['parcels']),\n index_col='parcel_id', dtype={\n 'parcel_id': int, 'block_id': str, 'apn': str})\n except ValueError:\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['parcels']),\n index_col='primary_id', dtype={\n 'primary_id': int, 'block_id': str, 'apn': str})\n return df\n\n\[email protected]('buildings', 
cache=True)\ndef buildings(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['buildings']))\n elif input_file_format == 'h5':\n df = store['buildings']\n elif input_file_format == 'csv':\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['buildings']),\n index_col='building_id', dtype={\n 'building_id': int, 'parcel_id': int})\n df['res_sqft_per_unit'] = df[\n 'residential_sqft'] / df['residential_units']\n df['res_sqft_per_unit'][df['res_sqft_per_unit'] == np.inf] = 0\n return df\n\n\[email protected]('jobs', cache=True)\ndef jobs(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['jobs']))\n elif input_file_format == 'h5':\n df = store['jobs']\n elif input_file_format == 'csv':\n try:\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['jobs']),\n index_col='job_id', dtype={'job_id': int, 'building_id': int})\n except ValueError:\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['jobs']),\n index_col=0, dtype={'job_id': int, 'building_id': int})\n df.index.name = 'job_id'\n return df\n\n\[email protected]('establishments', cache=True)\ndef establishments(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['establishments']))\n elif input_file_format == 'h5':\n df = store['establishments']\n elif input_file_format == 'csv':\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['establishments']),\n index_col='establishment_id', dtype={\n 'establishment_id': int, 'building_id': int,\n 'primary_id': int})\n return df\n\n\[email protected]('households', cache=True)\ndef households(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['households']))\n elif input_file_format == 'h5':\n df = store['households']\n elif input_file_format == 'csv':\n try:\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['households']),\n index_col='household_id', dtype={\n 'household_id': int, 'block_group_id': str, 'state': str,\n 'county': str, 'tract': str, 'block_group': str,\n 'building_id': int, 'unit_id': int, 'persons': float})\n except ValueError:\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['households']),\n index_col=0, dtype={\n 'household_id': int, 'block_group_id': str, 'state': str,\n 'county': str, 'tract': str, 'block_group': str,\n 'building_id': int, 'unit_id': int, 'persons': float})\n df.index.name = 'household_id'\n return df\n\n\[email protected]('persons', cache=True)\ndef persons(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['persons']))\n elif input_file_format == 'h5':\n df = store['persons']\n elif input_file_format == 'csv':\n try:\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['persons']),\n index_col='person_id', dtype={\n 'person_id': int, 'household_id': int})\n except ValueError:\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['persons']),\n index_col=0, dtype={'person_id': int, 'household_id': int})\n df.index.name = 'person_id'\n return df\n\n\[email protected]('rentals', cache=True)\ndef rentals(input_file_format, 
input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['rentals']))\n elif input_file_format == 'h5':\n df = store['craigslist']\n elif input_file_format == 'csv':\n try:\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['rentals']),\n index_col='pid', dtype={\n 'pid': int, 'date': str, 'region': str,\n 'neighborhood': str, 'rent': float, 'sqft': float,\n 'rent_sqft': float, 'longitude': float,\n 'latitude': float, 'county': str, 'fips_block': str,\n 'state': str, 'bathrooms': str})\n except ValueError:\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['rentals']),\n index_col=0, dtype={\n 'date': str, 'region': str,\n 'neighborhood': str, 'rent': float, 'sqft': float,\n 'rent_sqft': float, 'longitude': float,\n 'latitude': float, 'county': str, 'fips_block': str,\n 'state': str, 'bathrooms': str})\n df.index.name = 'pid'\n df.rent[df.rent < 100] = 100.0\n df.rent[df.rent > 10000] = 10000.0\n df.rent_sqft[df.rent_sqft < .2] = .2\n df.rent_sqft[df.rent_sqft > 50] = 50.0\n return df\n\n\[email protected]('units', cache=True)\ndef units(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['units']))\n elif input_file_format == 'h5':\n df = store['units']\n elif input_file_format == 'csv':\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['units']),\n index_col='unit_id', dtype={'unit_id': int, 'building_id': int})\n df.index.name = 'unit_id'\n return df\n\n\[email protected]('zones', cache=True)\ndef zones(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['zones']))\n elif input_file_format == 'h5':\n df = store['zones']\n elif input_file_format == 'csv':\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['zones']),\n index_col='zone_id', dtype={'zone_id': int})\n if 'tract' in df.columns:\n df.drop('tract', axis=1, inplace=True)\n return df\n\n\n# Tables from <NAME>\n# Append AM peak UrbanAccess transit accessibility variables to parcels table\[email protected]('access_indicators_ampeak', cache=True)\ndef access_indicators_ampeak():\n # this filepath is hardcoded because it lives in the repo\n am_acc = pd.read_csv(\n './data/access_indicators_ampeak.csv', dtype={'block_id': str})\n am_acc.block_id = am_acc.block_id.str.zfill(15)\n am_acc.set_index('block_id', inplace=True)\n am_acc = am_acc.fillna(am_acc.median())\n return am_acc\n\n\n# Tables from Emma\[email protected]('mtc_skims', cache=True)\ndef mtc_skims(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['mtc_skims']))\n elif input_file_format == 'h5':\n df = store['mtc_skims']\n elif input_file_format == 'csv':\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['mtc_skims']),\n index_col=0)\n return df\n\n\[email protected](cache=True)\ndef beam_skims_raw(input_file_format, input_data_dir, store, input_fnames):\n \"\"\"\n Load full BEAM skims, convert travel time to minutes\n \"\"\"\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['beam_skims_raw']))\n elif input_file_format == 'h5':\n df = store['beam_skims_raw']\n elif input_file_format == 'csv':\n df = pd.read_csv(\n os.path.join(input_data_dir, 
input_fnames['beam_skims_raw']))\n\n df.rename(columns={\n 'generalizedCost': 'gen_cost', 'origTaz': 'from_zone_id',\n 'destTaz': 'to_zone_id'}, inplace=True)\n return df\n\n\[email protected](cache=True)\ndef beam_skims_imputed(input_file_format, input_data_dir, store, input_fnames):\n \"\"\"\n Load imputed BEAM skims\n \"\"\"\n if input_file_format == 'parquet':\n df = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['beam_skims_imputed']))\n elif input_file_format == 'h5':\n df = store['beam_skims_imputed']\n elif input_file_format == 'csv':\n df = pd.read_csv(\n os.path.join(input_data_dir, input_fnames['beam_skims_imputed']))\n df.set_index(['from_zone_id', 'to_zone_id'], inplace=True)\n return df\n\n\[email protected](cache=True)\ndef drive_nodes(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n nodes = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['drive_nodes']))\n elif input_file_format == 'h5':\n nodes = store['drive_nodes']\n elif input_file_format == 'csv':\n nodes = pd.read_csv(os.path.join(\n input_data_dir,\n input_fnames['drive_nodes'])).set_index('osmid')\n return nodes\n\n\[email protected](cache=True)\ndef drive_edges(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n edges = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['drive_edges']))\n elif input_file_format == 'h5':\n edges = store['drive_edges']\n elif input_file_format == 'csv':\n edges = pd.read_csv(os.path.join(\n input_data_dir,\n input_fnames['drive_edges'])).set_index('uniqueid')\n return edges\n\n\[email protected](cache=True)\ndef walk_nodes(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n nodes = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['walk_nodes']))\n elif input_file_format == 'h5':\n nodes = store['walk_nodes']\n elif input_file_format == 'csv':\n nodes = pd.read_csv(os.path.join(\n input_data_dir,\n input_fnames['walk_nodes'])).set_index('osmid')\n return nodes\n\n\[email protected](cache=True)\ndef walk_edges(input_file_format, input_data_dir, store, input_fnames):\n if input_file_format == 'parquet':\n edges = pd.read_parquet(\n os.path.join(input_data_dir, input_fnames['walk_edges']))\n elif input_file_format == 'h5':\n edges = store['walk_edges']\n elif input_file_format == 'csv':\n edges = pd.read_csv(os.path.join(\n input_data_dir,\n input_fnames['walk_edges'])).set_index('uniqueid')\n return edges\n\n\n# Broadcasts, a.k.a. 
merge relationships\norca.broadcast(\n 'parcels', 'buildings', cast_index=True, onto_on='parcel_id')\norca.broadcast(\n 'buildings', 'units', cast_index=True, onto_on='building_id')\norca.broadcast(\n 'units', 'households', cast_index=True, onto_on='unit_id')\norca.broadcast(\n 'households', 'persons', cast_index=True, onto_on='household_id')\norca.broadcast(\n 'buildings', 'jobs', cast_index=True, onto_on='building_id')\norca.broadcast(\n 'buildings', 'establishments', cast_index=True, onto_on='building_id')\norca.broadcast(\n 'nodeswalk', 'parcels', cast_index=True, onto_on='node_id_walk')\norca.broadcast(\n 'nodeswalk', 'rentals', cast_index=True, onto_on='node_id_walk')\norca.broadcast(\n 'nodessmall', 'rentals', cast_index=True, onto_on='node_id_small')\norca.broadcast(\n 'nodessmall', 'parcels', cast_index=True, onto_on='node_id_small')\norca.broadcast(\n 'zones', 'parcels', cast_index=True, onto_on='zone_id')\n", "id": "11581212", "language": "Python", "matching_score": 7.19121789932251, "max_stars_count": 1, "path": "activitysynth/scripts/datasources.py" }, { "content": "import orca\nimport numpy as np\nimport pandas as pd\n\n\n# data documentation: https://berkeley.app.box.com/notes/282712547032\n\n\n# Set data directory\n\nd = '/home/data/spring_2019/2025/'\n\nif 'data_directory' in orca.list_injectables():\n d = orca.get_injectable('data_directory')\n\n# b = '/home/data/spring_2019/base/'\n############################################################\n\n# Tables from MTC Bay Area UrbanSim\n\n\[email protected](cache=True)\ndef parcels():\n try:\n df = pd.read_csv(\n d + 'parcels.csv',\n index_col='primary_id',\n dtype={'primary_id': int, 'block_id': str})\n except:\n df = pd.read_csv(\n d + 'parcels.csv',\n index_col='parcel_id',\n dtype={'parcel_id': int, 'block_id': str})\n return df\n\n\[email protected](cache=True)\ndef buildings():\n df = pd.read_csv(\n d + 'buildings.csv',\n index_col='building_id', dtype={'building_id': int, 'parcel_id': int})\n df['res_sqft_per_unit'] = df['residential_sqft'] / df['residential_units']\n df['res_sqft_per_unit'][df['res_sqft_per_unit'] == np.inf] = 0\n return df\n\n\n############################################################\n\n# Table of Rental Data from Craigslist, bayarea_urbansim added by Arezoo\n\n# @orca.table(cache=True)\n# def craigslist():\n# df = pd.read_csv(\n# d + 'craigslist.csv',\n# index_col='pid', dtype={'pid': int})\n# return df\n\n\[email protected](cache=True)\ndef rentals():\n df = pd.read_csv(\n d + 'craigslist.csv',\n index_col='pid', dtype={'pid': int, 'rent': float})\n return df\n\n\n############################################################\n\n# Tables synthesized by <NAME>\n\[email protected](cache=True)\ndef units():\n df = pd.read_csv(\n d + 'units.csv',\n index_col='unit_id', dtype={'unit_id': int, 'building_id': int})\n return df\n\n\[email protected](cache=True)\ndef households():\n try:\n df = pd.read_csv(\n d + 'households.csv',\n index_col='household_id', dtype={\n 'household_id': int, 'block_group_id': str, 'state': str,\n 'county': str, 'tract': str, 'block_group': str,\n 'building_id': int, 'unit_id': int, 'persons': float})\n except:\n df = pd.read_csv(\n d + 'households.csv',\n index_col=0, dtype={\n 'block_group_id': str, 'state': str,\n 'county': str, 'tract': str, 'block_group': str,\n 'building_id': int, 'unit_id': int, 'persons': float})\n df.index.name = 'household_id'\n return df\n\n\[email protected](cache=True)\ndef persons():\n df = pd.read_csv(\n d + 'persons.csv',\n 
index_col='person_id', dtype={'person_id': int, 'household_id': int})\n return df\n\n\[email protected](cache=True)\ndef jobs():\n\n try:\n df = pd.read_csv(\n d + 'jobs.csv',\n index_col='job_id', dtype={'job_id': int, 'building_id': int})\n except:\n df = pd.read_csv(\n d + 'jobs.csv',\n index_col=0, dtype={'job_id': int, 'building_id': int})\n df.index.name = 'job_id'\n return df\n\n\n############################################################\n\n# Tables from <NAME>\[email protected](cache=True)\ndef establishments():\n df = pd.read_csv(\n d + 'establishments.csv',\n index_col='establishment_id', dtype={\n 'establishment_id': int, 'building_id': int, 'primary_id': int})\n return df\n\n############################################################\n\n\n# zones table\[email protected](cache=True)\ndef zones():\n df = pd.read_csv(\n d + 'zones.csv', index_col='zone_id',\n dtype={'zone_id': int})\n return df\n\n############################################################\n# skims\n\n\n# Tables from Emma\[email protected]('skims', cache=True)\ndef skims():\n df = pd.read_csv(d + 'skims.csv', index_col=0)\n return df\n\n\[email protected](cache=True)\ndef beam_drive_skims():\n \"\"\"\n Load BEAM skims, convert travel time to minutes\n \"\"\"\n df = pd.read_csv(\n d + 'smart-1Apr2019-sc-b-lt-2025-20.skimsExcerpt.csv.gz')\n\n # morning peak\n df = df[df['period'] == 'AM']\n assert len(df) == 2114116\n df = df.rename(\n columns={'origTaz': 'from_zone_id', 'destTaz': 'to_zone_id'})\n df = df.set_index(['from_zone_id', 'to_zone_id'])\n\n # seconds to minutes\n df['gen_tt_CAR'] = df['generalizedTimeInS'] / 60\n return df\n\n\[email protected](cache=True)\ndef beam_skims():\n \"\"\"\n Load BEAM skims, convert travel time to minutes\n \"\"\"\n df = pd.read_csv(\n d + 'smart-1Apr2019-sc-b-lt-2025-20.skims.csv.gz')\n\n df.rename(columns={\n 'generalizedCost': 'gen_cost', 'origTaz': 'from_zone_id',\n 'destTaz': 'to_zone_id'}, inplace=True)\n\n # seconds to minutes\n df['gen_tt'] = df['generalizedTimeInS'] / 60\n\n return df\n\n############################################################\n# Broadcasts, a.k.a. 
merge relationships\n\n\norca.broadcast(\n 'parcels', 'buildings', cast_index=True, onto_on='parcel_id')\norca.broadcast(\n 'buildings', 'units', cast_index=True, onto_on='building_id')\norca.broadcast(\n 'units', 'households', cast_index=True, onto_on='unit_id')\norca.broadcast(\n 'households', 'persons', cast_index=True, onto_on='household_id')\norca.broadcast(\n 'buildings', 'jobs', cast_index=True, onto_on='building_id')\norca.broadcast(\n 'buildings', 'establishments', cast_index=True, onto_on='building_id')\norca.broadcast(\n 'nodeswalk', 'parcels', cast_index=True, onto_on='node_id_walk')\norca.broadcast(\n 'nodeswalk', 'rentals', cast_index=True, onto_on='node_id_walk')\norca.broadcast(\n 'nodessmall', 'rentals', cast_index=True, onto_on='node_id_small')\norca.broadcast(\n 'nodessmall', 'parcels', cast_index=True, onto_on='node_id_small')\n# orca.broadcast(\n# 'nodesbeam', 'parcels', cast_index=True, onto_on='node_id_beam')\n", "id": "5715554", "language": "Python", "matching_score": 3.8927667140960693, "max_stars_count": 1, "path": "spring-2019-models/scripts/datasources.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport subprocess\nfrom subprocess import PIPE\n\n\nd = '/home/data/fall_2018/'\nb = '/home/data/spring_2019/beam_to_urbansim-v3/'\n\nparcels = pd.read_csv(\n d + 'parcel_attr.csv',\n index_col='primary_id',\n dtype={'primary_id': int, 'block_id': str})\n\nbuildings = pd.read_csv(\n d + 'buildings_v2.csv',\n index_col='building_id', dtype={'building_id': int, 'parcel_id': int})\nbuildings['res_sqft_per_unit'] = buildings['residential_sqft'] / buildings['residential_units']\nbuildings['res_sqft_per_unit'][buildings['res_sqft_per_unit'] == np.inf] = 0\n\nbuilding_types = pd.read_csv(\n d + 'building_types.csv',\n index_col='building_type_id', dtype={'building_type_id': int})\n\nbuilding_types.head()\n\nrentals = pd.read_csv(\n d + 'MTC_craigslist_listings_7-10-18.csv',\n index_col='pid', dtype={\n 'pid': int, 'date': str, 'region': str, 'neighborhood': str,\n 'rent': float, 'sqft': float, 'rent_sqft': float, \n 'longitude': float, 'latitude': float, 'county': str,\n 'fips_block': str, 'state': str, 'bathrooms': str})\n\nunits = pd.read_csv(\n d + 'units_v2.csv',\n index_col='unit_id', dtype={'unit_id': int, 'building_id': int})\n\nhouseholds = pd.read_csv(\n d + 'households_v2.csv',\n index_col='household_id', dtype={\n 'household_id': int, 'block_group_id': str, 'state': str,\n 'county': str, 'tract': str, 'block_group': str,\n 'building_id': int, 'unit_id': int, 'persons': float})\n\npersons = pd.read_csv(\n d + 'persons_v3.csv',\n index_col='person_id', dtype={'person_id': int, 'household_id': int})\n\njobs = pd.read_csv(\n d + 'jobs_v2.csv',\n index_col='job_id', dtype={'job_id': int, 'building_id': int})\n\nestablishments = pd.read_csv(\n d + 'establishments_v2.csv',\n index_col='establishment_id', dtype={\n 'establishment_id': int, 'building_id': int, 'primary_id': int})\n\nbeam_nodes_fname = 'beam-network-nodes.csv'\nbeam_links_fname = '10.linkstats.csv'\nbeam_links_filtered_fname = 'beam_links_8am.csv'\nwith open(b + beam_links_filtered_fname, 'w') as f:\n p1 = subprocess.Popen(\n [\"cat\", b + beam_links_fname], stdout=PIPE)\n p2 = subprocess.Popen([\n \"awk\", \"-F\", \",\",\n '(NR==1) || ($4 == \"8.0\" && $8 == \"AVG\")'],\n stdin=p1.stdout, stdout=f)\n p2.wait()\n\nnodesbeam = pd.read_csv(b + beam_nodes_fname).set_index('id')\nedgesbeam = pd.read_csv(b + beam_links_filtered_fname).set_index('link')\n\nnodeswalk = pd.read_csv(d + 
'bayarea_walk_nodes.csv').set_index('osmid')\nedgeswalk = pd.read_csv(d + 'bayarea_walk_edges.csv').set_index('uniqueid')\n\nnodessmall = pd.read_csv(d + 'bay_area_tertiary_strongly_nodes.csv').set_index('osmid')\nedgessmall = pd.read_csv(d + 'bay_area_tertiary_strongly_edges.csv').set_index('uniqueid')\n\nskims = pd.read_csv(d + 'skims_110118.csv', index_col=0)\n\nstore = pd.HDFStore('data/model_data.h5')\nstore.put('parcels',parcels)\nstore.put('buildings',buildings)\nstore.put('building_types',building_types)\nstore.put('units',units)\nstore.put('rentals',rentals)\nstore.put('households',households)\nstore.put('persons',persons)\nstore.put('jobs',jobs)\nstore.put('establishments',establishments)\nstore.put('nodesbeam',nodesbeam)\nstore.put('edgesbeam',edgesbeam)\nstore.put('nodeswalk',nodeswalk)\nstore.put('edgeswalk',edgeswalk)\nstore.put('nodessmall',nodessmall)\nstore.put('edgessmall',edgessmall)\nstore.put('skims', skims)\nstore.keys()\n\nstore.close()\n", "id": "1066907", "language": "Python", "matching_score": 7.678104400634766, "max_stars_count": 2, "path": "bayarea/make_model_data_hdf.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport subprocess\nfrom subprocess import PIPE\n\n\ndef send_df_to_s3_as_pq(df, table_name):\n s3_url = 's3://urbansim-baseyear-inputs/{0}.parquet.gz'.format(table_name)\n df.to_parquet(s3_url, compression='gzip', engine='pyarrow')\n\n\nd = '/home/data/fall_2018/'\nb = '/home/data/spring_2019/beam_to_urbansim-v3/'\n\nparcels = pd.read_csv(\n d + 'parcel_attr.csv',\n index_col='primary_id',\n dtype={'primary_id': int, 'block_id': str})\n\nbuildings = pd.read_csv(\n d + 'buildings_v2.csv',\n index_col='building_id', dtype={'building_id': int, 'parcel_id': int})\nbuildings['res_sqft_per_unit'] = buildings['residential_sqft'] / buildings[\n 'residential_units']\nbuildings['res_sqft_per_unit'][buildings['res_sqft_per_unit'] == np.inf] = 0\n\nbuilding_types = pd.read_csv(\n d + 'building_types.csv',\n index_col='building_type_id', dtype={'building_type_id': int})\n\nrentals = pd.read_csv(\n d + 'MTC_craigslist_listings_7-10-18.csv',\n index_col='pid', dtype={\n 'pid': int, 'date': str, 'region': str, 'neighborhood': str,\n 'rent': float, 'sqft': float, 'rent_sqft': float,\n 'longitude': float, 'latitude': float, 'county': str,\n 'fips_block': str, 'state': str, 'bathrooms': str})\n\nunits = pd.read_csv(\n d + 'units_v2.csv',\n index_col='unit_id', dtype={'unit_id': int, 'building_id': int})\n\nhouseholds = pd.read_csv(\n d + 'households_v2.csv',\n index_col='household_id', dtype={\n 'household_id': int, 'block_group_id': str, 'state': str,\n 'county': str, 'tract': str, 'block_group': str,\n 'building_id': int, 'unit_id': int, 'persons': float})\n\npersons = pd.read_csv(\n d + 'persons_v3.csv',\n index_col='person_id', dtype={'person_id': int, 'household_id': int})\n\njobs = pd.read_csv(\n d + 'jobs_v2.csv',\n index_col='job_id', dtype={'job_id': int, 'building_id': int})\n\nestablishments = pd.read_csv(\n d + 'establishments_v2.csv',\n index_col='establishment_id', dtype={\n 'establishment_id': int, 'building_id': int, 'primary_id': int})\n\nbeam_nodes_fname = 'beam-network-nodes.csv'\nbeam_links_fname = '10.linkstats.csv'\nbeam_links_filtered_fname = 'beam_links_8am.csv'\nwith open(b + beam_links_filtered_fname, 'w') as f:\n p1 = subprocess.Popen(\n [\"cat\", b + beam_links_fname], stdout=PIPE)\n p2 = subprocess.Popen([\n \"awk\", \"-F\", \",\",\n '(NR==1) || ($4 == \"8.0\" && $8 == \"AVG\")'],\n stdin=p1.stdout, stdout=f)\n 
p2.wait()\n\nnodesbeam = pd.read_csv(b + beam_nodes_fname).set_index('id')\nedgesbeam = pd.read_csv(b + beam_links_filtered_fname).set_index('link')\n\nnodeswalk = pd.read_csv(\n d + 'bayarea_walk_nodes.csv', low_memory=False).set_index('osmid')\nedgeswalk = pd.read_csv(\n d + 'bayarea_walk_edges.csv', low_memory=False).set_index('uniqueid')\n\nnodessmall = pd.read_csv(\n d + 'bay_area_tertiary_strongly_nodes.csv').set_index('osmid')\nedgessmall = pd.read_csv(\n d + 'bay_area_tertiary_strongly_edges.csv').set_index('uniqueid')\n\nskims = pd.read_csv('./data/skims_110118.csv', index_col=0)\n\nfor table_str in [\n 'parcels', 'buildings', 'building_types', 'units', 'rentals',\n 'households', 'persons', 'jobs', 'establishments', 'nodesbeam',\n 'edgesbeam', 'nodeswalk', 'edgeswalk', 'nodessmall', 'edgessmall',\n 'skims']:\n print(table_str)\n send_df_to_s3_as_pq(eval(table_str), table_str)\n", "id": "11913423", "language": "Python", "matching_score": 1.6721055507659912, "max_stars_count": 2, "path": "bayarea/send_model_data_to_s3.py" }, { "content": "import orca\nimport pandas as pd\nimport pandana as pdna\nfrom urbansim.models import transition\nfrom urbansim_parcels import utils as parcel_utils\nimport json\nfrom altair import Chart, X, Y, Axis\nimport altair as alt\nimport numpy as np\nimport yaml\nfrom developer import proposal_select\nfrom collections import OrderedDict\nfrom urbansim.utils import networks\n\n# @orca.step()\n# def build_networks(parcels, nodes, edges, craigslist):\n # nodes, edges = nodeswalk.to_frame(), edgeswalk.to_frame()\n # print('Number of nodes is %s.' % len(nodes))\n # print('Number of edges is %s.' % len(edges))\n # net = pdna.Network(nodes[\"x\"], nodes[\"y\"], edges[\"from\"], edges[\"to\"],\n # edges[[\"weight\"]])\n\n # precompute_distance = 5000\n # print('Precomputing network for distance %s.' % precompute_distance)\n # print('Network precompute starting.')\n # net.precompute(precompute_distance)\n # print('Network precompute done.')\n\n # parcels = parcels.local\n # parcels['node_id'] = net.get_node_ids(parcels['x'], parcels['y'])\n # orca.add_table(\"parcels\", parcels)\n # orca.add_injectable(\"net\", net)\n\n # craigslist = craigslist.local\n # craigslist['node_id'] = net.get_node_ids(craigslist['longitude'], craigslist['latitude'])\n # orca.add_table('craigslist', craigslist)\n\n\n# @orca.step()\n# def build_networks(parcels, nodeswalk, edgeswalk, craigslist):\n # nodes, edges = nodeswalk.to_frame(), edgeswalk.to_frame()\n # print('Number of nodes is %s.' % len(nodes))\n # print('Number of edges is %s.' % len(edges))\n # net = pdna.Network(nodes[\"x\"], nodes[\"y\"], edges[\"u\"], edges[\"v\"],\n # edges[[\"length\"]])\n\n # precompute_distance = 5000\n # print('Precomputing network for distance %s.' 
% precompute_distance)\n # print('Network precompute starting.')\n # net.precompute(precompute_distance)\n # print('Network precompute done.')\n \n # return net\n\n # parcels = parcels.local\n # parcels['node_id'] = net.get_node_ids(parcels['x'], parcels['y'])\n # orca.add_table(\"parcels\", parcels)\n # orca.add_injectable(\"net\", net)\n\n # craigslist = craigslist.local\n # craigslist['node_id'] = net.get_node_ids(craigslist['longitude'], craigslist['latitude'])\n # orca.add_table('craigslist', craigslist)\n\n\n\[email protected]()\ndef initialize_network_walk():\n \"\"\"\n This will be turned into a data loading template.\n \"\"\"\n\n @orca.injectable('netwalk', cache=True)\n def build_networkwalk(nodeswalk, edgeswalk):\n nodeswalk, edgeswalk = nodeswalk.to_frame(), edgeswalk.to_frame()\n print('Number of nodes is %s.' % len(nodeswalk))\n print('Number of edges is %s.' % len(edgeswalk))\n netwalk = pdna.Network(\n nodeswalk.x, nodeswalk.y, edgeswalk.u,edgeswalk.v, \n edgeswalk[['length']], twoway=True)\n netwalk.precompute(5000)\n return netwalk\n \n\n\[email protected]()\ndef initialize_network_beam():\n \"\"\"\n This will be turned into a data loading template.\n \"\"\"\n\n @orca.injectable('netbeam', cache=True)\n def build_networkbeam(nodesbeam, edgesbeam):\n nodesbeam, edgesbeam = nodesbeam.to_frame(), edgesbeam.to_frame()\n print('Number of nodes is %s.' % len(nodesbeam))\n print('Number of edges is %s.' % len(edgesbeam))\n netbeam = pdna.Network(\n nodesbeam['lon'], nodesbeam['lat'], edgesbeam['from'],\n edgesbeam['to'], edgesbeam[['traveltime']], twoway=False)\n netbeam.precompute(3600)\n return netbeam\n \n\[email protected]()\ndef network_aggregations_walk(netwalk):\n \"\"\"\n This will be turned into a network aggregation template.\n \"\"\"\n\n nodeswalk = networks.from_yaml(netwalk, 'network_aggregations_walk.yaml')\n nodeswalk = nodeswalk.fillna(0)\n \n # new variables\n print('compute additional aggregation variables')\n nodeswalk['prop_children_500_walk'] = ((nodeswalk['children_500_walk'] > 0).astype(int) / nodeswalk['hh_500_walk']).fillna(0)\n nodeswalk['prop_singles_500_walk'] = (nodeswalk['singles_500_walk'] / nodeswalk['hh_500_walk']).fillna(0)\n nodeswalk['prop_elderly_500_walk'] = (nodeswalk['elderly_hh_500_walk'] / nodeswalk['hh_500_walk']).fillna(0)\n nodeswalk['prop_black_500_walk'] = (nodeswalk['pop_black_500_walk'] / nodeswalk['pop_500_walk']).fillna(0)\n nodeswalk['prop_white_500_walk'] = (nodeswalk['pop_white_500_walk'] / nodeswalk['pop_500_walk']).fillna(0)\n nodeswalk['prop_asian_500_walk'] = (nodeswalk['pop_asian_500_walk'] / nodeswalk['pop_500_walk']).fillna(0)\n nodeswalk['prop_hisp_500_walk'] = (nodeswalk['pop_hisp_500_walk'] / nodeswalk['pop_500_walk']).fillna(0)\n nodeswalk['prop_rich_500_walk'] = (nodeswalk['rich_500_walk'] / nodeswalk['pop_500_walk']).fillna(0)\n nodeswalk['prop_poor_500_walk'] = (nodeswalk['poor_500_walk'] / nodeswalk['pop_500_walk']).fillna(0)\n\n nodeswalk['prop_children_1500_walk'] = ((nodeswalk['children_1500_walk'] > 0).astype(int)/nodeswalk['hh_1500_walk']).fillna(0)\n nodeswalk['prop_singles_1500_walk'] = (nodeswalk['singles_1500_walk'] / nodeswalk['hh_1500_walk']).fillna(0)\n nodeswalk['prop_elderly_1500_walk'] = (nodeswalk['elderly_hh_1500_walk'] / nodeswalk['hh_1500_walk']).fillna(0)\n nodeswalk['prop_black_1500_walk'] = (nodeswalk['pop_black_1500_walk'] / nodeswalk['pop_1500_walk']).fillna(0)\n nodeswalk['prop_white_1500_walk'] = (nodeswalk['pop_white_1500_walk'] / nodeswalk['pop_1500_walk']).fillna(0)\n 
nodeswalk['prop_asian_1500_walk'] = (nodeswalk['pop_asian_1500_walk'] / nodeswalk['pop_1500_walk']).fillna(0)\n nodeswalk['prop_hisp_1500_walk'] = (nodeswalk['pop_hisp_1500_walk'] / nodeswalk['pop_1500_walk']).fillna(0)\n nodeswalk['prop_rich_1500_walk'] = (nodeswalk['rich_1500_walk'] / nodeswalk['pop_1500_walk']).fillna(0)\n nodeswalk['prop_poor_1500_walk'] = (nodeswalk['poor_1500_walk'] / nodeswalk['pop_1500_walk']).fillna(0)\n\n nodeswalk['pop_jobs_ratio_1500_walk'] = (nodeswalk['pop_1500_walk'] / (nodeswalk['jobs_500_walk'])).fillna(0)\n nodeswalk['avg_hhs_500_walk'] = (nodeswalk['pop_500_walk'] / (nodeswalk['hh_500_walk'])).fillna(0)\n nodeswalk['avg_hhs_1500_walk'] = (nodeswalk['pop_1500_walk'] / (nodeswalk['hh_1500_walk'])).fillna(0)\n # end of addition\n \n # fill inf and nan with median\n \n def replace_inf_nan_with_median(col_name):\n return nodeswalk[col_name].replace([np.inf, -np.inf],np.nan).fillna(nodeswalk[col_name].median)\n \n for col_name in ['prop_children_500_walk','prop_singles_500_walk','prop_elderly_500_walk',\n 'prop_black_500_walk','prop_white_500_walk','prop_asian_500_walk','prop_hisp_500_walk',\n 'prop_rich_500_walk','prop_poor_500_walk','prop_children_1500_walk','prop_singles_1500_walk',\n 'prop_elderly_1500_walk','prop_black_1500_walk','prop_white_1500_walk','prop_asian_1500_walk',\n 'prop_hisp_1500_walk','prop_rich_1500_walk','prop_poor_1500_walk','pop_jobs_ratio_1500_walk',\n 'avg_hhs_500_walk','avg_hhs_1500_walk']:\n nodeswalk[col_name] = replace_inf_nan_with_median(col_name)\n \n \n print(nodeswalk.describe())\n orca.add_table('nodeswalk', nodeswalk)\n\n\[email protected]()\ndef network_aggregations_beam(netbeam):\n \"\"\"\n This will be turned into a network aggregation template.\n \"\"\"\n\n nodesbeam = networks.from_yaml(netbeam, 'network_aggregations_beam.yaml')\n nodesbeam = nodesbeam.fillna(0)\n print(nodesbeam.describe())\n orca.add_table('nodesbeam', nodesbeam)\n\n\[email protected]('output_parameters')\ndef output_parameters():\n with open(\"configs/output_parameters.yaml\") as f:\n cfg = yaml.load(f)\n return cfg\n\n\ndef prepare_chart_data():\n\n output_parameters = orca.get_injectable('output_parameters')\n geo_small = output_parameters['chart_data']['geography_small']\n geo_large = output_parameters['chart_data']['geography_large']\n vars_sum = output_parameters['chart_data']['chart_variables']['sum']\n vars_mean = output_parameters['chart_data']['chart_variables']['mean']\n custom_variables = output_parameters['chart_data']['custom_charts']\n acres = orca.get_table('parcels').to_frame(['zone_id', 'acres'])\\\n .groupby('zone_id').sum().reset_index()\n geo_attributes = orca.get_table('parcels').to_frame(\n ['zone_id', geo_small, geo_large]).\\\n groupby('zone_id').min().reset_index()\n data = orca.get_table('zones').to_frame(vars_sum + vars_mean + ['zone_id'])\n data = pd.merge(data, acres, on='zone_id')\n data = pd.merge(data, geo_attributes, on='zone_id')\n for var in vars_sum:\n new_var_name = var + '_per_acre'\n data[new_var_name] = data[var] / data['acres']\n vars_sum = vars_sum + [new_var_name]\n variables = {'sum': vars_sum, 'mean': vars_mean}\n return data, variables, geo_small, geo_large, custom_variables\n\ndef aggregate_data(data, agg_type, geo):\n if (agg_type == 'mean'):\n data = data.groupby(geo).mean().reset_index()\n else:\n data = data.groupby(geo).sum().reset_index()\n return data\n\ndef gen_var_barcharts_by_geo(data, var, agg_type, geo):\n data = aggregate_data(data, agg_type, geo)\n titlex = agg_type + ' of ' + 
var.split('_', 1)[1].replace('_', ' ')\n titley = geo.replace('_', ' ')\n bar_chart = alt.Chart(data).mark_bar().encode(\n x = X(var, axis = Axis(title = titlex)),\n y = Y((geo + ':O'), axis = Axis(title=titley))\n )\n with open('./runs/%s_by_%s.json' % (var, geo), 'w') as outfile:\n json.dump(bar_chart.to_json(), outfile)\n\ndef gen_var_histograms(data, var, agg_type, geo, vdict, cdict):\n data = aggregate_data(data, agg_type, geo)\n data = data.copy()\n type = vdict[var].split(' ')[0]\n if type == 'Log':\n log_var = var\n data[log_var] = data[var]\n else:\n log_var = 'log_' + var\n data[log_var] = np.log(data[var])\n titlex = 'log of ' + var.split('_', 1)[1].replace('_', ' ')\n titley = 'number of '+ geo.replace('_', ' ') + 's'\n hist = alt.Chart(data).mark_bar().encode(\n alt.X(log_var, bin = True, axis=Axis(title = titlex)),\n alt.Y('count()', axis = Axis(title = titley))\n )\n with open('./runs/%s_histogram.json' % var, 'w') as outfile:\n json.dump(hist.to_json(), outfile)\n\n\ndef gen_var_scatters(data, var1, var2, agg1, agg2, geo_points, geo_large):\n colors = data.groupby(geo_points).min().reset_index()\n colors = colors[[geo_points, geo_large]]\n data_1 = aggregate_data(data, agg1, geo_points)[[var1 , geo_points]]\n data_2 = aggregate_data(data, agg2, geo_points)[[var2, geo_points]]\n data = pd.merge(data_1, data_2,on = geo_points, how = 'left' )\n data = pd.merge(data, colors, on = geo_points,how = 'left')\n titlex = agg1 +' of '+ var1.split('_', 1)[1].replace('_', ' ') + ' by zone'\n titley = agg2 +' of '+ var2.split('_', 1)[1].replace('_', ' ') + ' by zone'\n scatter = alt.Chart(data).mark_point().encode(\n x=X(var1, axis=Axis(title = titlex)),\n y=Y(var2, axis=Axis(title = titley)),\n color=geo_large + ':N',\n )\n with open('./runs/%s_vs_%s.json' % (var2, var1), 'w') as outfile:\n json.dump(scatter.to_json(), outfile)\n\n\ndef gen_barcharts_n_largest(data, var, agg_type, geo, n):\n data = aggregate_data(data, agg_type, geo)\n max_data = data.nlargest(n, var).reset_index()\n titlex = agg_type + ' of ' + var.split('_', 1)[1].replace('_', ' ')\n titley = geo.replace('_', ' ')\n bar_chart = alt.Chart(max_data).mark_bar().encode(\n x=X(var, axis=Axis(title = titlex)),\n y=Y(geo + ':O', axis=Axis(title=titley))\n )\n with open('./runs/%s_%ss_with_max_%s.json'% (n, geo, var), 'w') as outfile:\n json.dump(bar_chart.to_json(), outfile)\n\n\ndef gen_custom_barchart(table,var):\n df = orca.get_table(table).to_frame(['parcel_id', var]).\\\n groupby(var).count().reset_index()\n df.rename(columns={'parcel_id': 'count_'+table}, inplace=True)\n chart = alt.Chart(df).mark_bar().encode(\n x=X('count_'+table, axis=Axis(title='count_'+table)),\n y=Y(var + ':O', axis=Axis(title=var))\n )\n with open('./runs/%s_by_%s.json'% (table, var), 'w') as outfile:\n json.dump(chart.to_json(), outfile)\n\n\ndef export_indicator_definitions():\n\n # Gets relevant data from output_parameters.yaml\n output_parameters = orca.get_injectable('output_parameters')\n indicator_vars = output_parameters['output_variables']\n sum_vars = output_parameters['chart_data']['chart_variables']['sum']\n mean_vars = output_parameters['chart_data']['chart_variables']['mean']\n geo_large = output_parameters['chart_data']['geography_large']\n geo_small = output_parameters['chart_data']['geography_small']\n custom_v = output_parameters['chart_data']['custom_charts']\n\n # Gets variable definitions from var_dict\n var_dict = orca.get_injectable('dictionary')['var_dict']\n custom_d = 
orca.get_injectable('dictionary')['custom_var_dict']\n\n # Creates dictionary with metadata for output indicators\n spatial_output = {}\n data = {}\n for geo_type in indicator_vars:\n desc = {}\n variables = indicator_vars[geo_type]\n for var in variables:\n geo_type = geo_type.strip('s')\n desc[var] = {'name': var_dict[var]}\n csv= geo_type + '_indicators'\n spatial_output[geo_type] = {'root_csv_name': csv,\n 'var_display': desc}\n data['spatial_output'] = OrderedDict(spatial_output)\n\n # Creates dictionary with metadata for charts (based on parcel data)\n for var in sum_vars:\n new_var_name = var + '_per_acre'\n sum_vars = sum_vars + [new_var_name]\n var_dict[new_var_name] = var_dict[var] + ' per acre'\n desc = {}\n for var_type in ['sum', 'mean']:\n variables = eval(var_type+'_vars')\n for var in variables:\n name = ('% s_by_% s.json' % (var, geo_large))\n varname = var_dict[var].replace(': ',' ')\n desc[name] = {'title': 'By ' +geo_large+ ' code: ' + varname}\n name = ('%s_histogram.json' % var)\n varname = var_dict[var].strip('Log of ').replace(': ', ' ')\n desc[name] = {'title': 'Histogram: Logarithm of ' + varname.lower()}\n name = ('% s_% ss_with_max_% s.json'% (10, geo_small, var))\n varname = var_dict[var].replace(': ', ' ')\n desc[name] = {'title':'Top ten '\n + geo_small + ' codes: ' + varname}\n used_variables = []\n vars_chart = sum_vars + mean_vars\n for var1 in vars_chart:\n used_variables = used_variables + [var1]\n for var2 in vars_chart:\n if (var1 != var2) & (var2 not in used_variables):\n name = ('%s_vs_%s.json' % (var2, var1))\n varname1 = var_dict[var1].replace(': ', ' ')\n varname2 = var_dict[var2].replace(': ', ' ')\n desc[name] = {\n 'title': 'Zone Scatterplot: ' + varname2 +\n ' vs. ' + varname1}\n\n # Creates dictionary with metadata for charts (based on custom tables)\n for table in custom_v:\n for var in custom_v[table]:\n name = '%s_by_%s.json' % (table, var)\n key = table + '_' + var\n try:\n data_name = custom_d[key]['data_name']\n agg_name = custom_d[key]['aggregation_name']\n except Exception:\n data_name = 'Total ' + table.replace('_', ' ')\n agg_name = var.replace('_', ' ')\n desc[name] = {'title': data_name +' by '+ agg_name}\n\n data['chart_output'] = OrderedDict(desc)\n\n # Exports dictionary with indicator and charts definitions to .yaml file\n data = OrderedDict(data)\n represent_dict_order = lambda self, data: \\\n self.represent_mapping('tag:yaml.org,2002:map', data.items())\n yaml.add_representer(OrderedDict, represent_dict_order)\n yaml.Dumper.ignore_aliases = lambda *args: True\n with open('./runs/output_indicator_definitions.yaml', 'w') as outfile:\n yaml.dump(data, outfile, default_flow_style=False, width = 1000)\n return var_dict, custom_d\n\[email protected]()\ndef generate_indicators(year, forecast_year, parcels, zones):\n # If iter_var is not defined is a presimulation generation\n if orca.get_injectable('iter_var'):\n year = orca.get_injectable('iter_var')\n else:\n year = orca.get_injectable('base_year')\n\n # General output indicators\n cfg = orca.get_injectable('output_parameters')['output_variables']\n zone_ind = zones.to_frame(cfg['zones'])\n zone_ind = zone_ind.reindex(sorted(zone_ind.columns),axis=1)\n parcel_ind = parcels.to_frame(cfg['parcels'])\n parcel_ind = parcel_ind.reindex(sorted(parcel_ind.columns),axis=1)\n zone_ind.to_csv('./runs/zone_indicators_%s.csv' % year)\n parcel_ind.to_csv('./runs/parcel_indicators_%s.csv' % year)\n\n # Output indicators by building type\n btype_columns = ['building_type_id','is_residential', 
'is_non_residential']\n btypes = orca.get_table('building_types').to_frame(btype_columns)\n btypes = btypes.reset_index()\n btypes.loc[btypes['is_residential']==True, 'ind_res'] = \\\n \"sum_residential_units_\" + btypes.building_type_id.astype(str)\n btypes.loc[btypes['is_non_residential'] == True, 'ind_non_res'] = \\\n \"sum_non_residential_sqft_\" + btypes.building_type_id.astype(str)\n btype_ind_cols = list(btypes.ind_res) + list(btypes.ind_non_res)\n btype_ind_cols = [ind for ind in btype_ind_cols if str(ind) != 'nan']\n zone_type = zones.to_frame(btype_ind_cols)\n parcel_type = parcels.to_frame(btype_ind_cols)\n zone_type = zone_type.reindex(sorted(zone_type.columns), axis=1)\n parcel_type = parcel_type.reindex(sorted(parcel_type.columns), axis=1)\n zone_type.to_csv('./runs/zone_indicators_building_type_%s.csv' % year)\n parcel_type.to_csv('./runs/parcel_indicators_building_type_%s.csv' % year)\n\n # Generate chart indicators\n if year == forecast_year:\n vdict, cdict = export_indicator_definitions()\n data, variables, geo_small, geo_large, custom_v = prepare_chart_data()\n for table in custom_v:\n for var in custom_v[table]:\n gen_custom_barchart(table, var)\n used_variables = []\n for aggtype in ['sum', 'mean']:\n for var in variables[aggtype]:\n print ('Generating charts for ' + var)\n gen_var_barcharts_by_geo(data, var, aggtype, geo_large)\n gen_var_histograms(data, var, aggtype, geo_small, vdict, cdict)\n gen_barcharts_n_largest(data, var, aggtype, geo_small, 10)\n used_variables = used_variables + [var]\n for aggtype2 in ['sum', 'mean']:\n for var2 in variables[aggtype2]:\n if (var != var2) & (var2 not in used_variables):\n gen_var_scatters(data, var, var2, aggtype,\n aggtype2, 'zone_id', geo_large)\n\n#### Proforma ####\n\ndef parcel_average_price(use):\n cap_rate = 0.05 ## To convert from price/sqft to rent/sqft\n parcels = orca.get_table('site_proposals')\n if use == 'retail':\n price = parcels.pred_retail_price_ave_800_linear\n elif use == 'industrial':\n price = parcels.pred_industrial_price_ave_800_linear\n elif use == 'office':\n price = parcels.pred_office_price_ave_800_linear\n elif use == 'residential':\n mean_price = parcels.to_frame(['pred_sf_detached_price_ave_800_linear',\n 'pred_duplex_townhome_price_ave_800_linear',\n 'pred_multifamily_price_ave_800_linear']).mean(axis=1)\n mean_sqft_per_unit = orca.get_table('site_proposals').mean_sqft_per_unit_ave_800_linear\n mean_sqft_per_unit[mean_sqft_per_unit < 400] = 400\n price = mean_price / mean_sqft_per_unit\n\n price[price < 1] = 1\n return price * cap_rate\n\n\ndef parcel_is_allowed(form):\n \"\"\"\n Defines which site proposals are allowed for a given form.\n Parameters\n ----------\n form : str\n The name of the form\n Returns\n -------\n A pandas series with \"True\" for the site proposals that are allowed\n under a given form, and \"False\" for the combinations that are not allowed.\n \"\"\"\n id = 'parcel_zoning_id_combination'\n zoning = 'potential_zoning_id'\n btypes_df, btypes = gets_allowable_buildings(form)\n proposals = orca.get_table('site_proposals').to_frame([id, zoning])\n proposals.rename(columns={zoning: 'zoning_id'}, inplace=True)\n allowable = pd.merge(proposals, btypes_df,on = 'zoning_id',how='left')\n allowable = allowable[allowable['building_type_id'].isin(btypes)].copy()\n allowable = allowable[allowable['can_develop'] == True].copy()\n proposals['output'] = False\n proposals.loc[proposals[id].isin(allowable[id]), 'output'] = True\n\n return (proposals['output'])\n\ndef 
gets_allowable_buildings(form):\n \"\"\"\n Helper function that gets the dataframe of allowable building types and\n matches the form with its allowable building types\n ----------\n form : str\n The name of the form\n Returns\n -------\n The DataFrame of allowable building types by zoning_id (including\n conditional costs by building type), and a list of the allowed building\n types for a given form\n \"\"\"\n\n btypes = []\n if form =='industrial':\n btypes = [4400, 4900, 5100, 5200, 6300, 6400]\n elif form =='office':\n btypes = [4100,4300,4310,6600]\n elif form =='mix_non_residential':\n btypes = [2140]\n elif form == 'retail_office':\n btypes = [6110,6120,6130,6140,6150,6160,6170,6210,6500]\n elif form == 'retail':\n btypes = [4210,4220,4230,4240,4250,4260,4290,4320]\n elif form == 'mix_all':\n btypes = [9000]\n elif form == 'residential_office':\n btypes = [2121,2122,6220,6230,7200]\n elif form == 'residential_retail':\n btypes = [2111,2112,2131,2132,3100,3200,8000]\n elif form == 'residential':\n btypes = [1110,1121,1122,1130,1210,1220,7100,7300,7900]\n\n columns = ['zoning_id', 'building_type_id', 'can_develop',\n 'conditional_use', 'cost_in_city', 'cost_in_ugb',\n 'cost_outside_ugb', 'probability']\n allowable = orca.get_table('allowable_building_types').to_frame(columns)\n for i in ['cost_in_city', 'cost_in_ugb', 'cost_outside_ugb' ]:\n conditional = i +'_conditional'\n allowable.rename(columns={i: conditional}, inplace = True)\n return allowable, btypes\n\ndef parcel_custom_callback(parcels, pf):\n columns = ['developable_sqft', 'total_yearly_rent',\n 'mean_sqft_per_unit_ave_800_linear']\n site_proposals = orca.get_table('site_proposals').to_frame(columns)\n parcels['parcel_size'] = site_proposals.developable_sqft\n parcels['land_cost'] = site_proposals.total_yearly_rent\n mean_sqft_per_unit = site_proposals.mean_sqft_per_unit_ave_800_linear\n mean_sqft_per_unit[mean_sqft_per_unit < 400] = 400\n parcels['ave_unit_size'] = mean_sqft_per_unit\n parcels = parcels[parcels.parcel_size > 2000]\n\n return parcels\n\ndef modifies_costs(self, form, newdf, total_development_costs):\n \"\"\"\n Modifies total_development costs in two steps: 1) Adds zoning costs,\n conditional costs, and rezoning costs 2) Multiplies by cost shifters\n defined in configs/cost_shifters.yaml for calibration purposes.\n ----------\n form : str\n The name of the form.\n newdf: DataFrame\n Dataframe of allowed site proposals.\n total_development_costs: Array\n Array of costs before considering any planning-related costs,\n created by sqftproforma in the _lookup_parking_cfg function.\n Returns\n -------\n Array of total_development_costs including planning_costs and affected by\n cost shifters\n \"\"\"\n\n costs = adds_planning_costs(self, form, newdf, total_development_costs)\n costs = cost_shifter_callback(self, form, newdf, costs)\n return costs\n\n\ndef adds_planning_costs(self, form, newdf, total_development_costs):\n \"\"\"\n Adds zoning costs, conditional costs, and rezoning costs to\n total_development_costs, taking into account parcel location.\n ----------\n form : str\n The name of the form.\n newdf: DataFrame\n Dataframe of allowed site proposals.\n total_development_costs: Array\n Array of costs before considering any planning-related costs,\n created by sqftproforma in the _lookup_parking_cfg function.\n Returns\n -------\n Array of total_development_costs including planning_costs\n \"\"\"\n\n newdf = planning_costs_by_status_ugb(form, newdf)\n costs = pd.DataFrame(total_development_costs).T\n 
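# Note: total_development_costs (from sqftproforma) is assumed to be shaped (parking configs x proposals); the transpose above aligns rows with proposals so the same per-proposal planning cost can be added to every config column before transposing back.\n    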
planning_costs = pd.DataFrame(index=newdf.index,\n columns=costs.columns.tolist())\n\n for col in costs.columns.tolist():\n planning_costs[col] = newdf['planning_costs']\n costs = costs.add(planning_costs, fill_value=0).T.values\n\n return costs\n\n\ndef planning_costs_by_status_ugb(form, newdf):\n \"\"\"\n Helper function that formats the dataframe of allowed site proposals,\n creating a new \"planning_costs\" column that accounts for any applicable\n zoning costs, conditional costs, or rezoning costs based on parcel location\n It also creates the zoning_btype table, which will be called later\n by the develop.py module to retrieve selected building types.\n ----------\n form : str\n The name of the form.\n newdf: DataFrame\n DataFrame of allowed site proposals.\n Returns\n -------\n Formatted DataFrame of allowed site proposals, including a new\n \"planning_costs\" column.\n\n \"\"\"\n id = 'parcel_zoning_id_combination'\n newdf.rename(columns={'potential_zoning_id': 'zoning_id'}, inplace=True)\n btypes_df, btypes = gets_allowable_buildings(form)\n allowed_df = pd.merge(newdf, btypes_df, on='zoning_id', how='left')\n newdf.loc[newdf['rezoning_cost'] < -99999,'rezoning_cost'] = 0\n newdf.loc[newdf['rezoning_cost'].isnull(),'rezoning_cost'] = 0\n for i in ['in_city', 'in_ugb', 'outside_ugb']:\n cost = 'cost_' + i\n cond_cost = 'cost_' + i + '_conditional'\n bname = 'building_type_id_' + i\n status_ugb = 'with' + i\n if i == 'outside_ugb':\n status_ugb = i\n allowed_df.loc[allowed_df[cond_cost].isnull(), cond_cost] = 0\n allowed_df.loc[allowed_df[cond_cost] < -99999, cond_cost] = 0\n allowed_status = allowed_df.loc[allowed_df['status_ugb']==status_ugb]\n allowed_btype = selects_btype(allowed_status, cond_cost)\n allowed_btype.rename(columns={'building_type_id':bname}, inplace=True)\n newdf = pd.merge(newdf, allowed_btype,on=id,how='left')\n newdf.loc[newdf['status_ugb'] == status_ugb,'planning_costs'] = \\\n newdf['rezoning_cost'] + newdf['overlay_cost'] + newdf[cond_cost]\n newdf.loc[newdf['status_ugb'] == status_ugb,'building_type_id'] = \\\n newdf[bname]\n newdf = newdf.drop(columns=[cond_cost, bname])\n zoning_btype = newdf.copy()\n zoning_btype = zoning_btype[[id, 'building_type_id']]\n orca.add_table('zoning_btype_%s' %form, zoning_btype )\n\n return newdf\n\ndef selects_btype(df, cost):\n\n \"\"\"\n Helper function that selects a building type for each zoning_id, based on\n the allowable_building_types table. If the scale factors for probabilities\n ('probability' column in the allowable_building_types table) are all equal\n to one for a given zoning_id, the first building type with the minimum\n conditional cost is selected. 
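(Hypothetical example: if zoning_id 100 allows building types 1110 and 1210, both with probability 1 but with conditional costs of 5000 and 2000, type 1210 is selected because the probabilities are all equal and it is the cheapest.) 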
If the probability scale factors are\n different, the first building type with the maximum probability scale\n factor is selected.\n ----------\n df : DataFrame\n DataFrame of allowed site proposals, formatted by the\n planning_costs_by_status_ugb() function\n cost: str\n The name of the cost column\n Returns\n -------\n DataFrame with selected building_type_id for each allowed site proposal.\n\n \"\"\"\n\n id = 'parcel_zoning_id_combination'\n df = df[[id, cost, 'building_type_id', 'probability']].copy()\n\n\n # Identifies method to assign btypes (min cost or max probability)\n pcount = df[[id,'probability']].groupby(id)['probability'].count().\\\n reset_index().rename(columns={'probability': 'pcount'})\n psum = df[[id,'probability']].groupby(id)['probability'].sum().\\\n reset_index().rename(columns={'probability': 'psum'})\n prob = pd.merge(pcount, psum, on=id, how='left')\n df = pd.merge(df, prob, on=id, how='left')\n df.loc[df['pcount'] != df['psum'], 'method'] = 'probability'\n df.loc[df['pcount'] == df['psum'], 'method'] = 'cost'\n\n # Creates dataframe with min conditional cost and max probability\n df_prob = df.loc[df['method']=='probability']\n df_cost = df.loc[df['method'] == 'cost']\n btypes_prob = df_prob.sort_values('probability').\\\n groupby(id, as_index=False).last()\n btypes_cost = df_cost.sort_values(cost).groupby(id, as_index=False).first()\n btypes = btypes_prob.append(btypes_cost)\n btypes = btypes[[id, cost, 'building_type_id']]\n\n return btypes\n\[email protected]('cost_shifters')\ndef shifters():\n with open (\"configs/cost_shifters.yaml\") as f:\n cfg = yaml.load(f)\n return cfg\n\ndef cost_shifter_callback(self, form, df, costs):\n \"\"\"\n Multiplies total_development costs (already including planning costs) by\n cost shifter values defined in cost_shifters.yaml by zone_district_id. This\n is done for calibration purposes\n ----------\n form : str\n The name of the form.\n df: DataFrame\n Dataframe of allowed site proposals.\n costs: Array\n Array of total_development costs, already considering planning-related\n costs.\n Returns\n -------\n Array of total_development_costs including planning_costs and multiplied by\n cost shifters\n \"\"\"\n\n shifter_cfg = orca.get_injectable('cost_shifters')['calibration']\n geography = shifter_cfg['calibration_geography_id']\n shift_type = 'residential' if form == 'residential' else 'non_residential'\n shifters = shifter_cfg['proforma_cost_shifters'][shift_type]\n for geo, geo_df in df.reset_index().groupby(geography):\n shifter = shifters[geo]\n costs[:, geo_df.index] *= shifter\n return costs\n\ndef adds_btypes_proposals(feasibility):\n \"\"\"\n Helper function that combines the individual zoning_btype tables into a\n single orca table for all forms (btypes_proposals). The btypes_proposals\n table allows retrieving the building type that was selected by the\n sqftproforma.py module for each parcel_zoning_id_combination. Note: These\n building types were selected using the planning_costs_by_status_ugb()\n function, which selects the building type with minimum conditional costs\n or maximum probability for each zoning_id. Conditional costs and\n probabilities are defined by the user in the allowable_building_types.csv\n file.\n\n ----------\n feasibility : DataFrame\n Table with the results from the feasibility step (sqftproforma.py)\n\n Returns\n -------\n None. 
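The table keeps one row per parcel_zoning_id_combination, using the minimum building_type_id when more than one form proposes a type for the same combination. 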
Registers btypes_proposals table with Orca.\n\n \"\"\"\n id = 'parcel_zoning_id_combination'\n df = feasibility.copy()\n with open(\"./configs/proforma.yaml\") as f:\n cfg = yaml.load(f)\n forms = cfg['forms_to_test']\n for form in forms:\n btypes_cols = [id, 'building_type_id']\n btypes = orca.get_table('zoning_btype_%s' %form).to_frame(btypes_cols)\n btypes.rename(columns={'building_type_id': 'btype_form'}, inplace=True)\n df = pd.merge(df, btypes, on=id, how='left')\n df.loc[df['form'] == form, 'building_type_id'] = df['btype_form']\n df = df.drop(columns=['btype_form'])\n df = df[[id,'building_type_id']].copy()\n df = df.groupby(id).min().reset_index()\n orca.add_table('btypes_proposals', df)\n\ndef formats_feasibility(site_proposals):\n \"\"\"\n Adds desired columns from site_proposals into the feasibility table\n\n \"\"\"\n id = 'parcel_zoning_id_combination'\n feasibility = orca.get_table('feasibility').to_frame()\n feasibility['parcel_id'] = site_proposals.parcel_id\n feasibility[id] = site_proposals[id]\n feasibility['original_zoning_id'] = site_proposals.zoning_id\n feasibility['zoning_id'] = site_proposals.potential_zoning_id\n feasibility['overlay_id'] = site_proposals.overlay_id\n feasibility['annexed_overlay_id'] = site_proposals.annexed_overlay_id\n feasibility['city'] = site_proposals.city\n feasibility['ugb'] = site_proposals.ugb\n feasibility = feasibility.set_index('parcel_id')\n orca.add_table('feasibility', feasibility)\n adds_btypes_proposals(feasibility)\n\ndef scales_probability(df):\n \"\"\"\n Helper function passed as 'profit_to_prob_func' to the pick() method in the\n develop.py module. This function first retrieves building type that was\n used by the sqftproforma.py module to calculate conditional costs for a\n given zoning_id. Then, this previously selected building type is used to\n identify the corresponding user_defined probability scale factor (from the\n 'allowable_building_types' table). 
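Roughly, within each building type the probability assigned to a candidate building is its scale factor times (max_profit / parcel_size), normalized by the sum of that quantity over all candidate buildings of the same type. 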
The scale factor is applied to calculate\n the final probability of choosing a given building within the develop.py\n module.\n\n ----------\n df : DataFrame\n DataFrame of potential buildings from SqFtProForma steps, formatted by\n the pick() method in the develop.py module\n Returns\n -------\n DataFrame with probabilities for each record (building) in the input\n dataframe.\n\n \"\"\"\n\n id = 'parcel_zoning_id_combination'\n df = df[[id,'zoning_id','max_profit','parcel_size']]\n btypes = orca.get_table('btypes_proposals').to_frame()\n df = pd.merge(df, btypes, on=id, how='left')\n df['zoning_building'] = df['zoning_id'].astype(int).astype(str) +'_'+ \\\n df['building_type_id'].astype(int).astype(str)\n p_factor = orca.get_table('allowable_building_types').to_frame \\\n (['zoning_id', 'building_type_id', 'probability'])\n p_factor['zoning_building'] = p_factor['zoning_id'].astype(str)+'_'+\\\n p_factor['building_type_id'].astype(str)\n p_factor = p_factor.drop(columns=['zoning_id', 'building_type_id'])\n df = pd.merge(df.reset_index(), p_factor, on='zoning_building')\\\n .set_index('index')\n df = df.drop(columns=['zoning_building'])\n df['max_profit_per_size'] = (df.max_profit / df.parcel_size)\n df['scaled_max_profit_per_size'] = df.probability *(df.max_profit_per_size)\n appended_probabilities = pd.DataFrame()\n for btype in df.building_type_id.unique():\n df_btype = df[df['building_type_id']==btype].copy()\n total_profit_btype = df_btype.scaled_max_profit_per_size.sum()\n df_btype['probabilities'] = df_btype.scaled_max_profit_per_size\\\n /(total_profit_btype)\n appended_probabilities = appended_probabilities.append(df_btype)\n return appended_probabilities\n\ndef update_annexed_col(parcelsdf):\n @orca.column('parcels', 'annexed', cache=True, cache_scope='step')\n def func():\n series = pd.Series(data = parcelsdf.annexed, index =parcelsdf.index)\n return series\ndef update_city(parcelsdf):\n @orca.column('parcels', 'city', cache=True, cache_scope='step')\n def func():\n series = pd.Series(data = parcelsdf.city, index =parcelsdf.index)\n return series\n return func\ndef update_overlay_id(parcelsdf):\n @orca.column('parcels', 'overlay_id', cache=True, cache_scope='step')\n def func():\n series = pd.Series(data = parcelsdf.overlay_id, index = parcelsdf.index)\n return series\n return func\ndef update_zoning_cols(parcelsdf, col):\n @orca.column('parcels', col, cache=True, cache_scope='step')\n def func():\n series = pd.Series(data=parcelsdf[col], index=parcelsdf.index)\n return series\n return func\n\ndef update_annexed(new_buildings):\n \"\"\"\n Updates the 'city' and 'overlay_id fields for parcels that get annexed\n during the simulation year. 
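A parcel is treated as annexed when it develops in the current year, lies inside a UGB, and has no city assigned; its 'city' is then set to its 'ugb' value and its overlay is switched to the corresponding annexed_overlay_id. 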
Prints number of developed, rezoned, and\n annexed parcels.\n ----------\n new_buildings: DataFrame\n Table with the buildings that were selected by the developer model\n\n Returns\n -------\n None\n \"\"\"\n new_buildings['rezoned'] = 0\n new_buildings.loc[new_buildings.zoning_id !=\n new_buildings.original_zoning_id, 'rezoned'] = 1\n new_buildings = new_buildings.copy().sort_values('rezoned').\\\n groupby('parcel_id', as_index=False).last()\n parcel_cols = ['parcel_id', 'city', 'ugb', 'overlay_id']\n parcels = orca.get_table('parcels').to_frame(parcel_cols).reset_index()\n parcels['developed'] = 0\n parcels['annexed'] = 0\n parcels.loc[parcels.parcel_id.isin(new_buildings.parcel_id),\n 'developed'] = 1\n parcels.loc[(parcels['developed'] == 1) & (parcels['ugb'].notnull()) &\n (parcels['city'].isnull()),'annexed'] = 1\n parcels.loc[parcels.annexed == 1, 'city'] = parcels.ugb\n overlays = orca.get_table('zone_overlay_types').to_frame()\n overlays = overlays[overlays['overlay_id']\n != overlays['annexed_overlay_id']].copy()\n cols = overlays.columns.drop(['overlay_id', 'annexed_overlay_id',\n 'overlay_combination', 'cost_in_city',\n 'cost_in_ugb', 'cost_outside_ugb'])\n for col in cols:\n overlays = overlays.rename(columns={col: col + '_overlay'})\n parcels = parcels.\\\n merge(overlays, on='overlay_id',how='left').set_index('parcel_id')\n parcels.loc[\n parcels.annexed == 1, 'overlay_id'] = parcels.annexed_overlay_id\n annexed = parcels[parcels.annexed==1].copy().\\\n groupby('city', as_index=False).sum()\n update_annexed_col(parcels)\n update_city(parcels)\n update_overlay_id(parcels)\n for col in cols:\n col_overlay = col + '_overlay'\n parcels.loc[(parcels.annexed==1) & (parcels[col_overlay].notnull()),\n col] = parcels[col_overlay]\n update_zoning_cols(parcels, col)\n print ('Total parcels that will develop: ',\n new_buildings.parcel_id.nunique())\n print ('Total rezoned parcels: ', new_buildings.rezoned.sum())\n for city in annexed.city.unique():\n print ('Total annexed parcels: ',city,': ',\n annexed[annexed['city']==city].annexed.item())\n\ndef add_extra_columns(df):\n df['units'] = df.residential_units + df.non_residential_sqft\n for col in ['maplot', 'improvement_value', 'imputed']:\n df[col] = 0\n df['impval_per_unit'] = 1 # Placeholder\n btypes_columns = ['parcel_zoning_id_combination', 'building_type_id']\n btypes = orca.get_table('btypes_proposals').to_frame(btypes_columns)\n df = pd.merge(df, btypes, on='parcel_zoning_id_combination', how='left')\n update_annexed(df)\n return df\n\ndef custom_selection(self, df, p, target_units):\n btypes = orca.get_table('btypes_proposals').to_frame()\n df = pd.merge(df, btypes, on='parcel_zoning_id_combination', how ='left')\n selected = np.array([])\n for btype in target_units.index.get_values():\n target_units_btype = target_units.loc[btype].get_value('target_units')\n df_btype = df[df['building_type_id']==btype]\n p_btype = p[p.building_type_id == btype].probabilities\n sample_size = int(min(len(df_btype.index), target_units_btype))\n if sample_size != 0:\n choices_btype = proposal_select.\\\n weighted_random_choice_multiparcel\\\n (df_btype, p_btype, target_units_btype)\n selected = np.append(selected, choices_btype)\n return selected\n\n\[email protected]()\ndef feasibility(site_proposals):\n parcel_utils.run_feasibility(site_proposals,\n parcel_average_price,\n parcel_is_allowed,\n cfg='proforma.yaml',\n parcel_custom_callback=parcel_custom_callback,\n modify_costs=modifies_costs)\n formats_feasibility(site_proposals)\n\n\[email 
protected]()\ndef residential_developer(feasibility, households, buildings, parcels, year):\n target_vacancies = orca.get_table('target_vacancies').to_frame()\n new_buildings = parcel_utils.run_developer(\n \"residential\",\n households,\n buildings,\n 'residential_units',\n feasibility,\n parcels.developable_sqft,\n parcels.mean_sqft_per_unit,\n parcels.sum_residential_units,\n 'res_developer.yaml',\n year=year,\n target_vacancy = target_vacancies.reset_index(),\n form_to_btype_callback=None,\n add_more_columns_callback=add_extra_columns,\n profit_to_prob_func=scales_probability,\n custom_selection_func=custom_selection)\n\n\[email protected]()\ndef non_residential_developer(feasibility, jobs, buildings, parcels, year):\n target_vacancies = orca.get_table('target_vacancies').to_frame()\n new_buildings = parcel_utils.run_developer(\n [\"office\", \"retail\", \"industrial\"],\n jobs,\n buildings,\n 'job_spaces',\n feasibility,\n parcels.developable_sqft,\n parcels.mean_sqft_per_unit,\n parcels.sum_job_spaces,\n 'nonres_developer.yaml',\n year=year,\n target_vacancy=target_vacancies.reset_index(),\n form_to_btype_callback=None,\n add_more_columns_callback=add_extra_columns,\n profit_to_prob_func=scales_probability,\n custom_selection_func=custom_selection)\n\n\n#### Transition\n\ndef full_transition(agents, agent_controls, totals_column, year,\n location_fname, linked_tables=None,\n accounting_column=None, set_year_built=False):\n \"\"\"\n Run a transition model based on control totals specified in the usual\n UrbanSim way\n Parameters\n ----------\n agents : DataFrameWrapper\n Table to be transitioned\n agent_controls : DataFrameWrapper\n Table of control totals\n totals_column : str\n String indicating the agent_controls column to use for totals.\n year : int\n The year, which will index into the controls\n location_fname : str\n The field name in the resulting dataframe to set to -1 (to unplace\n new agents)\n linked_tables : dict, optional\n Sets the tables linked to new or removed agents to be updated with\n dict of {'table_name':(DataFrameWrapper, 'link_id')}\n accounting_column : str, optional\n Name of column with accounting totals/quantities to apply toward the\n control. 
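(For example, a hypothetical 'number_of_jobs' column would let a single row count as several units toward the control total.) 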
If not provided then row counts will be used for accounting.\n set_year_built: boolean\n Indicates whether to update 'year_built' columns with current\n simulation year\n Returns\n -------\n Nothing\n \"\"\"\n ct = agent_controls.to_frame()\n agnt = agents.local\n print(\"Total agents before transition: {}\".format(len(agnt)))\n tran = transition.TabularTotalsTransition(ct, totals_column,\n accounting_column)\n updated, added, copied, removed = tran.transition(agnt, year)\n updated.loc[added, location_fname] = -1\n if set_year_built:\n updated.loc[added, 'year_built'] = year\n\n updated_links = {}\n if linked_tables:\n for table_name, (table, col) in linked_tables.iteritems():\n print('updating linked table {}'.format(table_name))\n updated_links[table_name] = \\\n update_linked_table(table, col, added, copied, removed)\n orca.add_table(table_name, updated_links[table_name])\n\n print(\"Total agents after transition: {}\".format(len(updated)))\n orca.add_table(agents.name, updated[agents.local_columns])\n return updated, added, copied, removed\n\n\[email protected]('household_transition')\ndef household_transition(households, annual_household_control_totals, year):\n full_transition(households, annual_household_control_totals,\n 'total_number_of_households', year, 'building_id')\n\n\[email protected]('job_transition')\ndef job_transition(jobs, annual_employment_control_totals, year):\n full_transition(jobs, annual_employment_control_totals,\n 'total_number_of_jobs', year, 'building_id')\n\n", "id": "491061", "language": "Python", "matching_score": 5.725887775421143, "max_stars_count": 2, "path": "bayarea/models.py" }, { "content": "import orca\nimport numpy as np\nimport pandas as pd\n\n\n# data documentation: https://berkeley.app.box.com/notes/282712547032\n\n\n# Set data directory\n\nd = './data/'\n\nif 'data_directory' in orca.list_injectables():\n d = orca.get_injectable('data_directory')\n\n\[email protected]('store', cache=True)\ndef hdfstore():\n return pd.HDFStore((d+\"model_data.h5\"),\n mode='r')\n\n\[email protected]('parcels', cache=True)\ndef parcels(store):\n df = store['parcels']\n df.index.name = 'parcel_id'\n return df\n\n\[email protected]('buildings', cache=True)\ndef buildings(store):\n df = store['buildings']\n return df\n\[email protected]('building_types', cache=True)\ndef buildings(store):\n df = store['building_types']\n return df\n\[email protected]('jobs', cache=True)\ndef jobs(store):\n df = store['jobs']\n return df\n\n\[email protected]('establishments', cache=True)\ndef establishments(store):\n df = store['establishments']\n return df\n\n\[email protected]('households', cache=True)\ndef households(store):\n df = store['households']\n return df\n\n\[email protected]('persons', cache=True)\ndef persons(store):\n df = store['persons']\n return df\n\n\[email protected]('rentals', cache=True)\ndef rentals(store):\n rentals = store['rentals']\n rentals.rent[rentals.rent < 100] = 100\n rentals.rent[rentals.rent > 10000] = 10000\n\n rentals.rent_sqft[rentals.rent_sqft < .2] = .2\n rentals.rent_sqft[rentals.rent_sqft > 50] = 50\n return rentals\n\n\[email protected]('units', cache=True)\ndef units(store):\n df = store['units']\n df.index.name = 'unit_id'\n return df\n\n\[email protected]('nodessmall', cache=True)\ndef nodessmall(store):\n df = store['nodessmall']\n df.index.name = 'osmid'\n return df\n\n\[email protected]('edgessmall', cache=True)\ndef edgessmall(store):\n df = store['edgessmall']\n return df\n\n\[email protected]('nodeswalk', cache=True)\ndef 
nodeswalk(store):\n df = store['nodeswalk']\n df.index.name = 'node_id_walk'\n return df\n\n\[email protected]('edgeswalk', cache=True)\ndef edgeswalk(store):\n df = store['edgeswalk']\n return df\n\n\[email protected]('nodesbeam', cache=True)\ndef nodesbeam(store):\n df = store['nodesbeam']\n df.index.name = 'node_id_beam'\n return df\n\n\[email protected]('edgesbeam', cache=True)\ndef edgesbeam(store):\n df = store['edgesbeam']\n return df\n\n\n# Broadcasts, a.k.a. merge relationships\n\n\norca.broadcast(\n 'parcels', 'buildings', cast_index=True, onto_on='parcel_id')\norca.broadcast(\n 'buildings', 'units', cast_index=True, onto_on='building_id')\norca.broadcast(\n 'units', 'households', cast_index=True, onto_on='unit_id')\norca.broadcast(\n 'households', 'persons', cast_index=True, onto_on='household_id')\norca.broadcast(\n 'buildings', 'jobs', cast_index=True, onto_on='building_id')\norca.broadcast(\n 'buildings', 'establishments', cast_index=True, onto_on='building_id')\norca.broadcast(\n 'nodeswalk', 'parcels', cast_index=True, onto_on='node_id_walk')\norca.broadcast(\n 'nodeswalk', 'craigslist', cast_index=True, onto_on='node_id_walk')\norca.broadcast(\n 'nodessmall', 'craigslist', cast_index=True, onto_on='node_id_small')\norca.broadcast(\n 'nodessmall', 'parcels', cast_index=True, onto_on='node_id_small')\norca.broadcast(\n 'nodesbeam', 'parcels', cast_index=True, onto_on='node_id_beam')\norca.broadcast(\n 'nodesbeam', 'craigslist', cast_index=True, onto_on='node_id_beam')\n \n# @orca.table('annual_employment_control_totals', cache=True)\n# def aect(store):\n # df = store['annual_employment_control_totals']\n # return df\n\n\n# @orca.table('annual_household_control_totals', cache=True)\n# def ahct(store):\n # df = store['annual_household_control_totals']\n # return df\n\n\n\n# def register_aggregation_table(table_name, table_id):\n # \"\"\"\n # Generator function for tables representing aggregate geography.\n # \"\"\"\n\n # @orca.table(table_name, cache=True)\n # def func(parcels):\n # geog_ids = parcels[table_id].value_counts().index.values\n # df = pd.DataFrame(index=geog_ids)\n # df.index.name = table_id\n # return df\n\n # return func\n\n\n# # aggregate_geos = {'zonings': 'zoning_id',\n # # 'locations': 'location_id',\n # # 'block_groups': 'block_group_id',\n # # 'blocks': 'block_id',\n # # 'zones': 'zone_id',\n # # 'plans': 'plan_id',\n # # 'zone_districts': 'zone_district_id',\n # # 'zone_subdistricts': 'zone_subdistrict_id'}\n# # orca.add_injectable('aggregate_geos', aggregate_geos)\n\n\n# aggregate_geos = {'blocks': 'block_id'}\n# orca.add_injectable('aggregate_geos', aggregate_geos)\n\n# for geog in aggregate_geos.items():\n # register_aggregation_table(geog[0], geog[1])\n\n\n# @orca.injectable('year')\n# def year():\n # default_year = 2010\n # try:\n # iter_var = orca.get_injectable('iter_var')\n # if iter_var is not None:\n # return iter_var\n # else:\n # return default_year\n # except:\n # return default_year\n\n# @orca.table('building_types', cache=True)\n\n \n# @orca.table('plan_types', cache=True)\n# def plan_types():\n # df = pd.read_csv('./data/plan_types.csv').set_index('plan_id')\n # return df\n\n\n# @orca.table('zone_types', cache=True)\n# def zone_types():\n # df = pd.read_csv('./data/zone_types.csv').set_index('zoning_id')\n # return df\n\n \n# @orca.table('plan_compatible_zones', cache=True)\n# def plan_compatible_zones():\n # df = pd.read_csv('./data/plan_compatible_zones.csv').\\\n # set_index('plan_zone_id')\n # return df\n\n\n\n# 
@orca.table('allowable_building_types', cache=True)\n# def allowable_building_types():\n # df = pd.read_csv('./data/allowable_building_types.csv').\\\n # set_index('zoning_building_id')\n # return df\n\n \n# @orca.table('building_sqft_per_job', cache=True)\n# def building_sqft_per_job():\n # df = pd.read_csv('./data/bsqft_per_job.csv')\n # return df\n\n\n# @orca.table('zone_overlay_types', cache=True)\n# def zone_overlay_types():\n # df = pd.read_csv('./data/zone_overlay_types.csv')\n # return df\n\n\n# @orca.table('site_proposals', cache=False)\n# def site_proposals(parcels, zone_types, plan_compatible_zones):\n # # Prepares input files\n # parcelsdf = parcels.local.reset_index()\n # zone_typesdf = zone_types.to_frame().reset_index()\n # plan_compatible_zonesdf = plan_compatible_zones.to_frame()\n\n # # Identifies parcel location (\"status_ugb\")\n # parcelsdf = defines_location(parcelsdf)\n\n # # Creates possible parcel_zoning combinations\n # site_proposals = creates_site_proposals\\\n # (parcelsdf, plan_compatible_zonesdf, zone_typesdf)\n\n # #Calculates rezoning costs if applicable\n # site_proposals = rezoning_costs(site_proposals)\n\n # # Calculates overlay costs if applicable\n # site_proposals = overlay_costs(site_proposals)\n\n # # Formats output\n # site_proposals = formats_site_proposals(site_proposals)\n\n # return site_proposals\n\n\n# def defines_location(parcelsdf):\n # parcelsdf.loc[parcelsdf['city'].notnull(),'status_ugb'] = 'within_city'\n # parcelsdf.loc[(parcelsdf['city'].isnull()) &\n # (parcelsdf['ugb'].notnull()),'status_ugb'] = 'within_ugb'\n # parcelsdf.loc[(parcelsdf['city'].isnull()) &\n # (parcelsdf['ugb'].isnull()),'status_ugb'] = 'outside_ugb'\n # return parcelsdf\n\n\n# def creates_site_proposals(parcelsdf, plan_compatible_zonesdf, zone_typesdf):\n # # parcels without zoning_id are removed from site_proposals\n # parcelsdf[['zoning_id', 'plan_id']] = \\\n # parcelsdf[['zoning_id', 'plan_id']].fillna(value=0)\n # parcelsdf = parcelsdf[parcelsdf['zoning_id'] != 0]\n\n # # Identifies valid plan_zoning combinations existing in parcels table but\n # # missing in plan_compatible_zones table. This ensures that all existing\n # # parcel-zone combinations are also included in site_proposals\n # missing_plan_zoning_combinations = \\\n # missing_plan_zone_comb(parcelsdf, plan_compatible_zonesdf)\n\n # # Merges plan_compatible_zones table to parcels table to create\n # # all potential parcel_zoning combinations\n # plan_compatible_zonesdf = plan_compatible_zonesdf[\n # ['plan_id', 'zoning_id', 'cost_in_city',\n # 'cost_in_ugb', 'cost_outside_ugb']]\n # plan_compatible_zonesdf = plan_compatible_zonesdf.rename(\n # columns={'zoning_id': 'potential_zoning_id',\n # 'cost_in_city': 'cost_in_city_',\n # 'cost_in_ugb': 'cost_in_ugb_',\n # 'cost_outside_ugb': 'cost_outside_ugb_'})\n\n # site_proposals = pd.merge(\n # parcelsdf, plan_compatible_zonesdf, on='plan_id', how='left')\n\n # # Parcels that have zoning_id information but no plan_id information\n # # are only represented with original zoning_id\n # site_proposals.loc[(site_proposals.plan_id == 0) &\n # (site_proposals.zoning_id != 0),\n # 'potential_zoning_id'] = site_proposals['zoning_id']\n\n # # Parcels that have a plan_id that doesn't exist in the\n # # plan_compatible_zones table and Plans with zoning_id = 0 in the\n # # plan_compatible_zones table can be identified with null and zero\n # # 'potential_zoning_id`, respectively. 
This variable is filled with\n # # `zoning_id` in these cases, to represent the original zoning_id only\n # site_proposals.loc[site_proposals.potential_zoning_id.isnull(),\n # 'potential_zoning_id'] = site_proposals['zoning_id']\n # site_proposals.loc[site_proposals.potential_zoning_id == 0,\n # 'potential_zoning_id'] = site_proposals['zoning_id']\n\n # # Appends missing plan_zoning combinations to the site_proposals table\n # site_proposals = \\\n # site_proposals.append(missing_plan_zoning_combinations).reset_index()\n # site_proposals.loc[site_proposals.missing == 1, 'potential_zoning_id'] = \\\n # site_proposals['zoning_id']\n # site_proposals.drop(columns=['missing'], inplace = True)\n\n # # Removes site proposals that would require rezoning but have\n # # can_rezone==True\n # zone_typesdf = \\\n # zone_typesdf.rename(columns={'zoning_id': 'potential_zoning_id'})\n # site_proposals = pd.merge(\n # site_proposals, zone_typesdf, on = 'potential_zoning_id', how = 'left')\n # site_proposals['remove'] = 0\n # site_proposals.loc[(site_proposals['zoning_id']!=\n # site_proposals['potential_zoning_id']) &\n # (site_proposals['can_rezone']==0), 'remove'] = 1\n # site_proposals = site_proposals[site_proposals['remove'] == 0]\n\n # return site_proposals\n\n\n# def missing_plan_zone_comb(parcelsdf, plan_compatible_zonesdf):\n # possible = plan_compatible_zonesdf[['plan_id', 'zoning_id']].copy()\n # possible = possible[possible['plan_id'] != 0]\n # possible = possible[possible['zoning_id'] != 0]\n # possible['represented'] = 1\n # actual = parcelsdf[parcelsdf['plan_id'] != 0].copy()\n # actual = actual.merge(possible, on=['plan_id', 'zoning_id'], how='left')\n # missing = actual[(actual['represented'] != 1)].copy()\n # missing = missing[missing['zoning_id'] != 0]\n # missings = missing[missing['plan_id'] != 0]\n # missing = missing.drop(columns=['represented']).copy()\n # missing['potential_zoning_id'] = missing['zoning_id']\n # missing['cost_in_city_'] = 0\n # missing['cost_in_ugb_'] = 0\n # missing['cost_outside_ugb_'] = 0\n # missing['missing'] = 1\n # return missing\n\n# def rezoning_costs(site_proposals):\n # # Identifies combinations that imply rezoning\n # site_proposals.loc[site_proposals.zoning_id !=\n # site_proposals.potential_zoning_id, 'rezoning'] = 1\n # site_proposals.loc[site_proposals['rezoning'] != 1, 'rezoning_cost'] = 0\n\n # # Includes column with rezoning_cost (considering status_ugb)\n # site_proposals.loc[(site_proposals['rezoning'] == 1) &\n # (site_proposals['status_ugb'] == 'within_city'),\n # 'rezoning_cost'] = site_proposals['cost_in_city_']\n # site_proposals.loc[(site_proposals['rezoning'] == 1) &\n # (site_proposals['status_ugb'] == 'within_ugb'),\n # 'rezoning_cost'] = site_proposals['cost_in_ugb_']\n # site_proposals.loc[\n # (site_proposals['rezoning'] == 1) &\n # (site_proposals['status_ugb'] == 'outside_ugb'), 'rezoning_cost'] = \\\n # site_proposals['cost_outside_ugb_']\n # site_proposals = \\\n # site_proposals.drop(columns=['cost_in_city_', 'cost_in_ugb_',\n # 'cost_outside_ugb_', 'rezoning'])\n # return site_proposals\n\n\n# def overlay_costs(site_proposals):\n\n # # Includes column with overlay_cost\n # # (considering location in relation to ugb)\n # overlays = orca.get_table('zone_overlay_types').to_frame()\n # overlays = overlays[['overlay_id', 'annexed_overlay_id',\n # 'overlay_combination' , 'cost_in_city', 'cost_in_ugb',\n # 'cost_outside_ugb']].copy()\n # overlays = overlays.rename(columns={'cost_in_city': 'cost_in_city_',\n # 'cost_in_ugb': 
'cost_in_ugb_',\n # 'cost_outside_ugb':\n # 'cost_outside_ugb_'})\n\n # site_proposals.loc[site_proposals.overlay_id.isnull(), 'overlay_id'] = '-1'\n # site_proposals['overlay_id'] = \\\n # site_proposals['overlay_id'].astype(float).astype(int)\n # site_proposals = \\\n # pd.merge(site_proposals, overlays, on='overlay_id', how = 'left')\n # site_proposals.loc[site_proposals['status_ugb'] == 'within_city',\n # 'overlay_cost'] = site_proposals['cost_in_city_']\n # site_proposals.loc[site_proposals['status_ugb'] == 'within_ugb',\n # 'overlay_cost'] = site_proposals['cost_in_ugb_']\n # site_proposals.loc[site_proposals['status_ugb'] == 'outside_ugb',\n # 'overlay_cost'] = site_proposals['cost_outside_ugb_']\n # site_proposals = site_proposals.drop\\\n # (columns=['cost_in_city_', 'cost_in_ugb_', 'cost_outside_ugb_'])\n\n # return site_proposals\n\n# def formats_site_proposals(site_proposals):\n # # Removes irrelevant fields and renames \"potential_zoning_id\" to\n # # \"parcel_zoning_id_combination\", unique to each combination in the table\n # site_proposals['parcel_zoning_id_combination'] = \\\n # site_proposals['parcel_id'].astype(int).astype(str) + \"_\" + \\\n # site_proposals['potential_zoning_id'].astype(int).astype(str)\n # site_proposals = site_proposals.rename\\\n # (columns={'zoning_id': \"original_zoning_id\"})\n\n # # Reorders columns to have newly created columns at the beggining.\n # ordered_columns = ['parcel_zoning_id_combination', 'parcel_id',\n # 'primary_id', 'zone_id','x', 'y','block_group_id',\n # 'block_id', 'zone_district_id','zone_subdistrict_id',\n # 'location_id','city', 'ugb','status_ugb','plan_id',\n # 'overlay_id', 'annexed_overlay_id','original_zoning_id',\n # 'zoning_name','potential_zoning_id','can_rezone',\n # 'rezoning_cost', 'overlay_cost', 'land_value', 'acres',\n # 'proportion_undevelopable','Shape_Length', 'Shape_Area',\n # 'max_far','placeholder_max_far', 'max_dua',\n # 'placeholder_max_dua','min_far', 'min_dua',\n # 'max_height', 'min_front_setback','max_front_setback',\n # 'rear_setback','side_setback','coverage', 'OBJECTID']\n\n\n # site_proposals = site_proposals.reindex(columns=ordered_columns)\n # return site_proposals\n\n# # @orca.table('target_vacancies', cache=True)\n# # def target_vacancies():\n # # vacancies = pd.read_csv('./data/target_vacancies.csv').\\\n # # set_index('building_type_id')\n # # return vacancies\n\n\n# # Dictionary of variables to generate output indicators and charts\n# def creates_main_dicts():\n # dict = {'total': {'households': 'Total households',\n # 'jobs': 'Total jobs'},\n # 'sum': {\n # 'residential_units': 'Total residential units in buildings',\n # 'residential_sqft':\n # 'Total residential area in buildings (sqft)',\n # 'non_residential_sqft':\n # 'Total non residential sqft in buildings',\n # 'job_spaces': 'Total job spaces in buildings',\n # 'residential_units': 'Total number of residential units',\n # 'acres': 'Total area (acres)',\n # 'persons': 'Total persons in households',\n # 'workers': 'Total workers in households',\n # 'children': 'Total children in households',\n # 'cars': 'Total vehicles in households',\n # 'income': 'Total annual income from households',\n # 'recent_mover':\n # 'Total households that moved within last 5 yrs'},\n # 'mean': {\n # 'non_residential_sqft':\n # 'Average non residential sqft in buildings',\n # 'sqft_per_unit': 'Average area per residential unit in sqft',\n # 'sqft_per_unit_ave_800_linear':\n # 'Average area per residential unit in sqft within 800m '\n # 'along the auto street 
network (using flat decay)',\n # 'job_spaces': 'Average job spaces in buildings',\n # 'year_built': 'Average year of construction of buildings',\n # 'sector_id': 'Average job sector id',\n # 'acres': 'Average parcel area (acres)',\n # 'persons': 'Average persons in households',\n # 'workers': 'Average workers in households',\n # 'children': 'Average children in households',\n # 'cars': 'Average vehicles in households',\n # 'income': 'Average household annual income',\n # 'age_of_head': 'Average age of the household head',\n # 'x': 'Average x coordinate of parcels',\n # 'y': 'Average y coordinate of parcels',\n # 'value_per_unit': 'Average assessed value per unit',\n # 'value_per_sqft': 'Average assessed value per sqft of area'},\n # 'median': {\n # 'building_type_id': 'Median building type id',\n # 'income_quartile': 'Median income quartile',\n # 'tenure': 'Median tenure code of households',\n # 'race_of_head': 'Median race code of head of household',\n # 'sector_id': 'Median job sector id'},\n # 'other': {'density_buildings': 'Density of buildings',\n # 'density_households': 'Density of households',\n # 'density_jobs': 'Density of jobs',\n # 'ratio_jobs_to_households': 'Job-housing balance',\n # 'ratio_workers_to_persons': 'Ratio of workers to persons',\n # 'ratio_households_to_residential_units':\n # 'Residential occupancy rate',\n # 'residential_vacancy_rate':\n # 'Total residential vacancy rate',\n # 'non_residential_vacancy_rate':\n # 'Total non residential vacancy rate',\n # 'remaining_nonresidential_sqft_capacity':\n # 'Total remaining non residential sqft capacity',\n # 'remaining_residential_unit_capacity':\n # 'Total remaining residential unit capacity',\n # 'ave_annual_rent_sqft_400m':'Average annual rent per sqft '\n # 'within 400m along the auto street network (flat decay)',\n # 'ave_annual_office_rent_sqft_800m':'Average annual office '\n # 'rent per sqft within 800m along the auto street network '\n # '(using flat decay)',\n # 'ave_annual_industrial_rent_sqft_800m':'Average annual '\n # 'industrial rent per sqft within 800m along the auto '\n # 'street network (using flat decay)'}}\n # custom_dict = {'jobs_sector_id':\n # {'data_name': 'Total jobs',\n # 'aggregation_name': 'sector id'},\n # 'households_income_quartile':\n # {'data_name': 'Total households',\n # 'aggregation_name': 'income quartile'},\n # 'households_age_of_head_quartile':\n # {'data_name': 'Total households',\n # 'aggregation_name': 'age of head quartile'},\n # 'households_recent_mover_income':\n # {'data_name': 'Total households that moved within last'\n # ' 5 years',\n # 'aggregation_name': 'income quartile (1 = lowest '\n # 'quartile, 2 = all others)'},\n # 'buildings_repm_id':\n # {'data_name': 'Total buildings',\n # 'aggregation_name': 'representative building type'}}\n # prop_vars = {'households': ['persons', 'race_of_head', 'workers',\n # 'children','cars', 'tenure', 'recent_mover',\n # 'income_quartile'],\n # 'jobs': ['sector_id'],\n # 'buildings': ['building_type_id']}\n # uses = ['retail', 'industrial','sf_detached', 'duplex_townhome',\n # 'multifamily', 'office']\n # return dict, custom_dict, prop_vars, uses\n\n\n# def adds_dict_proportions(prop_vars, dict):\n # prop = {}\n # for agent in prop_vars:\n # vars = prop_vars[agent]\n # agents = orca.get_table(agent)\n # for var in vars:\n # agents_by_cat = agents[var].value_counts()\n # cats_to_measure = agents_by_cat[agents_by_cat > 500].index.values\n # for cat in cats_to_measure:\n # new_var = var + '_' + str(cat)\n # desc = 'Proportion of ' + agent 
+ ' with ' + var + \\\n # ' equal to ' + str(cat)\n # prop[new_var] = desc\n # dict['prop'] = prop\n # return dict\n\n# def adds_derived_vars_dict(dict, uses):\n # new_dict = {}\n # derived_vars = {'total': ['households', 'jobs'],\n # 'sum': dict['sum'].keys(),\n # 'mean': dict['mean'].keys(),\n # 'median': dict['median'].keys(),\n # 'prop': dict['prop'].keys()}\n # for agg in ['total', 'sum', 'mean', 'median', 'prop','other']:\n # for var in dict[agg]:\n # if agg != 'other':\n # new_var = agg + '_' + var\n # else:\n # new_var = var\n # new_dict[new_var] = dict[agg][var]\n # for use in uses:\n # var = 'mean_pred_' + use + '_price'\n # new_var = var + '_ave_800_linear'\n # new_dict[var] = 'Average predicted ' + use + ' price per sqft'\n # method =' within 800m along the auto street network (using flat decay)'\n # new_dict[new_var] = new_dict[var] + method\n\n # for dist in [500, 1000, 1500]:\n # for method in ['linear', 'flat']:\n # for agg in ['total', 'sum', 'mean', 'prop']:\n # for var in derived_vars[agg]:\n # new_var = agg + '_' + var + '_ave_' + str(\n # dist) + '_' + method\n # desc = 'Log of average within ' + str(dist/1000) + \\\n # 'km along the auto street network (' + method + \\\n # ' decay) of: ' + \\\n # dict[agg][var].strip('Log of ').capitalize()\n # new_dict[new_var] = desc\n\n # new_var = 'without_log_' + new_var\n # desc = 'Average within ' + str(dist / 1000) + \\\n # 'km along the auto street network (' + method + \\\n # ' decay) of: ' + dict[agg][var]\n # new_dict[new_var] = desc\n # for agg in ['total', 'sum']:\n # for var in derived_vars[agg]:\n # new_var = agg + '_' + var + '_sum_' + str(\n # dist) + '_' + method\n # desc = 'Log of sum within ' + str(dist/1000) + \\\n # 'km along the auto street network (' + method + \\\n # ' decay) of: ' + \\\n # dict[agg][var].strip('Log of ').capitalize()\n # new_dict[new_var] = desc\n\n # new_var = 'without_log_' + new_var\n # desc = 'Sum within ' + str(dist / 1000) + \\\n # 'km along the auto street network (' + method + \\\n # ' decay) of: ' + dict[agg][var]\n # new_dict[new_var] = desc\n # return new_dict\n\n# @orca.injectable('dictionary')\n# def dictionary():\n # new_dict = {}\n # dict, custom_dict, prop_vars, uses = creates_main_dicts()\n # dict = adds_dict_proportions(prop_vars, dict)\n # new_dict = adds_derived_vars_dict(dict, uses)\n # full_dict = {'var_dict': new_dict}\n # full_dict['custom_var_dict'] = custom_dict\n # return full_dict\n\n\n", "id": "4201963", "language": "Python", "matching_score": 7.19540548324585, "max_stars_count": 2, "path": "bayarea/datasources.py" }, { "content": "import copy\nimport orca\nimport numpy as np\nimport pandas as pd\n\nfrom urbansim.utils import misc\nfrom variable_generators import generators\n\nfrom bayarea import datasources\n\n###########################\n# walk network vars #\n###########################\[email protected]('parcels')\ndef node_id_walk(parcels, netwalk):\n idswalk_parcel = netwalk.get_node_ids(parcels.x, parcels.y)\n return idswalk_parcel\n\n\[email protected]('rentals')\ndef node_id_walk(rentals, netwalk):\n idswalk_rentals = netwalk.get_node_ids(rentals.longitude, rentals.latitude)\n return idswalk_rentals\n\n\[email protected]('buildings')\ndef node_id_walk(parcels, buildings):\n return misc.reindex(parcels.node_id_walk, buildings.parcel_id)\n\n\[email protected]('units')\ndef node_id_walk(buildings, units):\n return misc.reindex(buildings.node_id_walk, units.building_id)\n\n\[email protected]('households')\ndef node_id_walk(units, households):\n return 
misc.reindex(units.node_id_walk, households.unit_id)\n\n\[email protected]('persons')\ndef node_id_walk(households, persons):\n return misc.reindex(households.node_id_walk, persons.household_id)\n\n\[email protected]('jobs')\ndef node_id_walk(buildings, jobs):\n return misc.reindex(buildings.node_id_walk, jobs.building_id)\n\n\n###########################\n# beam network vars #\n###########################\[email protected]('parcels')\ndef node_id_beam(parcels, netbeam):\n idsbeam_parcel = netbeam.get_node_ids(parcels.x, parcels.y)\n return idsbeam_parcel\n\n\[email protected]('rentals')\ndef node_id_beam(rentals, netbeam):\n idsbeam_rentals = netbeam.get_node_ids(\n rentals.longitude, rentals.latitude)\n return idsbeam_rentals\n\n\[email protected]('buildings')\ndef node_id_beam(parcels, buildings):\n return misc.reindex(parcels.node_id_beam, buildings.parcel_id)\n\n\[email protected]('jobs')\ndef node_id_beam(buildings, jobs):\n return misc.reindex(buildings.node_id_beam, jobs.building_id)\n\n\n\n# ## BEAM ##\n\n# @orca.column('buildings')\n# def all_buildings(buildings):\n # return pd.Series(np.ones(len(buildings)).astype('int32'), index=buildings.index)\n\n# @orca.column('parcels')\n# def node_id_beam(parcels, netbeam):\n # idsbeam_parcel = netbeam.get_node_ids(parcels.x, parcels.y)\n # return idsbeam_parcel\n\n\n# @orca.column('rentals')\n# def node_id_beam(rentals, netbeam):\n # idsbeam_rentals = netbeam.get_node_ids(\n # rentals.longitude, rentals.latitude)\n # return idsbeam_rentals\n\n\n# @orca.column('buildings')\n# def node_id_beam(parcels, buildings):\n # return misc.reindex(parcels.node_id_beam, buildings.parcel_id)\n\n\n# @orca.column('jobs')\n# def node_id_beam(buildings, jobs):\n # return misc.reindex(buildings.node_id_beam, jobs.building_id)\n\n# ## WALK ##\n\n# @orca.column('parcels')\n# def node_id_walk(parcels, netwalk):\n # idswalk_parcel = netwalk.get_node_ids(parcels.x, parcels.y)\n # return idswalk_parcel\n\n\n# @orca.column('rentals')\n# def node_id_walk(rentals, netwalk):\n # idswalk_rentals = netwalk.get_node_ids(\n # rentals.longitude, rentals.latitude)\n # return idswalk_rentals\n\n\n# @orca.column('buildings')\n# def node_id_walk(parcels, buildings):\n # return misc.reindex(parcels.node_id_walk, buildings.parcel_id)\n\n\n# @orca.column('jobs')\n# def node_id_walk(buildings, jobs):\n # return misc.reindex(buildings.node_id_walk, jobs.building_id)\n\n# def agg_var_building_type(geography, geography_id, var, buildingtype):\n\n # \"\"\"\n # Register parcel or zone variable by building type with orca.\n # Parameters\n # ----------\n # geography: str\n # Name of the larger geography to summarize the building variable ('parcels' or 'zones')\n # geography_id: str\n # Unique identifier of the geography used to summarize ('parcel_id' or 'zone_id)\n # var: str\n # Variable that will be aggregated ('residential_units' or 'non_residential_sqft')\n # buildingtype: int\n # Numeric code for the building type stored in building_type_id\n\n # Returns\n # -------\n # func : function\n # \"\"\"\n # var_name = 'sum_' + var + '_' + str(buildingtype)\n # @orca.column(geography, var_name, cache=True, cache_scope='step')\n # def func():\n # buildings = orca.get_table('buildings').to_frame(['building_type_id','parcel_id', 'residential_units', 'non_residential_sqft'])\n # parcel_zones = orca.get_table('parcels').to_frame(['parcel_id', 'zone_id'])\n # buildings = pd.merge(buildings, parcel_zones, on='parcel_id', how='left')\n # buildings_iter = buildings[buildings['building_type_id'] == 
buildingtype].copy()\n # values = buildings_iter[var].groupby(buildings_iter[geography_id]).sum().fillna(0)\n # locations_index = orca.get_table(geography).index\n # series = pd.Series(data=values, index=locations_index)\n # series = series.fillna(0)\n # return series\n # return func\n\n# btype_columns = ['building_type_id', 'is_residential', 'is_non_residential']\n# btypes = orca.get_table('building_types').to_frame(btype_columns).reset_index()\n# res_types = btypes[btypes['is_residential'] == True].building_type_id\n# nonres_types = btypes[btypes['is_non_residential'] == True].building_type_id\n# geographic_types = ['parcels', 'zones']\n# vars = ['residential_units', 'non_residential_sqft']\n\n# for geography in geographic_types:\n # if geography == 'parcels':\n # geography_id = 'parcel_id'\n # else:\n # geography_id = 'zone_id'\n # for var in vars:\n # if var == 'residential_units':\n # for buildingtype in res_types:\n # agg_var_building_type(geography, geography_id, var, buildingtype)\n # else:\n # for buildingtype in nonres_types:\n # agg_var_building_type(geography, geography_id, var, buildingtype)\n\n\n# @orca.column('zones', cache=True, cache_scope='step')\n# def residential_vacancy_rate(zones):\n # zones = zones.to_frame(['zone_id', 'total_households', 'sum_residential_units'])\n # zones['residential_vacancy_rate'] = 1 - zones['total_households'] / zones['sum_residential_units']\n # zones.loc[zones['residential_vacancy_rate'] < 0, 'residential_vacancy_rate'] = 0\n # zones = zones.fillna(0)\n # return zones.residential_vacancy_rate\n\n# @orca.column('zones', cache=True, cache_scope='step')\n# def non_residential_vacancy_rate(zones):\n # zones = zones.to_frame(['zone_id', 'total_jobs', 'sum_job_spaces'])\n # zones['non_residential_vacancy_rate'] = 1 - zones['total_jobs'] / zones['sum_job_spaces']\n # zones.loc[zones['non_residential_vacancy_rate'] < 0, 'non_residential_vacancy_rate'] = 0\n # zones = zones.fillna(0)\n # return zones.non_residential_vacancy_rate\n\n# @orca.column('parcels', cache=True, cache_scope='step')\n# def remaining_residential_unit_capacity(parcels):\n # parcels=calc_remaining_residential_capacity()\n # parcels.loc[parcels['remaining_residential_unit_capacity'] < 0, 'remaining_residential_unit_capacity'] = 0\n # parcels = parcels.fillna(0)\n # return parcels.remaining_residential_unit_capacity\n\n\n# @orca.column('zones', cache=True, cache_scope='step')\n# def remaining_residential_unit_capacity(zones):\n # parcels = calc_remaining_residential_capacity()\n # zones = parcels['remaining_residential_unit_capacity'].groupby(parcels['zone_id']).sum().reset_index()\n # zones.loc[zones['remaining_residential_unit_capacity'] < 0, 'remaining_residential_unit_capacity'] = 0\n # zones = zones.fillna(0)\n # return zones.remaining_residential_unit_capacity\n\n\n# @orca.column('parcels', cache=True, cache_scope='step')\n# def remaining_nonresidential_sqft_capacity(parcels):\n # parcels=calc_remaining_nonresidential_capacity()\n # parcels.loc[parcels['remaining_nonresidential_sqft_capacity'] < 0, 'remaining_nonresidential_sqft_capacity'] = 0\n # parcels = parcels.fillna(0)\n # return parcels.remaining_nonresidential_sqft_capacity\n\n\n# @orca.column('zones', cache=True, cache_scope='step')\n# def remaining_nonresidential_sqft_capacity(zones):\n # parcels = calc_remaining_nonresidential_capacity()\n # zones = parcels['remaining_nonresidential_sqft_capacity'].groupby(parcels['zone_id']).sum().reset_index()\n # 
zones.loc[zones['remaining_nonresidential_sqft_capacity']<0,'remaining_nonresidential_sqft_capacity']=0\n # zones = zones.fillna(0)\n # return zones.remaining_nonresidential_sqft_capacity\n\n\n# def calc_remaining_residential_capacity():\n # parcels = orca.get_table('parcels').to_frame(['parcel_id', 'zone_id', 'zoning_id', 'acres', 'sum_residential_units'])\n # zone_types = orca.get_table('zone_types').to_frame(['zoning_id', 'max_dua'])\n # allowable_buildings = orca.get_table('allowable_building_types').to_frame(['zoning_id','building_type_id'])\n # df = pd.merge(zone_types, allowable_buildings, on='zoning_id', how='left')\n # btype_columns = ['building_type_id','is_residential']\n # btypes = orca.get_table('building_types').to_frame(btype_columns).reset_index()\n # res_types = btypes[btypes['is_residential'] == True].building_type_id\n # df.loc[df.building_type_id.isin(res_types), 'residential'] = 1\n # df = df['residential'].groupby(df['zoning_id']).sum().reset_index()\n # zone_types = pd.merge(zone_types, df, on='zoning_id', how='left').fillna(0)\n # parcels = pd.merge(parcels, zone_types, on='zoning_id', how='left')\n # parcels.loc[parcels.residential>0,'remaining_residential_unit_capacity'] = parcels['acres']*parcels['max_dua'] \\\n # - parcels['sum_residential_units']\n # parcels=parcels.fillna(0)\n # return parcels\n\n# def calc_remaining_nonresidential_capacity():\n # parcels = orca.get_table('parcels').to_frame(['parcel_id', 'zone_id', 'zoning_id', 'acres', 'sum_non_residential_sqft'])\n # zone_types = orca.get_table('zone_types').to_frame(['zoning_id', 'max_far'])\n # allowable_buildings = orca.get_table('allowable_building_types').to_frame(['zoning_id', 'building_type_id'])\n # df = pd.merge(zone_types, allowable_buildings, on='zoning_id', how='left')\n # btype_columns = ['building_type_id', 'is_non_residential']\n # btypes = orca.get_table('building_types').to_frame(btype_columns).reset_index()\n # nonres_types = btypes[btypes['is_non_residential'] == True].building_type_id\n # df.loc[df.building_type_id.isin(nonres_types), 'non_residential'] = 1\n # df = df['non_residential'].groupby(df['zoning_id']).sum().reset_index()\n # zone_types = pd.merge(zone_types, df, on='zoning_id', how='left').fillna(0)\n # parcels = pd.merge(parcels, zone_types, on='zoning_id', how='left')\n # parcels.loc[parcels.non_residential > 0, 'remaining_nonresidential_sqft_capacity'] = parcels['acres'] \\\n # *43560 * parcels['max_far'] - parcels['sum_non_residential_sqft']\n # parcels = parcels.fillna(0)\n\n # return parcels\n\n# @orca.column('buildings', cache=True)\n# def repm_id(buildings):\n # buildings = orca.get_table('buildings').to_frame(['building_type_id'])\n # buildings['repm_id'] = 'na'\n\n # # Retail\n # retail_btypes = [4200, 4210, 4220, 4230, 4240, 4250, 4260, 4290, 4300, 4310]\n # buildings.repm_id[buildings.building_type_id.isin(retail_btypes)] = 'retail'\n\n # # Industrial\n # industrial_btypes = [5100, 5200]\n # buildings.repm_id[buildings.building_type_id.isin(industrial_btypes)] = 'industrial'\n\n # # Office\n # buildings.repm_id[buildings.building_type_id == 4100] = 'office'\n\n # # Residential\n # buildings.repm_id[buildings.building_type_id == 1110] = 'res_sf_detached'\n # duplex_townhome_btypes = [1121, 1122]\n # buildings.repm_id[buildings.building_type_id.isin(duplex_townhome_btypes)] = 'duplex_townhome'\n # mf_btypes = [1210, 1220]\n # buildings.repm_id[buildings.building_type_id.isin(mf_btypes)] = 'multifamily'\n\n # # Educational\n # educ_btypes = [6110, 6120, 6130, 6140, 
6150, 6160]\n # buildings.repm_id[buildings.building_type_id.isin(educ_btypes)] = 'educational'\n\n # # Other\n # other_btypes = [4320, 4400, 4900, 6300, 6400, 8000, 9000, -1]\n # buildings.repm_id[buildings.building_type_id.isin(other_btypes)] = 'other'\n\n # return buildings.repm_id\n\n\n# @orca.column('buildings', cache=True)\n# def job_spaces(buildings):\n # df_per_job = orca.get_table('building_sqft_per_job').to_frame().set_index('building_type_id')\n # sqft_per_job = orca.get_table('buildings').building_type_id.map(df_per_job.area_per_job)\n # spaces = (buildings.non_residential_sqft / sqft_per_job).astype('int')\n # spaces.fillna(0)\n # return spaces\n\n\[email protected]('households', cache=True)\ndef income_quartile(households):\n s = pd.Series(pd.qcut(households.income, 4, labels=False),\n index=households.index)\n # e.g. convert income quartile from 0-3 to 1-4\n s = s.add(1)\n return s\n\n\[email protected]('households', cache=True)\ndef age_of_head_quartile(households):\n s = pd.Series(pd.qcut(households.age_of_head, 4, labels=False),\n index=households.index)\n s = s.add(1)\n return s\n\n\[email protected]('households', cache=True)\ndef recent_mover_income(households):\n households = orca.get_table('households').to_frame(['recent_mover', 'income_quartile'])\n households['recent_mover_income'] = 0\n households[(households.recent_mover == 1) & (households.income_quartile == 1)] = 1\n households[(households.recent_mover == 1) & (households.income_quartile > 1)] = 2\n return households.recent_mover_income\n\n\[email protected]('buildings', cache=True)\ndef sqft_per_unit(buildings):\n residential_sqft = buildings.residential_sqft\n residential_units = buildings.residential_units\n sqft_per_unit = residential_sqft / residential_units\n return sqft_per_unit.fillna(0).replace(np.inf, 0)\n\n\[email protected]('buildings', cache=True)\ndef value_per_unit(buildings):\n improvement_value = buildings.improvement_value\n residential_units = buildings.residential_units\n value_per_unit = improvement_value / residential_units\n return value_per_unit.fillna(0).replace(np.inf, 0)\n\n\[email protected]('buildings', cache=True)\ndef value_per_sqft(buildings):\n improvement_value = buildings.improvement_value\n non_residential_sqft = buildings.non_residential_sqft\n value_per_sqft = improvement_value / non_residential_sqft\n return value_per_sqft.fillna(0).replace(np.inf, 0)\n\n\[email protected]('households', cache=False)\ndef parcel_id(households, buildings):\n return misc.reindex(buildings.parcel_id, households.building_id)\n\n\[email protected]('jobs', cache=False)\ndef parcel_id(jobs, buildings):\n return misc.reindex(buildings.parcel_id, jobs.building_id)\n\n\n# geographic_levels = [('parcels', 'parcel_id')]\n# # Define parcel -> agent/building disaggregation vars\n# for base_geography in ['households', 'jobs', 'buildings']:\n # for geography in geographic_levels:\n # geography_name = geography[0]\n # geography_id = geography[1]\n # if geography_name != base_geography:\n # for var in orca.get_table(geography_name).columns:\n # generators.make_disagg_var(geography_name, base_geography, var,\n # geography_id, name_based_on_geography=False)\n\n# # Generate variables to serve as a pool of variables for location\n# # choice model to select from\n\n# aggregation_functions = ['mean', 'median', 'std', 'sum']\n\n# geographic_levels = copy.copy(orca.get_injectable('aggregate_geos'))\n# geographic_levels['parcels'] = 'parcel_id'\n\n# variables_to_aggregate = {\n # 'households': ['persons', 'income', 
'race_of_head', 'age_of_head',\n # 'workers', 'children', 'cars', 'hispanic_head', 'tenure',\n # 'recent_mover', 'income_quartile'],\n # 'jobs': ['sector_id'],\n # 'parcels': ['acres', 'x', 'y', 'land_value', 'proportion_undevelopable'],\n # 'buildings': ['building_type_id', 'residential_units', 'non_residential_sqft', 'year_built', \n # 'value_per_unit', 'sqft_per_unit', 'job_spaces']\n # }\n\n# discrete_variables = {\n # 'households': ['persons', 'race_of_head', 'workers', 'children',\n # 'cars', 'hispanic_head', 'tenure', 'recent_mover', 'income_quartile'],\n # 'jobs': ['sector_id'],\n # 'buildings': ['building_type_id']\n # }\n# sum_vars = ['persons', 'workers', 'children', 'cars', 'hispanic_head',\n # 'recent_mover', 'acres', 'land_value', 'residential_units',\n # 'non_residential_sqft', 'job_spaces']\n\n# geog_vars_to_dummify = orca.get_injectable('aggregate_geos').values()\n\n# generated_variables = set([])\n\n# orca.add_column('parcels', 'sum_acres', orca.get_table('parcels').acres) # temporary\n\n# for agent in variables_to_aggregate.keys():\n # for geography_name, geography_id in geographic_levels.items():\n # if geography_name != agent:\n\n # # Define size variables\n # generators.make_size_var(agent, geography_name, geography_id)\n # generated_variables.add('total_' + agent)\n\n # # Define attribute variables\n # variables = variables_to_aggregate[agent]\n # for var in variables:\n # for aggregation_function in aggregation_functions:\n # if aggregation_function == 'sum':\n # if var in sum_vars:\n # generators.make_agg_var(agent, geography_name,\n # geography_id,\n # var, aggregation_function)\n # generated_variables.add(\n # aggregation_function + '_' + var)\n\n # else:\n # generators.make_agg_var(agent, geography_name,\n # geography_id, var,\n # aggregation_function)\n # generated_variables.add(\n # aggregation_function + '_' + var)\n\n# # Define prop_X_X variables\n# for agent in discrete_variables.keys():\n # agents = orca.get_table(agent)\n # discrete_vars = discrete_variables[agent]\n # for var in discrete_vars:\n # agents_by_cat = agents[var].value_counts()\n # cats_to_measure = agents_by_cat[agents_by_cat > 500].index.values\n # for cat in cats_to_measure:\n # for geography_name, geography_id in geographic_levels.items():\n # generators.make_proportion_var(agent, geography_name,\n # geography_id, var, cat)\n # generated_variables.add('prop_%s_%s' % (var, int(cat)))\n\n# # Making proportion by geography with global building types\n# agent = 'buildings'\n# var = 'repm_id'\n# cats_to_measure = ['res_sf_detached', 'duplex_townhome', 'multifamily', \n # 'retail', 'industrial', 'office', 'educational']\n# for cat in cats_to_measure:\n # var_name = 'prop_repm_id_{}'.format(cat)\n # for geography_name, geography_id in geographic_levels.items():\n # generators.make_proportion_var(agent, geography_name, geography_id, var, cat)\n # generated_variables.add('prop_%s_%s' % (var, cat))\n\n# # Define ratio variables\n# for geography_name in geographic_levels.keys():\n\n # # Jobs-housing balance\n # generators.make_ratio_var('jobs', 'households', geography_name)\n # generated_variables.add('ratio_jobs_to_households')\n\n # # # workers-persons ratio\n # generators.make_ratio_var('workers', 'persons', geography_name, prefix1 = 'sum', prefix2 = 'sum')\n # generated_variables.add('ratio_workers_to_persons')\n\n # # Residential occupancy rate\n # generators.make_ratio_var('households', 'residential_units', geography_name, prefix2 = 'sum')\n # 
generated_variables.add('ratio_households_to_residential_units')\n\n # # Density\n # for agent in discrete_variables.keys():\n # generators.make_density_var(agent, geography_name)\n # generated_variables.add('density_%s' % agent)\n\n\n# for geog_var in geog_vars_to_dummify:\n # geog_ids = np.unique(orca.get_table('parcels')[geog_var])\n # if len(geog_ids) < 50:\n # for geog_id in geog_ids:\n # generators.make_dummy_variable('parcels', geog_var, geog_id)\n\n\n# #### Accessibility variable creation functions ####\n\n# def register_pandana_access_variable(column_name, onto_table, variable_to_summarize,\n # distance, agg_type='sum', decay='linear', log=True):\n # \"\"\"\n # Register pandana accessibility variable with orca.\n # Parameters\n # ----------\n # column_name : str\n # Name of the orca column to register this variable as.\n # onto_table : str\n # Name of the orca table to register this table with.\n # variable_to_summarize : str\n # Name of the onto_table variable to summarize.\n # distance : int\n # Distance along the network to query.\n # agg_type : str\n # Pandana aggregation type.\n # decay : str\n # Pandana decay type.\n # Returns\n # -------\n # column_func : function\n # \"\"\"\n # @orca.column(onto_table, column_name, cache=True, cache_scope='iteration')\n # def column_func():\n # net = orca.get_injectable('net') # Get the pandana network\n # table = orca.get_table(onto_table).to_frame(['node_id', variable_to_summarize])\n # net.set(table.node_id, variable=table[variable_to_summarize])\n # try:\n # results = net.aggregate(distance, type=agg_type, decay=decay)\n # except:\n # results = net.aggregate(distance, type=agg_type, decay=decay) # import pdb; pdb.set_trace()\n # if log:\n # results = results.apply(eval('np.log1p'))\n # return misc.reindex(results, table.node_id)\n # return column_func\n\n\n# def register_pandana_access_variable(network_name, column_name, onto_table, variable_to_summarize,\n # distance, agg_type='sum', decay='linear', log=True):\n # \"\"\"\n # Register pandana accessibility variable with orca.\n # Parameters\n # ----------\n # network_name: str\n # Name of the network object to use to compute variable.\n # column_name : str\n # Name of the orca column to register this variable as.\n # onto_table : str\n # Name of the orca table to register this table with.\n # variable_to_summarize : str\n # Name of the onto_table variable to summarize.\n # distance : int\n # Distance along the network to query.\n # agg_type : str\n # Pandana aggregation type.\n # decay : str\n # Pandana decay type.\n # Returns\n # -------\n # column_func : function\n # \"\"\"\n # @orca.column(network_name, onto_table, column_name, cache=True, cache_scope='iteration')\n # def column_func():\n # net = orca.get_injectable(network_name) # Get the pandana network\n # table = orca.get_table(onto_table).to_frame(['node_id', variable_to_summarize])\n # net.set(table.node_id, variable=table[variable_to_summarize])\n # try:\n # results = net.aggregate(distance, type=agg_type, decay=decay)\n # except:\n # results = net.aggregate(distance, type=agg_type, decay=decay) # import pdb; pdb.set_trace()\n # if log:\n # results = results.apply(eval('np.log1p'))\n # return misc.reindex(results, table.node_id)\n # return column_func\n\n# def register_skim_access_variable(column_name, variable_to_summarize, impedance_measure,\n # distance, log=False):\n # \"\"\"\n # Register skim-based accessibility variable with orca.\n # Parameters\n # ----------\n # column_name : str\n # Name of the orca column to 
register this variable as.\n # impedance_measure : str\n # Name of the skims column to use to measure inter-zone impedance.\n # variable_to_summarize : str\n # Name of the zonal variable to summarize.\n # distance : int\n # Distance to query in the skims (e.g. 30 minutes travel time).\n # Returns\n # -------\n # column_func : function\n # \"\"\"\n # @orca.column('zones', column_name, cache=True, cache_scope='iteration')\n # def column_func(zones, travel_data):\n # results = misc.compute_range(travel_data.to_frame(), zones.get_column(variable_to_summarize),\n # impedance_measure, distance, agg=np.sum)\n # if log:\n # results = results.apply(eval('np.log1p'))\n\n # if len(results) < len(zones):\n # results = results.reindex(zones.index).fillna(0)\n\n # return results\n # return column_func\n\n\n# # Calculate pandana-based accessibility variable\n# distances = range(500, 2500, 500)\n# print(distances)\n# network_name = 'netwalk'\n# agg_types = ['ave', 'sum']\n# decay_types = ['linear', 'flat']\n# variables_to_aggregate = ['sum_children',\n # 'sum_persons',\n # 'sum_workers',\n # 'sum_residential_units',\n # 'sum_non_residential_sqft',\n # 'total_households',\n # 'total_jobs',\n # 'sum_residential_units',\n # 'sum_non_residential_sqft'] # add building vars here\n\n# variables_to_aggregate_avg_only = ['prop_race_of_head_1',\n # 'prop_race_of_head_9',\n # 'mean_age_of_head',\n # 'mean_children',\n # 'mean_income',\n # 'mean_workers',\n # 'mean_value_per_unit',\n # 'mean_non_residential_sqft'] # Add building/job vars here\n# access_vars = []\n# for distance in distances:\n # for decay in decay_types:\n # for variable in variables_to_aggregate:\n # for agg_type in agg_types:\n # var_name = '_'.join([variable, agg_type, str(distance), decay])\n # access_vars.append(var_name)\n # register_pandana_access_variable(network_name, var_name, 'parcels', variable, distance, agg_type=agg_type, decay=decay)\n # not_log = 'without_log_' + var_name\n # register_pandana_access_variable(network_name, not_log, 'parcels', variable, distance, agg_type=agg_type, decay=decay, log=False)\n # generated_variables.add(var_name)\n\n # for variable in variables_to_aggregate_avg_only:\n # var_name = '_'.join([variable, 'ave', str(distance), decay])\n # access_vars.append(var_name)\n # register_pandana_access_variable(network_name, var_name, 'parcels', variable, distance, agg_type='ave', decay=decay)\n # not_log = 'without_log_' + var_name\n # register_pandana_access_variable(network_name, not_log, 'parcels', variable, distance, agg_type='ave', decay=decay, log=False)\n # generated_variables.add(var_name)\n\n# # Network-based price aggregations for proforma input\n# price_cols = ['pred_sf_detached_price', 'pred_duplex_townhome_price',\n # 'pred_multifamily_price', 'pred_office_price', 'pred_retail_price',\n # 'pred_industrial_price']\n# for price_col in price_cols:\n # generators.make_agg_var('buildings', 'parcels',\n # 'parcel_id', price_col,\n # 'mean')\n # register_pandana_access_variable('%s_ave_800_linear' % price_col, 'parcels', 'mean_%s' % price_col,\n # 800, agg_type='ave', decay='flat', log=False)\n\n# register_pandana_access_variable('mean_sqft_per_unit_ave_800_linear', 'parcels', 'mean_sqft_per_unit',\n # 800, agg_type='ave', decay='flat', log=False)\n\n# # Calculate skim-based accessibility variable\n# variables_to_aggregate = ['total_jobs', 'sum_persons']\n# skim_access_vars = []\n# # Transit skim variables\n# travel_times = [5, 10, 15, 25]\n# for time in travel_times:\n # for variable in 
variables_to_aggregate:\n # var_name = '_'.join([variable, str(time), 'am_peak_travel_time'])\n # skim_access_vars.append(var_name)\n # register_skim_access_variable(var_name, variable, 'am_peak_travel_time', time)\n # generated_variables.add(var_name)\n\n # var_name = '_'.join([variable, str(time), 'md_offpeak_travel_time'])\n # skim_access_vars.append(var_name)\n # register_skim_access_variable(var_name, variable, 'md_offpeak_travel_time', time)\n # generated_variables.add(var_name)\n\n\n# # Disaggregate higher-level variables to the building level\n# for base_geography in ['buildings']:\n # for geography_name, geography_id in geographic_levels.items():\n # if geography_name != base_geography:\n # for var in orca.get_table(geography_name).columns:\n # generators.make_disagg_var(geography_name, base_geography, var,\n # geography_id, name_based_on_geography=True)\n\n\n# # Create logged version of all building variables for estimation\n# def register_ln_variable(table_name, column_to_ln):\n # \"\"\"\n # Register logged variable with orca.\n # Parameters\n # ----------\n # table_name : str\n # Name of the orca table that this column is part of.\n # column_to_ln : str\n # Name of the orca column to log.\n # Returns\n # -------\n # column_func : function\n # \"\"\"\n # new_col_name = 'ln_' + column_to_ln\n\n # @orca.column(table_name, new_col_name, cache=True, cache_scope='iteration')\n # def column_func():\n # return np.log1p(orca.get_table(table_name)[column_to_ln])\n # return column_func\n\n\n# for var in orca.get_table('buildings').columns:\n # register_ln_variable('buildings', var)\n\n\n# # Building type dummies\n# @orca.column('buildings', cache=True)\n# def is_office():\n # series = (orca.get_table('buildings').building_type_id.isin([4100, 2121, 2122])).astype(int)\n # return series\n\n\n# @orca.column('buildings', cache=True)\n# def is_warehouse():\n # return (orca.get_table('buildings').building_type_id.isin([5100])).astype(int)\n\n\n# @orca.column('buildings', cache=True)\n# def is_industrial():\n # return (orca.get_table('buildings').building_type_id.isin([5100, 5200])).astype(int)\n\n\n# @orca.column('buildings', cache=True)\n# def is_multifamily():\n # return (orca.get_table('buildings').building_type_id.isin([1210, 1220])).astype(int)\n\n\n# # Building Age dummies\n# @orca.column('buildings', cache=True)\n# def built_before_1950():\n # return (orca.get_table('buildings').year_built < 1950).astype(int)\n\n\n# @orca.column('buildings', cache=True)\n# def built_after_2000():\n # return (orca.get_table('buildings').year_built > 2000).astype(int)\n\n\n# @orca.column('buildings', cache=True)\n# def land_value_per_acre():\n # return (orca.get_table('buildings').land_value / \n # orca.get_table('buildings').acres).fillna(0)\n\n\n# register_ln_variable('buildings', 'land_value_per_acre')\n\n# # HOUSEHOLDS VARIABLES\n# @orca.column('households', cache=True)\n# def income_quartile_1():\n # return (orca.get_table('households').income_quartile == 1).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def income_quartile_2():\n # return (orca.get_table('households').income_quartile == 2).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def income_quartile_3():\n # return (orca.get_table('households').income_quartile == 3).astype(int)\n\n# @orca.column('households', cache=True)\n# def no_children():\n # return (orca.get_table('households').children == 0).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def has_children():\n # return 
(orca.get_table('households').children > 0).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def no_workers():\n # return (orca.get_table('households').workers == 0).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def race_notwhite():\n # return (orca.get_table('households').race_of_head > 1).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def race_white():\n # return (orca.get_table('households').race_of_head == 1).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def race_black():\n # return (orca.get_table('households').race_of_head == 2).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def race_asian():\n # return (orca.get_table('households').race_of_head == 6).astype(int)\n\n# @orca.column('households', cache=True)\n# def no_hispanic_head():\n # return (orca.get_table('households').race_of_head != 1).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def zero_carowner():\n # return (orca.get_table('households').cars == 0).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def carowner():\n # return (orca.get_table('households').cars > 0).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def income_less25K():\n # return (orca.get_table('households')['income'] < 25000).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def income_25to45K():\n # return ((orca.get_table('households')['income'] >= 25000) & (orca.get_table('households')['income'] < 45000)).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def income_45to70K():\n # return ((orca.get_table('households')['income'] >= 45000) & (orca.get_table('households')['income'] < 70000)).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def income_70to90K():\n # return ((orca.get_table('households')['income'] >= 70000) & (orca.get_table('households')['income'] < 90000)).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def income_90to110K():\n # return ((orca.get_table('households')['income'] >= 90000) & (orca.get_table('households')['income'] < 110000)).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def income_110to150K():\n # return ((orca.get_table('households')['income'] >= 110000) & (orca.get_table('households')['income'] < 150000)).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def income_more150K():\n # return (orca.get_table('households')['income'] >= 150000).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def ratio_income_persons():\n # return orca.get_table('households')['income'] / orca.get_table('households')['persons']\n\n\n# @orca.column('households', cache=True)\n# def tenure_rent():\n # return (orca.get_table('households').tenure == 2).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def tenure_own():\n # return (orca.get_table('households').tenure == 1).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def living_alone():\n # return (orca.get_table('households').persons == 1).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def hh_size_2():\n # return (orca.get_table('households').persons == 2).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def hh_size_3():\n # return (orca.get_table('households').persons == 3).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def hh_size_4():\n # return (orca.get_table('households').persons == 4).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def hh_size_more4():\n # return (orca.get_table('households').persons > 4).astype(int)\n\n\n# 
@orca.column('households', cache=True)\n# def age_head_less40():\n # return (orca.get_table('households').age_of_head <= 40).astype(int)\n\n\n# @orca.column('households', cache=True)\n# def age_head_more40_age():\n # return (orca.get_table('households').age_of_head > 40).astype(int) * orca.get_table('households').age_of_head\n\n\n# @orca.column('households', cache=True)\n# def ratio_cars_workers():\n # ratio = orca.get_table('households')['cars'] / orca.get_table('households')['workers']\n # ratio.replace({np.inf : 0, -np.inf : 0},inplace=True)\n # ratio.fillna(0, inplace=True)\n # return ratio\n\n# ln_vars = ['age_of_head', 'persons', 'workers',\n # 'income', 'cars',\n # 'ratio_income_persons', 'ratio_cars_workers']\n\n# for lnv in ln_vars:\n # register_ln_variable('households', lnv)\n\n\n# @orca.column('parcels', cache=False)\n# def total_yearly_rent(parcels):\n # parcels = parcels.to_frame(['sum_residential_units', 'sum_non_residential_sqft',\n # 'mean_pred_sf_detached_price', 'mean_pred_duplex_townhome_price', 'mean_pred_multifamily_price',\n # 'mean_pred_office_price', 'mean_pred_retail_price', 'mean_pred_industrial_price'])\n\n # parcels[parcels < 0] = 0\n # parcels['mean_resunit_price'] = parcels[['mean_pred_sf_detached_price', 'mean_pred_duplex_townhome_price', 'mean_pred_multifamily_price']].mean(axis=1)\n # parcels['mean_nrsf_price'] = parcels[['mean_pred_office_price', 'mean_pred_retail_price', 'mean_pred_industrial_price']].mean(axis=1)\n\n # res_price = parcels.mean_resunit_price * parcels.sum_residential_units\n # nonres_price = parcels.mean_nrsf_price * parcels.sum_non_residential_sqft\n # return (res_price + nonres_price) * .05 ## expressed in current annual rent\n\n\n# @orca.column('parcels', cache=True)\n# def developable_sqft(parcels):\n # return (1 - (parcels.proportion_undevelopable / 100.0)) * parcels.acres * 43560\n\n\n# ## Disagg from parcel to site proposals\n# parcel_cols = orca.get_table('parcels').columns\n# site_proposal_cols = orca.get_table('site_proposals').columns\n# for var in parcel_cols:\n # if var not in site_proposal_cols:\n # generators.make_disagg_var('parcels', 'site_proposals', var,\n # 'parcel_id', name_based_on_geography=False)\n\n\n# # Household_pums variables\n# @orca.column('households_pums', cache=True)\n# def income_quartile(households_pums):\n # s = pd.Series(pd.qcut(households_pums.income, 4, labels=False),\n # index=households_pums.index)\n # # e.g. 
convert income quartile from 0-3 to 1-4\n # s = s.add(1)\n # return s\n\n\n# @orca.column('households_pums', cache=True)\n# def income_quartile_1():\n # return (orca.get_table('households_pums').income_quartile == 1).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def income_quartile_2():\n # return (orca.get_table('households_pums').income_quartile == 2).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def income_quartile_3():\n # return (orca.get_table('households_pums').income_quartile == 3).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def no_workers():\n # return (orca.get_table('households_pums').workers == 0).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def race_notwhite():\n # return (orca.get_table('households_pums').race_of_head > 1).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def race_white():\n # return (orca.get_table('households_pums').race_of_head == 1).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def race_black():\n # return (orca.get_table('households_pums').race_of_head == 2).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def race_asian():\n # return (orca.get_table('households_pums').race_of_head == 6).astype(int)\n\n# @orca.column('households_pums', cache=True)\n# def no_hispanic_head():\n # return (orca.get_table('households_pums').race_of_head != 1).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def zero_carowner():\n # return (orca.get_table('households_pums').cars == 0).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def carowner():\n # return (orca.get_table('households_pums').cars > 0).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def income_more150K():\n # return (orca.get_table('households_pums')['income'] >= 150000).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def ratio_income_persons():\n # return orca.get_table('households_pums')['income'] / orca.get_table('households_pums')['persons']\n\n\n# @orca.column('households_pums', cache=True)\n# def ratio_cars_workers():\n # ratio = orca.get_table('households_pums')['cars'] / orca.get_table('households_pums')['workers']\n # ratio.replace({np.inf : 0, -np.inf : 0},inplace=True)\n # ratio.fillna(0, inplace=True)\n # return ratio\n\n\n# @orca.column('households_pums', cache=True)\n# def tenure_rent():\n # return (orca.get_table('households_pums').tenure == 2).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def tenure_own():\n # return (orca.get_table('households_pums').tenure == 1).astype(int)\n\n\n# @orca.column('households_pums', cache=True)\n# def living_alone():\n # return (orca.get_table('households_pums').persons == 1).astype(int)\n\n\n# ln_vars = ['age_of_head', 'persons', 'workers',\n # 'income', 'cars',\n # 'ratio_income_persons', 'ratio_cars_workers']\n\n# for lnv in ln_vars:\n # register_ln_variable('households_pums', lnv)\n", "id": "1852906", "language": "Python", "matching_score": 3.7610511779785156, "max_stars_count": 2, "path": "bayarea/variables.py" }, { "content": "import orca\nimport pandas as pd\nimport numpy as np\nfrom urbansim.utils import misc\n\n\ndef register_skim_access_variable(\n column_name, variable_to_summarize, impedance_measure,\n distance, skims_table, agg=np.sum, log=False):\n \"\"\"\n Register skim-based accessibility variable with orca.\n Parameters\n ----------\n column_name : str\n Name of the orca column to register this variable as.\n 
impedance_measure : str\n Name of the skims column to use to measure inter-zone impedance.\n variable_to_summarize : str\n Name of the zonal variable to summarize.\n distance : int\n Distance to query in the skims (e.g. 30 minutes travel time).\n mode_name: str\n Name of the mode to query in the skims.\n period: str\n Period (AM, PM, OffPeak) to query in the skims.\n\n Returns\n -------\n column_func : function\n \"\"\"\n @orca.column('zones', column_name, cache=True, cache_scope='iteration')\n def column_func(zones):\n df = skims_table.to_frame()\n results = misc.compute_range(\n df, zones.get_column(variable_to_summarize),\n impedance_measure, distance, agg=agg)\n\n if len(results) < len(zones):\n results = results.reindex(zones.index).fillna(0)\n\n # add vars from orig zone, typically not included in skims\n results = results + zones[variable_to_summarize]\n\n if log:\n results = results.apply(eval('np.log1p'))\n\n return results\n\n return\n\n\ndef impute_missing_skims(mtc_skims, beam_skims_raw):\n df = beam_skims_raw.to_frame()\n\n # seconds to minutes\n df['gen_tt'] = df['generalizedTimeInS'] / 60\n\n mtc = mtc_skims.to_frame(columns=['orig', 'dest', 'da_distance_AM'])\n mtc.rename(\n columns={'orig': 'from_zone_id', 'dest': 'to_zone_id'},\n inplace=True)\n mtc.set_index(['from_zone_id', 'to_zone_id'], inplace=True)\n\n # miles to meters\n mtc['dist'] = mtc['da_distance_AM'] * 1609.34\n\n # impute mtc zone-to-zone distances where zero-valued in beam skims\n if len(df.loc[df['distanceInM'] == 0, 'distanceInM']) > 0:\n df.loc[df['distanceInM'] == 0, 'distanceInM'] = mtc.loc[\n pd.MultiIndex.from_frame(df.loc[df['distanceInM'] == 0, [\n 'from_zone_id', 'to_zone_id']]), 'dist'].values\n\n # use MTC dists for all intra-taz distances\n intra_taz_mask = df['from_zone_id'] == df['to_zone_id']\n df.loc[intra_taz_mask, 'distanceInM'] = mtc.loc[pd.MultiIndex.from_frame(\n df.loc[intra_taz_mask, ['from_zone_id', 'to_zone_id']]), 'dist'].values\n\n\n # create morning peak lookup\n df['gen_time_per_m'] = df['gen_tt'] / df['distanceInM']\n df['gen_cost_per_m'] = df['gen_cost'] / df['distanceInM']\n df.loc[df['hour'].isin([7, 8, 9]), 'period'] = 'AM'\n df_am = df[df['period'] == 'AM']\n df_am = df_am.replace([np.inf, -np.inf], np.nan)\n df_am = df_am.loc[df_am.index.repeat(df_am.numObservations)] # weighted\n am_lookup = df_am[[\n 'mode', 'gen_time_per_m', 'gen_cost_per_m']].dropna().groupby(\n ['mode']).mean().reset_index()\n\n # morning averages\n df_am_avg = df_am[[\n 'from_zone_id', 'to_zone_id', 'mode', 'gen_tt',\n 'gen_cost']].groupby(\n ['from_zone_id', 'to_zone_id', 'mode']).mean().reset_index()\n\n # long to wide\n df_am_pivot = df_am_avg.pivot_table(\n index=['from_zone_id', 'to_zone_id'], columns='mode')\n df_am_pivot.columns = ['_'.join(col) for col in df_am_pivot.columns.values]\n\n # combine with mtc-based dists\n merged = pd.merge(\n mtc[['dist']], df_am_pivot, left_index=True, right_index=True,\n how='left')\n\n # impute\n for mode in am_lookup['mode'].values:\n for impedance in ['gen_tt', 'gen_cost']:\n if impedance == 'gen_tt':\n lookup_col = 'gen_time_per_m'\n elif impedance == 'gen_cost':\n lookup_col = 'gen_cost_per_m'\n colname = impedance + '_' + mode\n lookup_val = am_lookup.loc[\n am_lookup['mode'] == mode, lookup_col].values[0]\n merged.loc[pd.isnull(merged[colname]), colname] = merged.loc[\n pd.isnull(merged[colname]), 'dist'] * lookup_val\n\n assert len(merged) == 2114116\n\n return merged\n", "id": "2162020", "language": "Python", "matching_score": 5.627845764160156, 
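The `register_skim_access_variable` helpers in the record above build zone-level accessibility columns by summing a zonal variable over all destination zones whose skim impedance falls under a threshold (`misc.compute_range`), then adding the origin zone's own value because those skims omit intra-zone pairs. A minimal plain-pandas sketch of that aggregation pattern, not the model's actual orca implementation; the column names (`from_zone_id`, `to_zone_id`, `am_peak_travel_time`, `total_jobs`) are illustrative assumptions:

import pandas as pd

def sum_within_threshold(skims, zonal_values, impedance_col, threshold):
    # keep only origin-destination pairs whose impedance is under the threshold
    reachable = skims[skims[impedance_col] <= threshold]
    # look up each destination zone's value and total it by origin zone
    dest_vals = reachable['to_zone_id'].map(zonal_values)
    totals = dest_vals.groupby(reachable['from_zone_id'].values).sum()
    # zones with nothing reachable get 0, mirroring the reindex/fillna above
    return totals.reindex(zonal_values.index).fillna(0)

# toy usage: jobs reachable within 30 minutes (intra-zone pairs are included
# here, so the origin zone's own jobs are already counted)
skims = pd.DataFrame({'from_zone_id': [1, 1, 2, 2],
                      'to_zone_id':   [1, 2, 1, 2],
                      'am_peak_travel_time': [5, 40, 25, 10]})
jobs = pd.Series([100, 250], index=pd.Index([1, 2], name='zone_id'),
                 name='total_jobs')
print(sum_within_threshold(skims, jobs, 'am_peak_travel_time', 30))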
"max_stars_count": 1, "path": "activitysynth/scripts/utils.py" }, { "content": "import orca\nimport numpy as np\nfrom urbansim.utils import misc\n\n\ndef register_skim_access_variable(\n column_name, variable_to_summarize, impedance_measure,\n distance, skims_table, agg=np.sum, log=False):\n \"\"\"\n Register skim-based accessibility variable with orca.\n Parameters\n ----------\n column_name : str\n Name of the orca column to register this variable as.\n impedance_measure : str\n Name of the skims column to use to measure inter-zone impedance.\n variable_to_summarize : str\n Name of the zonal variable to summarize.\n distance : int\n Distance to query in the skims (e.g. 30 minutes travel time).\n mode_name: str\n Name of the mode to query in the skims.\n period: str\n Period (AM, PM, OffPeak) to query in the skims.\n\n Returns\n -------\n column_func : function\n \"\"\"\n @orca.column('zones', column_name, cache=True, cache_scope='iteration')\n def column_func(zones):\n df = skims_table.to_frame()\n results = misc.compute_range(\n df, zones.get_column(variable_to_summarize),\n impedance_measure, distance, agg=agg)\n\n if len(results) < len(zones):\n results = results.reindex(zones.index).fillna(0)\n\n # add vars from orig zone, typically not included in skims\n results = results + zones[variable_to_summarize]\n\n if log:\n results = results.apply(eval('np.log1p'))\n\n return results\n\n return\n", "id": "1246659", "language": "Python", "matching_score": 0.26294195652008057, "max_stars_count": 1, "path": "spring-2019-models/scripts/utils.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport orca\nimport os\nfrom urbansim_defaults.utils import _remove_developed_buildings\nfrom urbansim.developer.developer import Developer as dev\n\n\n#####################\n# UTILITY FUNCTIONS\n#####################\n\n\n# similar to the function in urbansim_defaults, except it assumes you want\n# to use your own pick function\ndef add_buildings(buildings, new_buildings,\n remove_developed_buildings=True):\n\n old_buildings = buildings.to_frame(buildings.local_columns)\n new_buildings = new_buildings[buildings.local_columns]\n\n if remove_developed_buildings:\n unplace_agents = [\"households\", \"jobs\"]\n old_buildings = \\\n _remove_developed_buildings(old_buildings, new_buildings,\n unplace_agents)\n\n all_buildings = dev.merge(old_buildings, new_buildings)\n\n orca.add_table(\"buildings\", all_buildings)\n\n\n# assume df1 and df2 each have 2 float columns specifying x and y\n# in the same order and coordinate system and no nans. 
returns the indexes\n# from df1 that are closest to each row in df2\ndef nearest_neighbor(df1, df2):\n from sklearn.neighbors import KDTree\n kdt = KDTree(df1.as_matrix())\n indexes = kdt.query(df2.as_matrix(), k=1, return_distance=False)\n return df1.index.values[indexes]\n\n\n# need to reindex from geom id to the id used on parcels\ndef geom_id_to_parcel_id(df, parcels):\n s = parcels.geom_id # get geom_id\n s = pd.Series(s.index, index=s.values) # invert series\n df[\"new_index\"] = s.loc[df.index] # get right parcel_id for each geom_id\n df = df.dropna(subset=[\"new_index\"])\n df[\"new_index\"] = df.new_index.astype('int')\n df = df.set_index(\"new_index\", drop=True)\n df.index.name = \"parcel_id\"\n return df\n\n\ndef parcel_id_to_geom_id(s):\n parcels = orca.get_table(\"parcels\")\n g = parcels.geom_id # get geom_id\n return pd.Series(g.loc[s.values].values, index=s.index)\n\n\n# pick random indexes from s without replacement\ndef random_indexes(s, num):\n return np.random.choice(s.index.values, int(num), replace=False)\n\n\n# This method takes a series of floating point numbers, rounds to\n# integers (e.g. to while number households), while making sure to\n# meet the given target for the sum. We're obviously going to lose\n# some resolution on the distrbution implied by s in order to meet\n# the target exactly\ndef round_series_match_target(s, target, fillna):\n s = s.fillna(fillna).round().astype('int')\n diff = target - s.sum()\n if diff > 0:\n s.loc[random_indexes(s, diff)] += 1\n elif diff < 0:\n s.loc[random_indexes(s, diff*-1)] -= 1\n\n assert s.sum() == target\n return s\n\n\n# scales (floating point ok) so that the sum of s if equal to\n# the specified target - pass check_close to verify that it's\n# within a certain range of the target\ndef scale_by_target(s, target, check_close=None):\n ratio = float(target) / s.sum()\n if check_close:\n assert 1.0-check_close < ratio < 1.0+check_close\n return s * ratio\n\n\n# this should be fairly self explanitory if you know ipf\n# seed_matrix is your best bet at the totals, col_marginals are\n# observed column marginals and row_marginals is the same for rows\ndef simple_ipf(seed_matrix, col_marginals, row_marginals, tolerance=1, cnt=0):\n assert np.absolute(row_marginals.sum() - col_marginals.sum()) < 5.0\n\n # first normalize on columns\n ratios = col_marginals / seed_matrix.sum(axis=0)\n seed_matrix *= ratios.values\n closeness = np.absolute(row_marginals - seed_matrix.sum(axis=1)).sum()\n assert np.absolute(col_marginals - seed_matrix.sum(axis=0)).sum() < .01\n # print \"row closeness\", closeness\n if closeness < tolerance:\n return seed_matrix\n\n # first normalize on rows\n ratios = row_marginals / seed_matrix.sum(axis=1)\n ratios[row_marginals == 0] = 0\n seed_matrix = seed_matrix * ratios.reshape((ratios.size, 1))\n assert np.absolute(row_marginals - seed_matrix.sum(axis=1)).sum() < .01\n closeness = np.absolute(col_marginals - seed_matrix.sum(axis=0)).sum()\n # print \"col closeness\", closeness\n if closeness < tolerance:\n return seed_matrix\n\n if cnt >= 50:\n return seed_matrix\n\n return simple_ipf(seed_matrix, col_marginals, row_marginals,\n tolerance, cnt+1)\n\n\"\"\"\nBELOW IS A SET OF UTITLIES TO COMPARE TWO SUMMARY DATAFRAMES, MAINLY LOOKING\nFOR PCT DIFFERENCE AND THEN FORMATTING INTO A DESCRIPTION OR INTO AN EXCEL\nOUTPUT FILE WHICH IS COLOR CODED TO HIGHLIGHT THE DIFFERENCES\n\"\"\"\n\n# for labels and cols in df1, find value in df2, and make sure value is\n# within pctdiff - if not return dataframe col, row and 
values in two frames\n# pctdiff should be specified as a number between 1 and 100\n\n\ndef compare_dfs(df1, df2):\n\n df3 = pd.DataFrame(index=df1.index, columns=df1.columns)\n\n # for each row\n for label, row in df1.iterrows():\n\n # assume row exists in comparison\n rowcomp = df2.loc[label]\n\n # for each value\n for col, val in row.iteritems():\n\n val2 = rowcomp[col]\n\n df3.loc[label, col] = \\\n int(abs(val - val2) / ((val + val2 + .01) / 2.0) * 100.0)\n\n return df3\n\n\n# identify small values as a boolean T/F for each column\ndef small_vals(df):\n\n df = df.copy()\n\n for col in df.columns:\n df[col] = df[col] < df[col].mean() - .5*df[col].std()\n\n return df\n\n\ndef compare_dfs_excel(df1, df2, excelname=\"out.xlsx\"):\n import palettable\n import xlsxwriter\n\n writer = pd.ExcelWriter(excelname, engine='xlsxwriter')\n\n df1.reset_index().to_excel(writer, index=False, sheet_name='df1')\n df2.reset_index().to_excel(writer, index=False, sheet_name='df2')\n writer.sheets['df1'].set_zoom(150)\n writer.sheets['df2'].set_zoom(150)\n\n df3 = compare_dfs(df1, df2)\n\n df3.reset_index().to_excel(writer, index=False, sheet_name='comparison')\n\n workbook = writer.book\n worksheet = writer.sheets['comparison']\n worksheet.set_zoom(150)\n\n color_range = \"B2:Z{}\".format(len(df1)+1)\n\n reds = palettable.colorbrewer.sequential.Blues_5.hex_colors\n reds = {\n i: workbook.add_format({'bg_color': reds[i]})\n for i in range(len(reds))\n }\n\n blues = palettable.colorbrewer.sequential.Oranges_5.hex_colors\n blues = {\n i: workbook.add_format({'bg_color': blues[i]})\n for i in range(len(blues))\n }\n\n def apply_format(worksheet, df, format, filter):\n\n s = df.stack()\n\n for (lab, col), val in filter(s).iteritems():\n\n rowind = df.index.get_loc(lab)+2\n colind = df.columns.get_loc(col)+1\n colletter = xlsxwriter.utility.xl_col_to_name(colind)\n\n worksheet.write(colletter+str(rowind), val, format)\n\n for i in range(5):\n apply_format(worksheet, df3, reds[i], lambda s: s[s > i*10+10])\n\n df3[small_vals(df1)] = np.nan\n for i in range(5):\n apply_format(worksheet, df3, blues[i], lambda s: s[s > i*10+10])\n\n writer.save()\n\n\n# compare certain columns of two dataframes for differences above a certain\n# amount and return a string describing the differences\ndef compare_summary(df1, df2, index_names=None, pctdiff=10,\n cols=[\"tothh\", \"totemp\"], geog_name=\"Superdistrict\"):\n\n if cols:\n df1, df2 = df1[cols], df2[cols]\n\n df3 = compare_dfs(df1, df2)\n df3[small_vals(df1)] = np.nan\n s = df3[cols].stack()\n\n buf = \"\"\n for (lab, col), val in s[s > 10].iteritems():\n lab = index_names.loc[lab]\n buf += \"%s '%s' is %d%% off in column '%s'\\n\" % \\\n (geog_name, lab, val, col)\n\n return buf\n", "id": "9281658", "language": "Python", "matching_score": 2.336066484451294, "max_stars_count": 0, "path": "bayarea_urbansim/baus/utils.py" }, { "content": "# do some error checking of the results of a simulation\n# this is designed to run on the parcel output that is used by urbansim_explorer\n\nimport sys\nimport pandas as pd\nimport orca\nsys.path.append(\".\")\nimport models\n\nargs = sys.argv[1:]\nrunnum = int(args[0])\n\ndevs = pd.read_csv('runs/run%d_parcel_output.csv' % runnum, low_memory=False)\n\ndevs[\"building_ratio\"] = devs.building_sqft / devs.total_sqft.replace(0, 1)\n\ndf = devs[devs.building_ratio < 1.5]\n\nif len(df):\n\n print \"Found %d devs that redev only slightly larger buildings (they change uses)\" % len(df)\n #print df\n\n # write out some more detailed info on these 
buildings\n '''\n b = orca.get_table('buildings').local\n df2 = b[b.parcel_id.isin(df.parcel_id)]\n print df2.building_type_id.value_counts()\n df2.to_csv('out.csv')\n df[df.parcel_id.isin(df2.parcel_id)].to_csv('out2.csv')\n '''\n\ndevs[\"parcel_id_counts\"] = devs.parcel_id.value_counts().loc[devs.parcel_id].values\n\ndf = devs.query(\"parcel_id_counts > 1 and SDEM == False\")\n\nif len(df):\n\n print \"Found %d devs that redev a parcel that's already built on\" % len(df)\n print df\n\n\njdf10 = pd.read_csv('runs/run%d_juris_summaries_2010.csv' % runnum, index_col='juris')\njdf40 = pd.read_csv('runs/run%d_juris_summaries_2040.csv' % runnum, index_col='juris')\n\nempdiff = jdf40.loc['San Francisco'].totemp - jdf10.loc['San Francisco'].totemp\npctdiff = float(empdiff) / jdf10.loc['San Francisco'].totemp * 100\nprint \"San Francisco employment goes up by %d jobs (%.1f pct)\" % (empdiff, pctdiff)\n", "id": "12708114", "language": "Python", "matching_score": 2.40985107421875, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/check_sim_results.py" }, { "content": "import pandas as pd\nimport sys\n\nargs = sys.argv[1:]\n\nif len(args) != 2:\n\tprint \"Identify buildings which are not in the baserun\"\n\tprint \"Usage <scenario runnum> <base runrum>\"\n\tsys.exit()\n\nscen = pd.read_csv(\"runs/run%d_parcel_output.csv\" % int(args[0]), low_memory=False)\nbase = pd.read_csv(\"runs/run%d_parcel_output.csv\" % int(args[1]), low_memory=False)\n\nrows = []\ncnt1 = cnt2 = cnt3 = 0\nfor ind, row in scen.iterrows():\n\n #print row\n\n if row.SDEM == True:\n continue\n\n prevrow = base[base.parcel_id == row.parcel_id]\n\n if len(prevrow) == 0:\n\n # new parcels get added\n rows.append(row)\n cnt1 += 1\n\n else:\n\n prevrow = prevrow.iloc[0]\n\n def close(v1, v2, tol):\n \t return abs(v1-v2) < tol\n\n if not close(row.residential_units, prevrow.residential_units, 2.5) or \\\n not close(row.non_residential_sqft, prevrow.non_residential_sqft, 2500):\n\n rows.append(row)\n cnt2 += 1\n\n else:\n cnt3 += 1\n\nprint \"%d new developments and %d changed developments (%d unchanged)\" % (cnt1, cnt2, cnt3)\n\nbase = pd.DataFrame(rows).to_csv(\"runs/run%d_parcel_output_diff.csv\" % int(args[0]))\n", "id": "1304878", "language": "Python", "matching_score": 1.3929368257522583, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/devdiff.py" }, { "content": "import pandas as pd\nimport string\n\nf = pd.read_excel(open(\"../TripModeChoice.xls\"), sheetname=None)\n\ncoeffs = {}\ncoeff_ind = None\nspecs = {}\nind = None\n\nfor key, df in f.iteritems():\n if \"debug\" in key or \"model\" in key or \"data\" in key:\n continue\n if key in [\"School\", \"University\", \"Work\", \"WorkBased\", \"Escort\"]:\n continue\n print key\n\n \"\"\"\n Really these lines just extract the relevant parts out of the spreadsheet\n \"\"\"\n\n # the headers are actually split up among a couple of rows (ouch)\n df.columns = list(df.iloc[1].values)[:6] + list(df.iloc[2].values)[6:]\n\n filt = '1 < = No <= 49'\n coeffs[key] = df.query(filt).set_index('Token')['Formula for variable']\n if coeff_ind is None:\n coeff_ind = coeffs[key].index\n\n df = df.iloc[2:]\n\n # this is not the best - there are a couple of cases where the specs\n # differ across segments and we want them to be the same FOR NOW - they\n # can differ once we get them all lined up\n if key == \"Work\":\n df = df.query(\"No >= 141\")\n else:\n df = df.query(\"No >= 144\")\n\n df = df.drop(['No', 'Token', 'Filter', 'Index'], axis=1)\n df.columns = ['Description', 
'Expression'] + list(df.columns[2:])\n df.set_index(['Description', 'Expression'], inplace=True)\n\n # these lines merge the alternatives that are used\n # into a comma separated list\n alt_l = []\n val_l = []\n for _, row in df.iterrows():\n alts = list(row.dropna().index)\n vals = list(row.dropna())\n\n # assert only 1 unique value\n if len(vals) == 0:\n vals = [0]\n\n assert len(set(vals)) == 1\n val = vals[0]\n\n alt_l.append(string.join(alts, \",\"))\n val_l.append(val)\n\n # print alts\n df = pd.DataFrame({\n 'Alternatives': alt_l,\n key: val_l\n }, index=df.index).set_index('Alternatives', append=True)\n\n if ind is None:\n ind = df.index\n\n assert len(ind) == len(df)\n\n # ok, this is a bit bizarre, but it appears to me the coefficients are in\n # the expressions column so you can just write 1s in the cells - if we're\n # going to stack the columns we need to move the coeffs back to the cells\n df = df.reset_index()\n\n # tmp = df.Expression\n # df[\"Expression\"].iloc[232:] = df.iloc[232:][key]\n df[key].iloc[319:] = df[\"Expression\"].iloc[319:]\n\n specs[key] = df[key].values\n\ndf = pd.DataFrame(specs)\ndf.index = ind\n\ndf.to_csv('trip_mode_choice.csv')\n\npd.DataFrame(coeffs).loc[coeff_ind].to_csv('trip_mode_choice_coeffs.csv')\n", "id": "8444810", "language": "Python", "matching_score": 1.2604939937591553, "max_stars_count": 0, "path": "activitysim/scripts/stack_mode_choice2.py" }, { "content": "# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport tabulate\nimport yaml\nimport os\n\n\nclass model:\n def __init__(self, filename):\n self.filename = filename\n \n with open(\"../lcog/configs/\"+filename+\".yaml\", 'r') as stream:\n self.yaml_content = yaml.load(stream)\n \n self.model_type = self.yaml_content['saved_object']['template']\n self.title = \"**{} Model**\".format(filename.upper())\n \n sumlines = self.yaml_content['saved_object']['summary_table'].splitlines()\n separators = [i for i, line in enumerate(sumlines) if line.startswith(('=','-'))]\n \n scores = []\n [[scores.extend(i.strip().split(' ')) for i in line.split(':')] for line in sumlines[separators[0] + 1:separators[1]]]\n\n rows = [i.split() for i in sumlines[separators[2] + 1:separators[3]]]\n \n if self.model_type == 'LargeMultinomialLogitStep':\n self.segment = \"* Agent segment: {}\".format(self.yaml_content['saved_object']['out_chooser_filters'][-1])\n \n a = [i for i, sc in enumerate(scores) if sc == 'Log-Likelihood']\n loglike = float(scores[a[0]+1].replace(',',''))\n a = [i for i, sc in enumerate(scores) if sc == 'LL-Null']\n llnull = float(scores[a[0]+1].replace(',',''))\n log_ratio = abs(loglike/llnull)\n \n a = [i for i, sc in enumerate(scores) if sc == 'Pseudo R-squ.']\n r_squared = scores[a[0]+1] \n \n cols = ['Variable','Coefficient','Std. Error','Z-score', 'P>|z|']\n df = pd.DataFrame(columns= cols, data = rows )\n df.drop(columns=['P>|z|'],inplace=True)\n table_md = tabulate.tabulate(df, tablefmt=\"pipe\", headers=\"keys\")\n self.table_md = table_md\n self.descrip = '* Log likelihood ratio (measure of fit): {}\\n* R²: {}\\n'.format(\n log_ratio , r_squared )\n \n elif self.model_type == 'OLSRegressionStep':\n self.segment = \"* Agent segment: {}\".format(self.yaml_content['saved_object']['filters'].split(') & (')[-1].strip(')'))\n \n cols = ['Variable','Coefficient','Std. 
Error','T-score', 'p>|t|','interval','interval2']\n df = pd.DataFrame(columns= cols, data = rows )\n df.drop(columns=['p>|t|','interval','interval2'],inplace=True)\n table_md = tabulate.tabulate(df, tablefmt=\"pipe\", headers=\"keys\")\n self.table_md = table_md\n a = [i for i, sc in enumerate(scores) if sc == 'F-statistic']\n f_statistic = scores[a[0]+1]\n a = [i for i, sc in enumerate(scores) if sc == 'R-squared']\n r_squared = scores[a[0]+1]\n \n self.descrip = '* F-statistic: {}\\n* R²: {}'.format(\n f_statistic , r_squared )\n \n elif self.model_type == 'BinaryLogitStep':\n self.segment = \"* Agent segment: {}\".format(self.yaml_content['saved_object']['tables'])\n\n a = [i for i, sc in enumerate(scores) if sc == 'Log-Likelihood']\n loglike = float(scores[a[0]+1].replace(',',''))\n a = [i for i, sc in enumerate(scores) if sc == 'LL-Null']\n llnull = float(scores[a[0]+1].replace(',',''))\n log_ratio = abs(loglike/llnull)\n \n a = [i for i, sc in enumerate(scores) if sc == 'Pseudo R-squ.']\n r_squared = scores[a[0]+1] \n\n cols = ['Variable','Coefficient','Std. Error','Z-score', 'P>|z|', 'interval', 'interval2' ]\n df = pd.DataFrame(columns= cols, data = rows )\n df.drop(columns=['P>|z|', 'interval', 'interval2'],inplace=True)\n table_md = tabulate.tabulate(df, tablefmt=\"pipe\", headers=\"keys\")\n self.table_md = table_md\n \n self.descrip = '* Log likelihood ratio (measure of fit): {}\\n* R²: {}\\n'.format(\n log_ratio , r_squared )\n \n self.tabletitle = 'Estimated coefficients for {}:'.format(filename)\n\n def summary_to_md_file(self):\n all_text = '\\n\\n{}\\n\\n{}\\n{}\\n{}\\n\\n{}\\n'.format(self.title, self.segment, self.descrip, self.tabletitle, self.table_md)\n \n file_name=\"Model-Estimation-Results\"\n with open('{}.md'.format(file_name), mode='a') as md_file:\n md_file.write('\\n' + all_text)\n\n#Start with Global description\nfile_name=\"Model-Estimation-Results\"\nintro_text = \"This page documents choice model estimation results for the parcel-level LCOG UrbanSim model.\" \\\n +\"The location choice models drive the placement of new and relocating agents in the simulation.\" \\\n +\"The Relocation choice models estimates de moving of households from their current location in the simulation.\" \\\n +\"The model is estimated previously to the location choice models since this instance will determine the relocation or not and therefore will create the need of finding another building to moving households.\" \\\n +\"The Tenure choice models models households' decisions to own or rent the place they live. This model is applied after estimating the new location with the hlcm.\" \\\n +\"The price regression models estimate the variation of buildings' values.\\n\"\\\n+\"For each submodel, a table of estimated coefficients/significances are presented. 
\"\\\n+\"Measures of fit and other model evaluation metrics accompany the coefficient tables.\\n\\n\"\\\n+\"Model acronyms are defined as:\\n\"\\\n+\"HLCM: Household location choice model\\n\"\\\n+\"household_relocation_choice_model: Household Relocation choice model\\n\"\\\n+\"tenure_choice_model: Household Tenure choice model\\n\"\\\n+\"ELCM: Employment location choice model\\n\"\\\n\"REPM: Residential/Employment Price model\\n\"\n# NRDPLCM: Non-residential development project location choice model\n# RDPLCM: Residential development project location choice model'\nwith open('{}.md'.format(file_name), mode='w') as md_file:\n md_file.write(intro_text)\n\n# Print every model type with the corresponding big title\nmodels_types_dict = {'hlcm': 'Household Location Choice Models' ,\n 'household_relocation_choice_model': 'Household Relocation Choice Model' ,\n 'tenure_choice_model': 'Household Tenure Choice Model' ,\n 'elcm': 'Employment Location Choice Models',\n 'repm': 'Residential/Employment Price Models'}\nfor typ in models_types_dict.keys():\n title = models_types_dict[typ]\n text_title = '\\n## {}'.format(title)\n with open('{}.md'.format(file_name), mode='a') as md_file:\n md_file.write(text_title)\n\n files_typ= [name.split('.')[0] for name in os.listdir(\"../lcog/configs/\") if name.startswith(typ)]\n for each_model in files_typ:\n print(each_model)\n model_class = model(each_model)\n model_class.summary_to_md_file()\n", "id": "2048940", "language": "Python", "matching_score": 1.2981363534927368, "max_stars_count": 2, "path": "bayarea/scripts/model_esti_results_to_md.py" }, { "content": "import os\nimport yaml\nimport numpy as np\nimport pandas as pd\nimport orca\nimport orca_test as ot\nfrom orca_test import OrcaSpec, TableSpec, ColumnSpec, InjectableSpec\nfrom urbansim.developer.developer import Developer as dev\nfrom urbansim.models.relocation import RelocationModel\nfrom urbansim.utils import misc\nfrom urbansim_defaults import utils\n\n\n##########################################################################################\n#\n# (1) UAL ORCA STEPS FOR DATA MODEL INITIALIZATION\n#\n##########################################################################################\n\n\[email protected]('ual_settings', cache=True)\ndef ual_settings():\n \"\"\" \n This step loads the UAL settings, which are kept separate for clarity.\n \n Data expectations\n -----------------\n - 'configs' folder contains a file called 'ual_settings.yaml'\n - 'os.path' is expected to provide the root level of the urbansim instance, so be sure \n to either (a) launch the python process from that directory, or (b) use os.chdir to \n switch to that directory before running any model steps\n \"\"\"\n with open(os.path.join(misc.configs_dir(), 'ual_settings.yaml')) as f:\n return yaml.load(f)\n\n\ndef _ual_create_empty_units(buildings):\n \"\"\"\n Create a table of empty units corresponding to an input table of buildings. This\n function is used (a) in initialization and (b) after the developer model steps run.\n \n Parameters\n ----------\n buildings : DataFrameWrapper or DataFrame\n Must contain an index to be used as the building identifier, and a count of \n 'residential_units' which will determine the number of units to create\n \n Returns\n -------\n df : DataFrame\n Table of units, to be processed within an orca step\n \"\"\"\n # The '.astype(int)' deals with a bug (?) 
where the developer model creates \n # floating-point unit counts\n \n df = pd.DataFrame({\n 'unit_residential_price': 0,\n 'unit_residential_rent': 0,\n 'num_units': 1,\n 'building_id': np.repeat(buildings.index.values,\n buildings.residential_units.values.astype(int)),\n # counter of the units in a building\n 'unit_num': np.concatenate([np.arange(i) for i in \\\n buildings.residential_units.values.astype(int)])\n }).sort_values(by=['building_id', 'unit_num']).reset_index(drop=True)\n df.index.name = 'unit_id'\n return df\n \n \[email protected]('ual_initialize_residential_units')\ndef ual_initialize_residential_units(buildings, ual_settings):\n \"\"\"\n This initialization step creates and registers a table of synthetic residential units, \n based on building info.\n \n Data expections\n ---------------\n - 'buildings' table has following columns:\n - index that serves as its id\n - 'residential_units' (int, never missing)\n - 'zone_id' (int, non-missing??)\n - 'ual_settings' injectable contains list of tables called 'unit_aggregation_tables'\n \n Results\n -------\n - initializes a 'residential_units' table with the following columns:\n - 'unit_id' (index)\n - 'num_units' (int, always '1', needed when passing the table to utility functions\n that expect it to look like a 'buildings' table)\n - 'unit_residential_price' (float, 0-filled)\n - 'unit_residential_rent' (float, 0-filled)\n - 'building_id' (int, non-missing, corresponds to index of 'buildings' table)\n - 'unit_num' (int, non-missing, unique within building) \n - 'submarket_id' (int, non-missing, computed, corresponds to index of 'zones' table)\n - adds broadcasts linking 'residential_units' table to:\n - 'buildings' table\n - initializes a 'unit_aggregations' injectable containing tables as specified in \n 'ual_settings' -> 'unit_aggregation_tables'\n \"\"\"\n \n # Verify initial data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('buildings',\n ColumnSpec('building_id', primary_key=True),\n ColumnSpec('residential_units', min=0, missing=False),\n ColumnSpec('zone_id', foreign_key='zones.zone_id', missing=False)),\n TableSpec('residential_units', registered=False),\n InjectableSpec('ual_settings', has_key='unit_aggregation_tables')))\n\n @orca.table('residential_units', cache=True)\n def residential_units(buildings):\n return _ual_create_empty_units(buildings)\n \n @orca.column('residential_units', 'submarket_id')\n def submarket_id(residential_units, buildings):\n # The submarket is used for supply/demand equilibration. 
It's the same as the \n # zone_id, but in a separate column to avoid name conflicts when tables are merged.\n return misc.reindex(buildings.zone_id, residential_units.building_id)\n\n orca.broadcast('buildings', 'residential_units', cast_index=True, onto_on='building_id')\n \n # This injectable provides a list of tables needed for hedonic and LCM model steps, \n # but it cannot be evaluated until the network aggregation steps are run\n @orca.injectable('unit_aggregations')\n def unit_aggregations(ual_settings):\n return [orca.get_table(tbl) for tbl in ual_settings['unit_aggregation_tables']]\n\n # Verify final data characteristics\n ot.assert_orca_spec(OrcaSpec('', \n TableSpec('residential_units',\n ColumnSpec('unit_id', primary_key=True),\n ColumnSpec('num_units', min=1, max=1, missing=False),\n ColumnSpec('unit_residential_price', min=0, missing=False),\n ColumnSpec('unit_residential_rent', min=0, missing=False),\n ColumnSpec('building_id', foreign_key='buildings.building_id', missing=False),\n ColumnSpec('unit_num', min=0, missing=False),\n ColumnSpec('submarket_id', foreign_key='zones.zone_id', missing=False))))\n return\n\n \[email protected]('ual_match_households_to_units')\ndef ual_match_households_to_units(households, residential_units):\n \"\"\"\n This initialization step adds a 'unit_id' to the households table and populates it\n based on existing assignments of households to buildings. This also allows us to add\n a 'vacant_units' count to the residential_units table. \n \n Data expectations\n -----------------\n - 'households' table has NO column 'unit_id'\n - 'households' table has column 'building_id' (int, '-1'-filled, corresponds to index \n of 'buildings' table)\n - 'residential_units' table has an index that serves as its id, and following columns:\n - 'building_id' (int, non-missing, corresponds to index of 'buildings' table)\n - 'unit_num' (int, non-missing, unique within building)\n \n Results\n -------\n - adds following column to 'households' table:\n - 'unit_id' (int, '-1'-filled, corresponds to index of 'residential_units' table)\n - adds following column to 'residential_units' table:\n - 'vacant_units' (int, 0 or 1, computed)\n - adds a broadcast linking 'households' to 'residential_units'\n \"\"\"\n \n # Verify initial data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('households',\n ColumnSpec('unit_id', registered=False),\n ColumnSpec('building_id', foreign_key='buildings.building_id', missing_val_coding=-1)),\n TableSpec('residential_units',\n ColumnSpec('unit_id', primary_key=True),\n ColumnSpec('building_id', foreign_key='buildings.building_id', missing=False),\n ColumnSpec('unit_num', min=0, missing=False))))\n \n hh = households.to_frame(households.local_columns)\n units = residential_units.to_frame(['building_id', 'unit_num'])\n \n # This code block is from Fletcher\n unit_lookup = units.reset_index().set_index(['building_id', 'unit_num'])\n hh = hh.sort_values(by=['building_id'], ascending=True)\n building_counts = hh.building_id.value_counts().sort_index()\n hh['unit_num'] = np.concatenate([np.arange(i) for i in building_counts.values])\n unplaced = hh[hh.building_id == -1].index\n placed = hh[hh.building_id != -1].index\n indexes = [tuple(t) for t in hh.loc[placed, ['building_id', 'unit_num']].values]\n hh.loc[placed, 'unit_id'] = unit_lookup.loc[indexes].unit_id.values\n hh.loc[unplaced, 'unit_id'] = -1\n orca.add_table('households', hh)\n \n @orca.column('residential_units', 'vacant_units')\n def vacant_units(residential_units, 
households):\n return residential_units.num_units.sub(\n households.unit_id[households.unit_id != -1].value_counts(), fill_value=0)\n\n orca.broadcast('residential_units', 'households', cast_index=True, onto_on='unit_id')\n\n # Verify final data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('households',\n ColumnSpec('unit_id', foreign_key='residential_units.unit_id', missing_val_coding=-1)),\n TableSpec('residential_units',\n ColumnSpec('vacant_units', min=0, max=1))))\n return\n\n\[email protected]('ual_assign_tenure_to_units')\ndef ual_assign_tenure_to_units(residential_units, households):\n \"\"\"\n This initialization step assigns tenure to residential units, based on the 'hownrent'\n attribute of the households occupying them. (Tenure for unoccupied units is assigned\n randomly.)\n \n Data expections\n ---------------\n - 'residential_units' table has NO column 'hownrent'\n - 'households' table has following columns: \n - 'hownrent' (int, missing values ok) \n - 'unit_id' (int, '-1'-filled, corresponds to index of 'residential_units' table)\n \n Results\n -------\n - adds following column to 'residential_units' table:\n - 'hownrent' (int in range [1,2], non-missing)\n \"\"\"\n \n # Verify initial data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('residential_units',\n ColumnSpec('hownrent', registered=False)),\n TableSpec('households',\n ColumnSpec('hownrent', min=1, max=2, missing_val_coding=np.nan),\n ColumnSpec('unit_id', foreign_key='residential_units.unit_id', missing_val_coding=-1))))\n \n units = residential_units.to_frame(residential_units.local_columns)\n hh = households.to_frame(['hownrent', 'unit_id'])\n \n # 'Hownrent' is a PUMS field where 1=owns, 2=rents. Note that there's also a field\n # in the MTC households table called 'tenure', with min=1, max=4, mean=2. Not sure \n # where this comes from or what the values indicate.\n \n units['hownrent'] = np.nan\n own = hh[(hh.hownrent == 1) & (hh.unit_id != -1)].unit_id.values\n rent = hh[(hh.hownrent == 2) & (hh.unit_id != -1)].unit_id.values\n units.loc[own, 'hownrent'] = 1\n units.loc[rent, 'hownrent'] = 2\n \n print \"Initial unit tenure assignment: %d%% owner occupied, %d%% unfilled\" % \\\n (round(len(units[units.hownrent == 1])*100/len(units[units.hownrent.notnull()])), \\\n round(len(units[units.hownrent.isnull()])*100/len(units)))\n \n # Fill remaining units with random tenure assignment \n # TO DO: Make this weighted by existing allocation, rather than 50/50\n unfilled = units[units.hownrent.isnull()].index\n units.loc[unfilled, 'hownrent'] = np.random.randint(1, 3, len(unfilled))\n \n orca.add_table('residential_units', units)\n \n # Verify final data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('residential_units',\n ColumnSpec('hownrent', min=1, max=2, missing_val_coding=np.nan))))\n return\n\n\[email protected]('ual_load_rental_listings')\ndef ual_load_rental_listings():\n \"\"\"\n This initialization step loads the Craigslist rental listings data for hedonic \n estimation. 
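Once this table is registered, a quick sanity check is to pull it back out of orca,\n e.g. orca.get_table('craigslist').to_frame().describe() (illustrative usage only,\n not part of the model flow). 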
Not needed for simulation.\n \n Data expectations\n -----------------\n - injectable 'net' that can provide 'node_id' and 'tmnode_id' from lat-lon coordinates\n - some way to get 'zone_id' (currently using parcels table)\n - 'sfbay_craigslist.csv' file\n \n Results\n -------\n - creates new 'craigslist' table with the following columns:\n - 'price' (int, may be missing)\n - 'sqft_per_unit' (int, may be missing)\n - 'price_per_sqft' (float, may be missing)\n - 'bedrooms' (int, may be missing)\n - 'neighborhood' (string, ''-filled)\n - 'node_id' (int, may be missing, corresponds to index of 'nodes' table)\n - 'tmnode_id' (int, may be missing, corresponds to index of 'tmnodes' tables)\n - 'zone_id' (int, may be missing, corresponds to index of 'zones' table)\n - adds broadcasts linking 'craigslist' to 'nodes', 'tmnodes', 'logsums'\n \"\"\"\n @orca.table('craigslist', cache=True)\n def craigslist():\n df = pd.read_csv(os.path.join(misc.data_dir(), \"sfbay_craigslist.csv\"))\n net = orca.get_injectable('net')\n df['node_id'] = net['walk'].get_node_ids(df['lon'], df['lat'])\n df['tmnode_id'] = net['drive'].get_node_ids(df['lon'], df['lat'])\n # fill nans -- missing bedrooms are mostly studio apts\n df['bedrooms'] = df.bedrooms.replace(np.nan, 1)\n df['neighborhood'] = df.neighborhood.replace(np.nan, '')\n return df\n\n # Is it simpler to just do this in the table definition since it is never updated?\n @orca.column('craigslist', 'zone_id', cache=True)\n def zone_id(craigslist, parcels):\n return misc.reindex(parcels.zone_id, craigslist.node_id)\n \n orca.broadcast('nodes', 'craigslist', cast_index=True, onto_on='node_id')\n orca.broadcast('tmnodes', 'craigslist', cast_index=True, onto_on='tmnode_id')\n orca.broadcast('logsums', 'craigslist', cast_index=True, onto_on='zone_id')\n return\n\n\n\n##########################################################################################\n#\n# (2) UAL ORCA STEPS FOR DATA MODEL MAINTENANCE\n#\n##########################################################################################\n\n\[email protected]('ual_reconcile_placed_households')\ndef reconcile_placed_households(households, residential_units):\n \"\"\"\n This data maintenance step keeps the building/unit/household correspondence up to \n date by reconciling placed households.\n \n In the current data model, households should have both a 'building_id' and 'unit_id'\n when they have been matched with housing. But the existing HLCM models assign only\n a 'unit_id', so this model step updates the building id's accordingly. 
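\n \n Roughly, the update below is equivalent to this sketch (variable names are\n illustrative, not the exact code):\n \n hh = households.to_frame(['unit_id', 'building_id'])\n units = residential_units.to_frame(['building_id']).reset_index()\n movers = hh[(hh.building_id == -1) & (hh.unit_id != -1)]\n mapped = movers[['unit_id']].merge(units, on='unit_id', how='left')\n hh.loc[movers.index, 'building_id'] = mapped.building_id.values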
\n \n Data expectations\n -----------------\n - 'households' table has the following columns:\n - index 'household_id'\n - 'unit_id' (int, '-1'-filled)\n - 'building_id' (int, '-1'-filled)\n - 'residential_units' table has the following columns:\n - index 'unit_id'\n - 'building_id' (int, non-missing, corresponds to index of the 'buildings' table)\n \n Results\n -------\n - updates the 'households' table:\n - 'building_id' updated where it was -1 but 'unit_id' wasn't\n \"\"\"\n \n # Verify initial data characteristics\n\n # ot.assert_orca_spec(OrcaSpec('',\n # TableSpec('households',\n # ColumnSpec('household_id', primary_key=True),\n # ColumnSpec('unit_id', foreign_key='residential_units.unit_id', missing_val_coding=-1),\n # ColumnSpec('building_id', foreign_key='buildings.building_id', missing_val_coding=-1)),\n # TableSpec('residential_units',\n # ColumnSpec('unit_id', primary_key=True),\n # ColumnSpec('building_id', foreign_key='buildings.building_id', missing=False))))\n\n hh = households.to_frame(['unit_id', 'building_id'])\n hh.index.rename('household_id',inplace=True)\n hh = hh.reset_index()\n print \"hh columns: %s\" % hh.columns\n \n # hh.index.name = 'household_id'\n units = residential_units.to_frame(['building_id']).reset_index()\n \n # Filter for households missing a 'building_id' but not a 'unit_id'\n hh = hh[(hh.building_id == -1) & (hh.unit_id != -1)]\n \n # Join building id's to the filtered households, using mapping from the units table\n hh = hh.drop('building_id', axis=1)\n hh = pd.merge(hh, units, on='unit_id', how='left').set_index('household_id')\n print \"hh index.names: %s\" % hh.index.names\n \n print \"%d movers updated\" % len(hh)\n households.update_col_from_series('building_id', hh.building_id, cast=True)\n \n # Verify final data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('households',\n ColumnSpec('building_id', foreign_key='buildings.building_id', missing_val_coding=-1))))\n return\n \n\[email protected]('ual_reconcile_unplaced_households')\ndef reconcile_unplaced_households(households):\n \"\"\"\n This data maintenance step keeps the building/unit/household correspondence up to \n date by reconciling unplaced households.\n \n In the current data model, households should have both a 'building_id' and 'unit_id'\n of -1 when they are not matched with housing. But sometimes only of these is set when\n households are created or unplaced. If households have been unplaced from buildings, \n this model step unplaces them from units as well. Or if they have been unplaced from \n units, it unplaces them from buildings. 
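\n \n Conceptually this amounts to (sketch only, not the exact code below):\n \n hh = households.to_frame(['building_id', 'unit_id'])\n mask = (hh.building_id == -1) | (hh.unit_id == -1)\n hh.loc[mask, ['building_id', 'unit_id']] = -1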
\n \n Data expectations\n -----------------\n - 'households' table has an index, and these columns:\n - 'unit_id' (int, '-1'-filled)\n - 'building_id' (int, '-1'-filled)\n \n Results\n -------\n - updates the 'households' table:\n - 'unit_id' = 'building_id' = -1 for the superset of rows where either column\n initially had this vaue \n \"\"\"\n \n # Verify initial data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('households',\n ColumnSpec('unit_id', numeric=True, missing_val_coding=-1),\n ColumnSpec('building_id', numeric=True, missing_val_coding=-1))))\n\n def _print_status():\n print \"Households not in a unit: %d\" % (households.unit_id == -1).sum()\n print \"Househing missing a unit: %d\" % households.unit_id.isnull().sum()\n print \"Households not in a building: %d\" % (households.building_id == -1).sum()\n print \"Househing missing a building: %d\" % households.building_id.isnull().sum()\n\n _print_status()\n print \"Reconciling unplaced households...\"\n hh = households.to_frame(['building_id', 'unit_id'])\n \n # Get indexes of households unplaced in buildings or in units\n bldg_unplaced = pd.Series(-1, index=hh[hh.building_id == -1].index)\n unit_unplaced = pd.Series(-1, index=hh[hh.unit_id == -1].index)\n \n # Update those households to be fully unplaced\n households.update_col_from_series('building_id', unit_unplaced, cast=True)\n households.update_col_from_series('unit_id', bldg_unplaced, cast=True)\n _print_status()\n\n # Verify final data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('households',\n ColumnSpec('unit_id', foreign_key='residential_units.unit_id', missing_val_coding=-1),\n ColumnSpec('building_id', foreign_key='buildings.building_id', missing_val_coding=-1))))\n return\n \n\[email protected]('ual_update_building_residential_price')\ndef ual_update_building_residential_price(buildings, residential_units, ual_settings):\n \"\"\"\n This data maintenance step updates the prices in the buildings table to reflect \n changes to the unit-level prices. This allows model steps like 'price_vars' and \n 'feasibility' to read directly from the buildings table. 
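\n \n The rent side is converted to a price-equivalent value with the cap rate; for\n example, with a cap_rate of 0.05 a unit renting for $2,000/month is treated as\n roughly 2000 * 12 / 0.05 = $480,000 in sale-price terms (illustrative numbers;\n the actual cap_rate comes from ual_settings).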
\n \n We currently set the building price per square foot to be the higher of the average\n (a) unit price per square foot or (b) unit price-adjusted rent per square foot.\n \n Data expectations\n -----------------\n - 'residential_units' table has following columns:\n - 'unit_residential_price' (float, 0-filled)\n - 'unit_residential_rent' (float, 0-filled)\n - 'building_id' (int, non-missing, corresponds to index of 'buildings' table)\n - 'buildings' table has following columns:\n - index that serves as its id\n - 'residential_price' (float, 0-filled)\n - 'ual_settings' injectable has a 'cap_rate' (float, range 0 to 1)\n \n Results\n -------\n - updates the 'buildings' table:\n - 'residential_price' = max avg of unit prices or rents \n \"\"\"\n \n # Verify initial data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('residential_units',\n ColumnSpec('unit_residential_price', min=0),\n ColumnSpec('unit_residential_rent', min=0),\n ColumnSpec('building_id', foreign_key='buildings.building_id', missing=False)),\n TableSpec('buildings',\n ColumnSpec('building_id', primary_key=True),\n ColumnSpec('residential_price', min=0)),\n InjectableSpec('ual_settings', min=0, max=1)))\n\n cols = ['building_id', 'unit_residential_price', 'unit_residential_rent']\n means = residential_units.to_frame(cols).groupby(['building_id']).mean()\n \n # Convert monthly rent to equivalent sale price\n cap_rate = ual_settings.get('cap_rate')\n means['unit_residential_rent'] = means.unit_residential_rent * 12 / cap_rate\n \n # Calculate max of price or rent, by building\n means['max_potential'] = means.max(axis=1)\n print means.describe()\n \n # Update the buildings table\n buildings.update_col_from_series('residential_price', means.max_potential, cast=True)\n \n # Verify final data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('buildings',\n ColumnSpec('residential_price', min=0))))\n return\n \n\[email protected]('ual_remove_old_units')\ndef ual_remove_old_units(buildings, residential_units):\n \"\"\"\n This data maintenance step removes units whose building_ids no longer exist.\n \n If new buildings have been created that re-use prior building_ids, we would fail to\n remove the associated units. 
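(The core of the step is the\n filter units[units.building_id.isin(buildings.index)] used in the body.) 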
Hopefully new buidlings do not duplicate prior ids,\n but this needs to be verified!\n\n Data expectations\n -----------------\n - 'buildings' table has an index that serves as its identifier\n - 'residential_units' table has a column 'building_id' corresponding to the index\n of the 'buildings' table\n \n Results\n -------\n - removes rows from the 'residential_units' table if their 'building_id' no longer\n exists in the 'buildings' table\n \"\"\"\n \n # Verify initial data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('buildings',\n ColumnSpec('building_id', primary_key=True)),\n TableSpec('residential_units',\n ColumnSpec('building_id', numeric=True))))\n \n units = residential_units.to_frame(residential_units.local_columns)\n current_units = units[units.building_id.isin(buildings.index)]\n \n print \"Removing %d residential units from %d buildings that no longer exist\" % \\\n ((len(units) - len(current_units)), \\\n (len(units.groupby('building_id')) - len(current_units.groupby('building_id'))))\n \n orca.add_table('residential_units', current_units) \n\n # Verify final data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('residential_units',\n ColumnSpec('building_id', foreign_key='buildings.building_id'))))\n return\n\n\[email protected]('ual_initialize_new_units')\ndef ual_initialize_new_units(buildings, residential_units):\n \"\"\"\n This data maintenance step initializes units for buildings that have been newly\n created, conforming to the data requirements of the 'residential_units' table.\n \n Data expectations\n -----------------\n - 'buildings' table has the following columns:\n - index that serves as its identifier\n - 'residential_units' (int, count of units in building)\n - 'residential_units' table has the following columns:\n - index named 'unit_id' that serves as its identifier\n - 'building_id' corresponding to the index of the 'buildings' table\n \n Results\n -------\n - extends the 'residential_units' table, following the same schema as the\n 'ual_initialize_residential_units' model step\n \"\"\"\n \n # Verify initial data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('buildings',\n ColumnSpec('building_id', primary_key=True),\n ColumnSpec('residential_units', min=0)),\n TableSpec('residential_units',\n ColumnSpec('unit_id', primary_key=True),\n ColumnSpec('building_id', foreign_key='buildings.building_id'))))\n \n old_units = residential_units.to_frame(residential_units.local_columns)\n bldgs = buildings.to_frame(['residential_units'])\n \n # Filter for residential buildings not currently represented in the units table\n bldgs = bldgs[bldgs.residential_units > 0]\n new_bldgs = bldgs[~bldgs.index.isin(old_units.building_id)]\n \n # Create new units, merge them, and update the table\n new_units = _ual_create_empty_units(new_bldgs)\n all_units = dev.merge(old_units, new_units)\n all_units.index.name = 'unit_id'\n \n print \"Creating %d residential units for %d new buildings\" % \\\n (len(new_units), len(new_bldgs))\n \n orca.add_table('residential_units', all_units)\n\n # Verify final data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('residential_units',\n ColumnSpec('unit_id', primary_key=True))))\n return\n \n\[email protected]('ual_assign_tenure_to_new_units')\ndef ual_assign_tenure_to_new_units(residential_units, ual_settings):\n \"\"\"\n This data maintenance step assigns tenure to new residential units. 
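In effect: hownrent = 2 where\n unit_residential_rent * 12 / cap_rate > unit_residential_price, else 1\n (prose sketch of the vectorized code in the body). 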
Tenure is\n determined by comparing the fitted sale price and fitted rent from the hedonic models,\n with rents adjusted to price-equivalent terms using a cap rate.\n\n We may want to make this more sophisticated in the future, or at least stochastic. \n Also, it might be better to do this assignment based on the zonal average prices and \n rents following supply/demand equilibration.\n \n Data expectations\n -----------------\n - 'residential_units' table has the following columns:\n - 'hownrent' (int in range 1 to 2, may be missing)\n - 'unit_residential_price' (float, non-missing)\n - 'unit_residential_rent' (float, non-missing)\n \n Results\n -------\n - fills missing values of 'hownrent'\n \"\"\"\n \n # Verify initial data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('residential_units',\n ColumnSpec('hownrent', min=1, max=2, missing_val_coding=np.nan),\n ColumnSpec('unit_residential_price', min=0),\n ColumnSpec('unit_residential_rent', min=0))))\n\n cols = ['hownrent', 'unit_residential_price', 'unit_residential_rent']\n units = residential_units.to_frame(cols)\n \n # Filter for units that are missing a tenure assignment\n units = units[~units.hownrent.isin([1,2])]\n \n # Convert monthly rent to equivalent sale price\n cap_rate = ual_settings.get('cap_rate')\n units['unit_residential_rent'] = units.unit_residential_rent * 12 / cap_rate\n \n # Assign tenure based on higher of price or adjusted rent\n rental_units = (units.unit_residential_rent > units.unit_residential_price)\n units.loc[~rental_units, 'hownrent'] = 1\n units.loc[rental_units, 'hownrent'] = 2\n \n print \"Adding tenure assignment to %d new residential units\" % len(units)\n print units.describe()\n\n residential_units.update_col_from_series('hownrent', units.hownrent, cast=True)\n return\n \n\[email protected]('ual_save_intermediate_tables')\ndef ual_save_intermediate_tables(households, buildings, parcels, jobs, zones, year):\n \"\"\"\n This orca step saves intermediate versions of data tables, for developing \n visualization proofs of concept. 
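Each simulation year is written to its own\n HDF5 file named 'baus_<year>.h5', with one key per table; the output can be\n read back with, for example, pd.read_hdf('baus_2010.h5', 'buildings')\n (illustrative usage only).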
\n \"\"\"\n filename = 'baus_' + str(year) + '.h5'\n for table in [households, buildings, parcels, jobs, zones]:\n table.to_frame().to_hdf(filename, table.name)\n\n\n\n##########################################################################################\n#\n# (3) UAL ORCA STEPS FOR SIMULATION LOGIC\n#\n##########################################################################################\n\n\[email protected]('ual_rrh_estimate')\ndef ual_rrh_estimate(craigslist, aggregations):\n \"\"\"\n This model step estimates a residental rental hedonic using craigslist listings.\n \n Data expectations\n -----------------\n - 'craigslist' table and others, as defined in the yaml config\n \"\"\"\n return utils.hedonic_estimate(cfg = 'ual_rrh.yaml', \n tbl = craigslist, \n join_tbls = aggregations)\n\n\ndef _mtc_clip(table, col_name, settings, price_scale=1):\n # This is included to match the MTC hedonic model steps, with 'price_scale' \n # adjusting the clip bounds from price to monthly rent if needed.\n \n if \"rsh_simulate\" in settings:\n low = float(settings[\"rsh_simulate\"][\"low\"]) * price_scale\n high = float(settings[\"rsh_simulate\"][\"high\"]) * price_scale\n table.update_col(col_name, table[col_name].clip(low, high))\n print \"Clipping produces\\n\", table[col_name].describe()\n\n\[email protected]('ual_rsh_simulate')\ndef ual_rsh_simulate(residential_units, unit_aggregations, settings):\n \"\"\"\n This uses the MTC's model specification from rsh.yaml, but generates unit-level\n price predictions rather than building-level.\n \n Data expectations\n -----------------\n - tk\n \"\"\"\n utils.hedonic_simulate(cfg = 'rsh.yaml', \n tbl = residential_units, \n join_tbls = unit_aggregations, \n out_fname = 'unit_residential_price',\n cast=True)\n \n _mtc_clip(residential_units, 'unit_residential_price', settings)\n return\n\n\[email protected]('ual_rrh_simulate')\ndef ual_rrh_simulate(residential_units, unit_aggregations, settings):\n \"\"\"\n This uses an altered hedonic specification to generate unit-level rent predictions.\n \n Data expectations\n -----------------\n - tk\n \"\"\"\n utils.hedonic_simulate(cfg = 'ual_rrh.yaml', \n tbl = residential_units,\n join_tbls = unit_aggregations, \n out_fname = 'unit_residential_rent',\n cast=True)\n\n _mtc_clip(residential_units, 'unit_residential_rent', settings, price_scale=0.05/12)\n return\n\n\[email protected]('ual_households_relocation')\ndef ual_households_relocation(households, ual_settings):\n \"\"\"\n This model step randomly assigns households for relocation, using probabilities\n that depend on their tenure status.\n \n Data expectations\n -----------------\n - 'households' table has following columns:\n - 'hownrent' (int in range [1,2], non-missing)\n - 'building_id' (int, '-1'-filled, corredponds to index of 'buildings' table\n - 'unit_id' (int, '-1'-filled, corresponds to index of 'residential_units' table\n - 'ual_settings.yaml' has:\n - 'relocation_rates' as specified in RelocationModel() documentation\n \n Results\n -------\n - assigns households for relocation by setting their 'building_id' and 'unit_id' to -1\n \"\"\"\n \n # Verify expected data characteristics\n ot.assert_orca_spec(OrcaSpec('',\n TableSpec('households',\n ColumnSpec('hownrent', numeric=True, min=1, max=2, missing=False),\n ColumnSpec('building_id', numeric=True, missing_val_coding=-1),\n ColumnSpec('unit_id', numeric=True, missing_val_coding=-1))))\n \n rates = pd.DataFrame.from_dict(ual_settings['relocation_rates'])\n\n print \"Total agents: %d\" % 
len(households)\n print \"Total currently unplaced: %d\" % (households.unit_id == -1).sum()\n print \"Assigning for relocation...\"\n \n # Initialize model, choose movers, and un-place them from buildings and units\n m = RelocationModel(rates)\n mover_ids = m.find_movers(households.to_frame(['unit_id', 'hownrent']))\n households.update_col_from_series('building_id', pd.Series(-1, index=mover_ids),cast=True)\n households.update_col_from_series('unit_id', pd.Series(-1, index=mover_ids),cast=True)\n \n print \"Total currently unplaced: %d\" % (households.unit_id == -1).sum()\n return\n\n\[email protected]('ual_hlcm_owner_estimate')\ndef ual_hlcm_owner_estimate(households, residential_units, unit_aggregations):\n return utils.lcm_estimate(cfg = \"ual_hlcm_owner.yaml\",\n choosers = households, \n chosen_fname = \"unit_id\",\n buildings = residential_units, \n join_tbls = unit_aggregations)\n\n\[email protected]('ual_hlcm_renter_estimate')\ndef ual_hlcm_renter_estimate(households, residential_units, unit_aggregations):\n return utils.lcm_estimate(cfg = \"ual_hlcm_renter.yaml\",\n choosers = households, \n chosen_fname = \"unit_id\",\n buildings = residential_units, \n join_tbls = unit_aggregations)\n\n\[email protected]('ual_hlcm_owner_simulate')\ndef ual_hlcm_owner_simulate(households, residential_units, unit_aggregations, ual_settings):\n \n # Note that the submarket id (zone_id) needs to be in the table of alternatives,\n # for supply/demand equilibration, and needs to NOT be in the choosers table, to\n # avoid conflicting when the tables are joined\n \n return utils.lcm_simulate(cfg = 'ual_hlcm_owner.yaml', \n choosers = households, \n buildings = residential_units,\n join_tbls = unit_aggregations,\n out_fname = 'unit_id', \n supply_fname = 'num_units',\n vacant_fname = 'vacant_units',\n enable_supply_correction = \n ual_settings.get('price_equilibration', None),\n cast=True)\n\n\[email protected]('ual_hlcm_renter_simulate')\ndef ual_hlcm_renter_simulate(households, residential_units, unit_aggregations, ual_settings):\n return utils.lcm_simulate(cfg = 'ual_hlcm_renter.yaml', \n choosers = households, \n buildings = residential_units,\n join_tbls = unit_aggregations,\n out_fname = 'unit_id', \n supply_fname = 'num_units',\n vacant_fname = 'vacant_units',\n enable_supply_correction = \n ual_settings.get('rent_equilibration', None),\n cast=True)\n\n\n\n\n\n", "id": "7052552", "language": "Python", "matching_score": 4.258811950683594, "max_stars_count": 0, "path": "bayarea_urbansim/baus/ual.py" }, { "content": "import os\nimport sys\nimport time\nimport traceback\nfrom baus import models\nfrom baus import ual\nimport pandas as pd\nimport orca\nimport socket\nimport warnings\nfrom baus.utils import compare_summary\n\nwarnings.filterwarnings(\"ignore\")\n\nargs = sys.argv[1:]\n\n# Suppress scientific notation in pandas output\npd.set_option('display.float_format', lambda x: '%.3f' % x)\n\nSLACK = MAPS = \"URBANSIM_SLACK\" in os.environ\nLOGS = True\nINTERACT = False\nSCENARIO = None\nMODE = \"ual_simulation\"\nS3 = False\nEVERY_NTH_YEAR = 5\nCURRENT_COMMIT = os.popen('git rev-parse HEAD').read()\nCOMPARE_TO_NO_PROJECT = True\nNO_PROJECT = 611\nIN_YEAR, OUT_YEAR = 2010, 2011\n\nLAST_KNOWN_GOOD_RUNS = {\n \"0\": 1057,\n \"1\": 1058,\n \"2\": 1059,\n \"3\": 1060,\n \"4\": 1059,\n \"5\": 1059\n}\n\norca.add_injectable(\"years_per_iter\", EVERY_NTH_YEAR)\n\nif len(args) and args[0] == \"-i\":\n SLACK = MAPS = LOGS = False\n INTERACT = True\n\nif len(args) and args[0] == \"-s\":\n 
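# usage example (illustrative): \"python run.py -s 3\" selects scenario 3;\n # the scenario keys referenced elsewhere in this script are \"0\" through \"5\"\n 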
orca.add_injectable(\"scenario\", args[1])\n\nSCENARIO = orca.get_injectable(\"scenario\")\n\nif INTERACT:\n import code\n code.interact(local=locals())\n sys.exit()\n\nrun_num = orca.get_injectable(\"run_number\")\n\nif LOGS:\n print '***The Standard stream is being written to /runs/run{0}.log***'\\\n .format(run_num)\n sys.stdout = sys.stderr = open(\"runs/run%d.log\" % run_num, 'w')\n\nif SLACK:\n from slacker import Slacker\n slack = Slacker(os.environ[\"SLACK_TOKEN\"])\n host = socket.gethostname()\n\n\ndef get_simulation_models(SCENARIO):\n\n models = [\n \"neighborhood_vars\", # local accessibility vars\n \"regional_vars\", # regional accessibility vars\n\n \"rsh_simulate\", # residential sales hedonic\n \"nrh_simulate\", # non-residential rent hedonic\n\n \"households_relocation\",\n \"households_transition\",\n\n \"jobs_relocation\",\n \"jobs_transition\",\n\n \"price_vars\",\n\n \"scheduled_development_events\", # scheduled buildings additions\n\n \"lump_sum_accounts\", # run the subsidized acct system\n \"subsidized_residential_developer_lump_sum_accts\",\n\n \"alt_feasibility\",\n\n \"residential_developer\",\n \"developer_reprocess\",\n \"retail_developer\",\n \"office_developer\",\n\n \"hlcm_simulate\", # put these last so they don't get\n \"elcm_simulate\", # displaced by new dev\n\n \"topsheet\",\n \"parcel_summary\",\n \"building_summary\",\n \"diagnostic_output\",\n \"geographic_summary\",\n \"travel_model_output\"\n ]\n\n # calculate VMT taxes\n if SCENARIO in [\"1\", \"3\", \"4\"]:\n # calculate the vmt fees at the end of the year\n\n # note that you might also have to change the fees that get\n # imposed - look for fees_per_unit column in variables.py\n if SCENARIO == \"3\":\n orca.get_injectable(\"settings\")[\"vmt_fee_res\"] = True\n if SCENARIO == \"1\":\n orca.get_injectable(\"settings\")[\"vmt_fee_com\"] = True\n if SCENARIO == \"4\":\n orca.get_injectable(\"settings\")[\"vmt_fee_com\"] = True\n models.insert(models.index(\"diagnostic_output\"),\n \"calculate_vmt_fees\")\n models.insert(models.index(\"alt_feasibility\"),\n \"subsidized_residential_feasibility\")\n models.insert(models.index(\"alt_feasibility\"),\n \"subsidized_residential_developer_vmt\")\n\n return models\n\n\ndef run_models(MODE, SCENARIO):\n\n orca.run([\"correct_baseyear_data\"])\n\n if MODE == \"simulation\":\n\n years_to_run = range(IN_YEAR, OUT_YEAR+1, EVERY_NTH_YEAR)\n models = get_simulation_models(SCENARIO)\n orca.run(models, iter_vars=years_to_run)\n\n elif MODE == \"ual_simulation\":\n\n years_to_run = range(IN_YEAR, OUT_YEAR+1, EVERY_NTH_YEAR)\n models = get_simulation_models(SCENARIO)\n\n # Initialization steps\n orca.run([\n \"ual_initialize_residential_units\",\n \"ual_match_households_to_units\",\n \"ual_assign_tenure_to_units\",\n# \"ual_load_rental_listings\", # required to estimate rental hedonic\n ])\n\n # Estimation steps\n orca.run([\n# \"neighborhood_vars\", # street network accessibility\n# \"regional_vars\", # road network accessibility\n# \n# \"ual_rrh_estimate\", # estimate residential rental hedonic\n# \n# \"ual_hlcm_owner_estimate\", # estimate location choice for owners\n# \"ual_hlcm_renter_estimate\", # estimate location choice for renters\n\n ])\n\n # Simulation steps\n orca.run([\n\n \"neighborhood_vars\", # street network accessibility\n \"regional_vars\", # road network accessibility\n \n \"ual_rsh_simulate\", # residential sales hedonic for units\n \"ual_rrh_simulate\", # residential rental hedonic for units\n \"nrh_simulate\", # non-residential rent hedonic\n 
\n \"ual_assign_tenure_to_new_units\", # (based on higher of predicted price or rent)\n \n \"ual_households_relocation\", # uses conditional probabilities\n \"households_transition\",\n \"ual_reconcile_unplaced_households\", # update building/unit/hh correspondence\n\n \"ual_hlcm_owner_simulate\", # allocate owners to vacant owner-occupied units\n \"ual_hlcm_renter_simulate\", # allocate renters to vacant rental units\n \"ual_reconcile_placed_households\", # update building/unit/hh correspondence\n\n \"jobs_relocation\",\n \"jobs_transition\",\n \"elcm_simulate\",\n\n \"ual_update_building_residential_price\", # apply unit prices to buildings \n \"price_vars\",\n \"scheduled_development_events\",\n \"alt_feasibility\",\n \n \"residential_developer\",\n \"developer_reprocess\",\n \"retail_developer\",\n \"office_developer\",\n \n \"ual_remove_old_units\", # (for buildings that were removed)\n \"ual_initialize_new_units\", # set up units for new residential buildings\n \"ual_reconcile_unplaced_households\", # update building/unit/hh correspondence\n\n# \"ual_save_intermediate_tables\", # saves output for visualization\n \n \"topsheet\",\n \"diagnostic_output\",\n \"geographic_summary\",\n \"travel_model_output\"\n\n ], iter_vars=years_to_run)\n\n elif MODE == \"estimation\":\n\n orca.run([\n\n \"neighborhood_vars\", # local accessibility variables\n \"regional_vars\", # regional accessibility variables\n \"rsh_estimate\", # residential sales hedonic\n \"nrh_estimate\", # non-res rent hedonic\n \"rsh_simulate\",\n \"nrh_simulate\",\n \"hlcm_estimate\", # household lcm\n \"elcm_estimate\", # employment lcm\n\n ], iter_vars=[2010])\n\n elif MODE == \"baseyearsim\":\n\n orca.run([\n\n \"neighborhood_vars\", # local accessibility vars\n \"regional_vars\", # regional accessibility vars\n\n \"rsh_simulate\", # residential sales hedonic\n\n \"households_transition\",\n\n \"hlcm_simulate\", # put these last so they don't get\n\n \"geographic_summary\",\n \"travel_model_output\"\n\n ], iter_vars=[2010])\n\n for geog_name in [\"juris\", \"pda\", \"superdistrict\", \"taz\"]:\n os.rename(\n \"runs/run%d_%s_summaries_2010.csv\" % (run_num, geog_name),\n \"output/baseyear_%s_summaries_2010.csv\" % geog_name)\n\n elif MODE == \"feasibility\":\n\n orca.run([\n\n \"neighborhood_vars\", # local accessibility vars\n \"regional_vars\", # regional accessibility vars\n\n \"rsh_simulate\", # residential sales hedonic\n \"nrh_simulate\", # non-residential rent hedonic\n\n \"price_vars\",\n \"subsidized_residential_feasibility\"\n\n ], iter_vars=[2010])\n\n # the whole point of this is to get the feasibility dataframe\n # for debugging\n df = orca.get_table(\"feasibility\").to_frame()\n df = df.stack(level=0).reset_index(level=1, drop=True)\n df.to_csv(\"output/feasibility.csv\")\n\n else:\n\n raise \"Invalid mode\"\n\n\nprint \"Started\", time.ctime()\nprint \"Current Commit : \", CURRENT_COMMIT.rstrip()\nprint \"Current Scenario : \", orca.get_injectable('scenario').rstrip()\n\n\nif SLACK:\n slack.chat.post_message(\n '#sim_updates',\n 'Starting simulation %d on host %s (scenario: %s)' %\n (run_num, host, SCENARIO), as_user=True)\n\ntry:\n\n run_models(MODE, SCENARIO)\n\nexcept Exception as e:\n print traceback.print_exc()\n if SLACK:\n slack.chat.post_message(\n '#sim_updates',\n 'DANG! 
Simulation failed for %d on host %s'\n % (run_num, host), as_user=True)\n else:\n raise e\n sys.exit(0)\n\nprint \"Finished\", time.ctime()\n\nif MAPS:\n\n from urbansim_explorer import sim_explorer as se\n se.start(\n 'runs/run%d_simulation_output.json' % run_num,\n 'runs/run%d_parcel_output.csv' % run_num,\n write_static_file='/var/www/html/sim_explorer%d.html' % run_num\n )\n\nif SLACK:\n slack.chat.post_message(\n '#sim_updates',\n 'Completed simulation %d on host %s' % (run_num, host), as_user=True)\n\n slack.chat.post_message(\n '#sim_updates',\n 'UrbanSim explorer is available at ' +\n 'http://urbanforecast.com/sim_explorer%d.html' % run_num, as_user=True)\n\n slack.chat.post_message(\n '#sim_updates',\n 'Final topsheet is available at ' +\n 'http://urbanforecast.com/runs/run%d_topsheet_2040.log' % run_num,\n as_user=True)\n\nif MODE == \"simulation\":\n # compute and write the difference report at the superdistrict level\n prev_run = LAST_KNOWN_GOOD_RUNS[SCENARIO]\n # fetch the previous run off of the internet for comparison - the \"last\n # known good run\" should always be available on EC2\n df1 = pd.read_csv(\"http://urbanforecast.com/runs/run%d_superdistrict_summaries_2040.csv\" % prev_run)\n df1 = df1.set_index(df1.columns[0]).sort_index()\n\n df2 = pd.read_csv(\"runs/run%d_superdistrict_summaries_2040.csv\" % run_num)\n df2 = df2.set_index(df2.columns[0]).sort_index()\n\n supnames = \\\n pd.read_csv(\"data/superdistricts.csv\", index_col=\"number\").name\n\n summary = compare_summary(df1, df2, supnames)\n with open(\"runs/run%d_difference_report.log\" % run_num, \"w\") as f:\n f.write(summary)\n\nif SLACK and MODE == \"simulation\":\n\n if len(summary.strip()) != 0:\n sum_lines = len(summary.strip().split(\"\\n\"))\n slack.chat.post_message(\n '#sim_updates',\n ('Difference report is available at ' +\n 'http://urbanforecast.com/runs/run%d_difference_report.log ' +\n '- %d line(s)') % (run_num, sum_lines),\n as_user=True)\n else:\n slack.chat.post_message(\n '#sim_updates', \"No differences with reference run.\", as_user=True)\n\nif S3:\n try:\n os.system(\n 'ls runs/run%d_* ' % run_num +\n '| xargs -I file aws s3 cp file ' +\n 's3://bayarea-urbansim-results')\n except Exception as e:\n raise e\n sys.exit(0)\n", "id": "1154129", "language": "Python", "matching_score": 3.5406434535980225, "max_stars_count": 0, "path": "bayarea_urbansim/run.py" }, { "content": "import sys\nimport time\nimport orca\nimport pandas as pd\nimport numpy as np\nfrom urbansim import accounts\nfrom urbansim_defaults import utils\nfrom cStringIO import StringIO\nfrom urbansim.utils import misc\n\n\[email protected](\"coffer\", cache=True)\ndef coffer(settings):\n d = {\n \"vmt_fee_acct\": accounts.Account(\"vmt_fee_acct\")\n }\n\n for key, acct in settings[\"acct_settings\"][\"lump_sum_accounts\"].items():\n d[acct[\"name\"]] = accounts.Account(acct[\"name\"])\n\n return d\n\n\[email protected](\"acct_settings\", cache=True)\ndef acct_settings(settings):\n return settings[\"acct_settings\"]\n\n\ndef tax_buildings(buildings, acct_settings, account, year):\n \"\"\"\n Tax buildings and add the tax to an Account object by subaccount\n\n Parameters\n ----------\n buildings : DataFrameWrapper\n The standard buildings DataFrameWrapper\n acct_settings : Dict\n A dictionary of settings to parameterize the model. 
Needs these keys:\n sending_buildings_subaccount_def - maps buildings to subaccounts\n sending_buildings_filter - filter for eligible buildings\n sending_buildings_tax - a Pandas eval expression to compute the tax\n accounts : Account\n The Account object to use for subsidization\n year : int\n The current simulation year (will be added as metadata)\n\n Returns\n -------\n Nothing\n \"\"\"\n buildings = buildings.query(acct_settings[\"sending_buildings_filter\"])\n\n tax = buildings.eval(acct_settings[\"sending_buildings_tax\"])\n\n subaccounts = buildings[acct_settings[\"sending_buildings_subaccount_def\"]]\n\n tot_tax_by_subaccount = tax.groupby(subaccounts).sum()\n\n for subacct, amt in tot_tax_by_subaccount.iteritems():\n metadata = {\n \"description\": \"Collecting property tax\",\n \"year\": year\n }\n account.add_transaction(amt, subaccount=subacct,\n metadata=metadata)\n\n print \"Sample rows from property tax accts:\"\n print account.to_frame().\\\n sort(columns=[\"amount\"], ascending=False).head(7)\n\n\[email protected]()\ndef lump_sum_accounts(settings, year, buildings, coffer,\n summary, years_per_iter, scenario):\n\n s = settings[\"acct_settings\"][\"lump_sum_accounts\"]\n\n for key, acct in s.items():\n\n if scenario not in acct[\"enable_in_scenarios\"]:\n continue\n\n amt = float(acct[\"total_amount\"])\n\n amt *= years_per_iter\n\n metadata = {\n \"description\": \"%s subsidies\" % acct[\"name\"],\n \"year\": year\n }\n # the subaccount is meaningless here (it's a regional account) -\n # but the subaccount number is referred to below\n coffer[acct[\"name\"]].add_transaction(amt, subaccount=1,\n metadata=metadata)\n\n\n# this will compute the reduction in revenue from a project due to\n# inclustionary housing - the calculation will be described in thorough\n# comments alongside the code\ndef inclusionary_housing_revenue_reduction(feasibility, units):\n\n print \"Computing adjustments due to inclusionary housing\"\n\n # AMI by jurisdiction\n #\n # in practice deed restrictions are done by household size but we aren't\n # going to deed restrict them by household size so it makes sense not to\n # do that here - if we did this by household size like we do in the real\n # world we'd need to have a better representation of what household size\n # is in which unit type\n\n households = orca.get_table(\"households\")\n buildings = orca.get_table(\"buildings\")\n parcels_geography = orca.get_table(\"parcels_geography\")\n h = orca.merge_tables(\"households\",\n [households, buildings, parcels_geography],\n columns=[\"juris_name\", \"income\"])\n AMI = h.groupby(h.juris_name).income.quantile(.5)\n\n # per <NAME> (@akselx)\n # take 90% of AMI and multiple by 33% to get the max amount a\n # household can pay per year, divide by 12 to get monthly amt,\n # subtract condo fee\n\n monthly_condo_fee = 250\n monthly_affordable_payment = AMI * .9 * .33 / 12 - monthly_condo_fee\n\n def value_can_afford(monthly_payment):\n # this is a 10 year average freddie mac interest rate\n ten_year_average_interest = .055\n return np.npv(ten_year_average_interest/12, [monthly_payment]*30*12)\n\n value_can_afford = {k: value_can_afford(v) for k, v in\n monthly_affordable_payment.to_dict().items()}\n value_can_afford = pd.Series(value_can_afford)\n\n # account for interest and property taxes\n interest_and_prop_taxes = .013\n value_can_afford /= 1+interest_and_prop_taxes\n\n # there's a lot more nuance to inclusionary percentages than this -\n # e.g. 
specific neighborhoods get specific amounts -\n # http://sf-moh.org/modules/showdocument.aspx?documentid=7253\n\n pct_inclusionary = orca.get_injectable(\"inclusionary_housing_settings\")\n juris_name = parcels_geography.juris_name.loc[feasibility.index]\n pct_affordable = juris_name.map(pct_inclusionary).fillna(0)\n value_can_afford = juris_name.map(value_can_afford)\n\n num_affordable_units = (units * pct_affordable).fillna(0).astype(\"int\")\n\n ave_price_per_unit = \\\n feasibility[('residential', 'building_revenue')] / units\n\n revenue_diff_per_unit = (ave_price_per_unit - value_can_afford).fillna(0)\n print \"Revenue difference per unit (not zero values)\"\n print revenue_diff_per_unit[revenue_diff_per_unit > 0].describe()\n\n revenue_reduction = revenue_diff_per_unit * num_affordable_units\n\n s = num_affordable_units.groupby(parcels_geography.juris_name).sum()\n print \"Feasibile affordable units by jurisdiction\"\n print s[s > 0].sort_values()\n\n return revenue_reduction, num_affordable_units\n\n\n# this adds fees to the max_profit column of the feasibility dataframe\n# fees are usually spatially specified and are per unit so that calculation\n# is done here as well\ndef policy_modifications_of_profit(feasibility, parcels):\n\n print \"Making policy modifications to profitability\"\n\n # this first section adds parcel unit-based fees\n\n units = feasibility[('residential', 'residential_sqft')] / \\\n parcels.ave_sqft_per_unit\n fees = (units * parcels.fees_per_unit).fillna(0)\n print \"Sum of residential fees: \", fees.sum()\n\n feasibility[(\"residential\", \"fees\")] = fees\n feasibility[(\"residential\", \"max_profit\")] -= fees\n\n # now non residential fees per sqft\n for use in [\"retail\", \"office\"]:\n\n if (use, 'non_residential_sqft') not in feasibility.columns:\n continue\n\n sqft = feasibility[(use, 'non_residential_sqft')]\n fees = (sqft * parcels.fees_per_sqft).fillna(0)\n print \"Sum of non-residential fees (%s): %.0f\" % (use, fees.sum())\n\n feasibility[(use, \"fees\")] = fees\n feasibility[(use, \"max_profit\")] -= fees\n\n # this section adds inclusionary housing reduction in revenue\n revenue_reduction, num_affordable_units = \\\n inclusionary_housing_revenue_reduction(feasibility, units)\n\n print \"Describe of inclusionary revenue reduction:\\n\", \\\n revenue_reduction[revenue_reduction > 0].describe()\n\n print \"Describe of number of affordable units:\\n\", \\\n num_affordable_units[num_affordable_units > 0].describe()\n\n feasibility[(\"residential\", \"policy_based_revenue_reduction\")] = \\\n revenue_reduction\n feasibility[(\"residential\", \"max_profit\")] -= revenue_reduction\n feasibility[(\"residential\", \"deed_restricted_units\")] = \\\n num_affordable_units\n feasibility[(\"residential\", \"inclusionary_units\")] = \\\n num_affordable_units\n\n settings = orca.get_injectable(\"settings\")\n\n if \"sb743_settings\" in settings[\"acct_settings\"]:\n\n sb743_settings = settings[\"acct_settings\"][\"sb743_settings\"]\n\n if sb743_settings[\"enable\"]:\n\n pct_modifications = feasibility[(\"residential\", \"vmt_res_cat\")].\\\n map(sb743_settings[\"sb743_pcts\"]) + 1\n\n print \"Modifying profit for SB743:\\n\", pct_modifications.describe()\n\n feasibility[(\"residential\", \"max_profit\")] *= pct_modifications\n\n if \"land_value_tax_settings\" in settings[\"acct_settings\"]:\n\n s = settings[\"acct_settings\"][\"land_value_tax_settings\"]\n\n if orca.get_injectable(\"scenario\") in s[\"enable_in_scenarios\"]:\n\n bins = s[\"bins\"]\n pcts = 
bins[\"pcts\"]\n # need to boud the breaks with a reasonable low and high goalpost\n breaks = [-1]+bins[\"breaks\"]+[2]\n\n pzc = orca.get_table(\"parcels_zoning_calculations\")\n s = pzc.zoned_build_ratio\n # map the breakpoints defined in yaml to the pcts defined there\n pct_modifications = pd.cut(s, breaks, labels=pcts).\\\n astype('float') + 1\n # if some parcels got skipped, fill them in with no modification\n pct_modifications = \\\n pct_modifications.reindex(pzc.index).fillna(1.0)\n\n print \"Modifying profit for Land Value Tax:\\n\", \\\n pct_modifications.describe()\n\n feasibility[(\"residential\", \"max_profit\")] *= pct_modifications\n\n if \"profitability_adjustment_policies\" in settings[\"acct_settings\"]:\n\n for key, policy in \\\n settings[\"acct_settings\"][\n \"profitability_adjustment_policies\"].items():\n\n if orca.get_injectable(\"scenario\") in \\\n policy[\"enable_in_scenarios\"]:\n\n parcels_geography = orca.get_table(\"parcels_geography\")\n\n pct_modifications = \\\n parcels_geography.local.eval(\n policy[\"profitability_adjustment_formula\"])\n pct_modifications += 1.0\n\n print \"Modifying profit for %s:\\n\" % policy[\"name\"], \\\n pct_modifications.describe()\n\n feasibility[(\"residential\", \"max_profit\")] *= pct_modifications\n\n print \"There are %d affordable units if all feasible projects are built\" %\\\n feasibility[(\"residential\", \"deed_restricted_units\")].sum()\n\n return feasibility\n\n\[email protected](\"calculate_vmt_fees\")\ndef calculate_vmt_fees(settings, year, buildings, vmt_fee_categories, coffer,\n summary, years_per_iter):\n\n vmt_settings = settings[\"acct_settings\"][\"vmt_settings\"]\n\n # this is the frame that knows which devs are subsidized\n df = summary.parcel_output\n\n df = df.query(\"%d <= year_built < %d and subsidized != True\" %\n (year, year + years_per_iter))\n\n if not len(df):\n return\n\n print \"%d projects pass the vmt filter\" % len(df)\n\n total_fees = 0\n\n if settings.get(\"vmt_fee_res\", False):\n\n df[\"res_fees\"] = df.vmt_res_cat.map(vmt_settings[\"res_fee_amounts\"])\n total_fees += (df.res_fees * df.residential_units).sum()\n print \"Applying vmt fees to %d units\" % df.residential_units.sum()\n\n if settings.get(\"vmt_fee_com\", False):\n\n df[\"com_fees\"] = df.vmt_res_cat.map(vmt_settings[\"com_fee_amounts\"])\n total_fees += (df.com_fees * df.non_residential_sqft).sum()\n print \"Applying vmt fees to %d commerical sqft\" % \\\n df.non_residential_sqft.sum()\n\n print \"Adding total vmt fees amount of $%.2f\" % total_fees\n\n metadata = {\n \"description\": \"VMT development fees\",\n \"year\": year\n }\n # the subaccount is meaningless here (it's a regional account) -\n # but the subaccount number is referred to below\n coffer[\"vmt_fee_acct\"].add_transaction(total_fees, subaccount=1,\n metadata=metadata)\n\n\ndef run_subsidized_developer(feasibility, parcels, buildings, households,\n acct_settings, settings, account, year,\n form_to_btype_func, add_extra_columns_func,\n summary, create_deed_restricted=False,\n policy_name=\"Unnamed\"):\n \"\"\"\n The subsidized residential developer model.\n\n Parameters\n ----------\n feasibility : DataFrame\n A DataFrame that is returned from run_feasibility for a given form\n parcels : DataFrameWrapper\n The standard parcels DataFrameWrapper (mostly just for run_developer)\n buildings : DataFrameWrapper\n The standard buildings DataFrameWrapper (passed to run_developer)\n households : DataFrameWrapper\n The households DataFrameWrapper (passed to 
run_developer)\n acct_settings : Dict\n A dictionary of settings to parameterize the model. Needs these keys:\n sending_buildings_subaccount_def - maps buildings to subaccounts\n receiving_buildings_filter - filter for eligible buildings\n settings : Dict\n The overall settings\n account : Account\n The Account object to use for subsidization\n year : int\n The current simulation year (will be added as metadata)\n form_to_btype_func : function\n Passed through to run_developer\n add_extra_columns_func : function\n Passed through to run_developer\n summary : Summary\n Used to add parcel summary information\n create_deed_restricted : bool\n Bool for whether to create deed restricted units with the subsidies\n or not. The logic at the time of this writing is to keep track of\n partial units so that when partial units sum to greater than a unit,\n that unit will be deed restricted.\n\n Returns\n -------\n Nothing\n\n Subsidized residential developer is designed to run before the normal\n residential developer - it will prioritize the developments we're\n subsidizing (although this is not strictly required - running this model\n after the market rate developer will just create a temporarily larger\n supply of units, which will probably create less market rate development in\n the next simulated year)\n the steps for subsidizing are essentially these\n\n 1 run feasibility with only_built set to false so that the feasibility of\n unprofitable units are recorded\n 2 temporarily filter to ONLY unprofitable units to check for possible\n subsidized units (normal developer takes care of market-rate units)\n 3 compute the number of units in these developments\n 4 divide cost by number of units in order to get the subsidy per unit\n 5 filter developments to parcels in \"receiving zone\" similar to the way we\n identified \"sending zones\"\n 6 iterate through subaccounts one at a time as subsidy will be limited\n to available funds in the subaccount (usually by jurisdiction)\n 7 sort ascending by subsidy per unit so that we minimize subsidy (but total\n subsidy is equivalent to total building cost)\n 8 cumsum the total subsidy in the buildings and locate the development\n where the subsidy is less than or equal to the amount in the account -\n filter to only those buildings (these will likely be built)\n 9 pass the results as \"feasible\" to run_developer - this is sort of a\n boundary case of developer but should run OK\n 10 for those developments that get built, make sure to subtract from\n account and keep a record (on the off chance that demand is less than\n the subsidized units, run through the standard code path, although it's\n very unlikely that there would be more subsidized housing than demand)\n \"\"\"\n # step 2\n feasibility = feasibility.replace([np.inf, -np.inf], np.nan)\n feasibility = feasibility[feasibility.max_profit < 0]\n\n # step 3\n feasibility['ave_sqft_per_unit'] = parcels.ave_sqft_per_unit\n feasibility['residential_units'] = \\\n np.floor(feasibility.residential_sqft / feasibility.ave_sqft_per_unit)\n\n # step 3B\n # can only add units - don't subtract units - this is an approximation\n # of the calculation that will be used to do this in the developer model\n feasibility = feasibility[\n feasibility.residential_units > feasibility.total_residential_units]\n\n # step 3C\n # towards the end, because we're about to sort by subsidy per unit, some\n # large projects never get built, because it could be a 100 unit project\n # times a 500k subsidy per unit. 
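That one project alone would need\n # 100 * $500k = $50m of subsidy, which is what the -50*1000000 cutoff just\n # below corresponds to; 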
thus we're going to try filtering by\n # the maximum subsidy for a single development here\n feasibility = feasibility[feasibility.max_profit > -50*1000000]\n\n # step 4\n feasibility['subsidy_per_unit'] = \\\n -1 * feasibility['max_profit'] / feasibility['residential_units']\n # assumption that even if the developer says this property is almost\n # profitable, even the administration costs are likely to cost at least\n # 10k / unit\n feasibility['subsidy_per_unit'] = feasibility.subsidy_per_unit.clip(10000)\n\n # step 5\n if \"receiving_buildings_filter\" in acct_settings:\n feasibility = feasibility.\\\n query(acct_settings[\"receiving_buildings_filter\"])\n else:\n # otherwise all buildings are valid\n pass\n\n new_buildings_list = []\n sending_bldgs = acct_settings[\"sending_buildings_subaccount_def\"]\n feasibility[\"regional\"] = 1\n feasibility[\"subaccount\"] = feasibility.eval(sending_bldgs)\n # step 6\n for subacct, amount in account.iter_subaccounts():\n print \"Subaccount: \", subacct\n\n df = feasibility[feasibility.subaccount == subacct]\n print \"Number of feasible projects in receiving zone:\", len(df)\n\n if len(df) == 0:\n continue\n\n # step 7\n df = df.sort(columns=['subsidy_per_unit'], ascending=True)\n # df.to_csv('subsidized_units_%d_%s_%s.csv' %\n # (orca.get_injectable(\"year\"), account.name, subacct))\n\n # step 8\n print \"Amount in subaccount: ${:,.2f}\".format(amount)\n num_bldgs = int((-1*df.max_profit).cumsum().searchsorted(amount))\n\n if num_bldgs == 0:\n continue\n\n # technically we only build these buildings if there's demand\n # print \"Building {:d} subsidized buildings\".format(num_bldgs)\n df = df.iloc[:int(num_bldgs)]\n\n df.columns = pd.MultiIndex.from_tuples(\n [(\"residential\", col) for col in df.columns])\n # disable stdout since developer is a bit verbose for this use case\n sys.stdout, old_stdout = StringIO(), sys.stdout\n\n kwargs = settings['residential_developer']\n # step 9\n new_buildings = utils.run_developer(\n \"residential\",\n households,\n buildings,\n \"residential_units\",\n parcels.parcel_size,\n parcels.ave_sqft_per_unit,\n parcels.total_residential_units,\n orca.DataFrameWrapper(\"feasibility\", df),\n year=year,\n form_to_btype_callback=form_to_btype_func,\n add_more_columns_callback=add_extra_columns_func,\n **kwargs)\n sys.stdout = old_stdout\n buildings = orca.get_table(\"buildings\")\n\n if new_buildings is None:\n continue\n\n # keep track of partial subsidized untis so that we always get credit\n # for a partial unit, even if it's not built in this specific building\n partial_subsidized_units = 0\n\n # step 10\n for index, new_building in new_buildings.iterrows():\n\n amt = new_building.max_profit\n metadata = {\n \"description\": \"Developing subsidized building\",\n \"year\": year,\n \"residential_units\": new_building.residential_units,\n \"building_id\": index\n }\n account.add_transaction(amt, subaccount=subacct,\n metadata=metadata)\n\n if create_deed_restricted:\n\n revenue_per_unit = new_building.building_revenue / \\\n new_building.residential_units\n total_subsidy = abs(new_building.max_profit)\n subsidized_units = total_subsidy / revenue_per_unit + \\\n partial_subsidized_units\n # right now there are inclusionary requirements\n already_subsidized_units = new_building.deed_restricted_units\n\n # get remainder\n partial_subsidized_units = subsidized_units % 1\n # round off for now\n subsidized_units = int(subsidized_units) + \\\n already_subsidized_units\n\n buildings.local.loc[index, 
\"deed_restricted_units\"] =\\\n int(round(subsidized_units))\n\n # also correct the debug output\n new_buildings.loc[index, \"deed_restricted_units\"] =\\\n int(round(subsidized_units))\n\n print \"Amount left after subsidy: ${:,.2f}\".\\\n format(account.total_transactions_by_subacct(subacct))\n\n new_buildings_list.append(new_buildings)\n\n total_len = reduce(lambda x, y: x+len(y), new_buildings_list, 0)\n if total_len == 0:\n print \"No subsidized buildings\"\n return\n\n new_buildings = pd.concat(new_buildings_list)\n print \"Built {} total subsidized buildings\".format(len(new_buildings))\n print \" Total subsidy: ${:,.2f}\".format(\n -1*new_buildings.max_profit.sum())\n print \" Total subsidized units: {:.0f}\".\\\n format(new_buildings.residential_units.sum())\n\n new_buildings[\"subsidized\"] = True\n new_buildings[\"policy_name\"] = policy_name\n\n summary.add_parcel_output(new_buildings)\n\n\[email protected]('subsidized_residential_feasibility')\ndef subsidized_residential_feasibility(\n parcels, settings,\n add_extra_columns_func, parcel_sales_price_sqft_func,\n parcel_is_allowed_func, parcels_geography):\n\n kwargs = settings['feasibility'].copy()\n kwargs[\"only_built\"] = False\n kwargs[\"forms_to_test\"] = [\"residential\"]\n # step 1\n utils.run_feasibility(parcels,\n parcel_sales_price_sqft_func,\n parcel_is_allowed_func,\n **kwargs)\n\n feasibility = orca.get_table(\"feasibility\").to_frame()\n # get rid of the multiindex that comes back from feasibility\n feasibility = feasibility.stack(level=0).reset_index(level=1, drop=True)\n # join to parcels_geography for filtering\n feasibility = feasibility.join(parcels_geography.to_frame())\n\n # add the multiindex back\n feasibility.columns = pd.MultiIndex.from_tuples(\n [(\"residential\", col) for col in feasibility.columns])\n\n feasibility = policy_modifications_of_profit(feasibility, parcels)\n\n orca.add_table(\"feasibility\", feasibility)\n\n df = orca.get_table(\"feasibility\").to_frame()\n df = df.stack(level=0).reset_index(level=1, drop=True)\n df.to_csv(\"runs/run{}_feasibility_{}.csv\".format(\n orca.get_injectable(\"run_number\"),\n orca.get_injectable(\"year\")))\n\n\[email protected]('subsidized_residential_developer_vmt')\ndef subsidized_residential_developer_vmt(\n households, buildings, add_extra_columns_func,\n parcels_geography, year, acct_settings, parcels,\n settings, summary, coffer, form_to_btype_func, feasibility):\n\n feasibility = feasibility.to_frame()\n feasibility = feasibility.stack(level=0).reset_index(level=1, drop=True)\n\n run_subsidized_developer(feasibility,\n parcels,\n buildings,\n households,\n acct_settings[\"vmt_settings\"],\n settings,\n coffer[\"vmt_fee_acct\"],\n year,\n form_to_btype_func,\n add_extra_columns_func,\n summary,\n create_deed_restricted=True,\n policy_name=\"VMT\")\n\n\[email protected]()\ndef subsidized_residential_developer_lump_sum_accts(\n households, buildings, add_extra_columns_func,\n parcels_geography, year, acct_settings, parcels,\n settings, summary, coffer, form_to_btype_func,\n scenario):\n\n for key, acct in settings[\"acct_settings\"][\"lump_sum_accounts\"].items():\n\n # quick return in order to save performance time\n if scenario not in acct[\"enable_in_scenarios\"]:\n continue\n\n print \"Running the subsidized developer for acct: %s\" % acct[\"name\"]\n\n # need to rerun the subsidized feasibility every time and get new\n # results - this is not ideal and is a story to fix in pivotal, but the\n # only cost is in time - the results should be the same\n 
orca.eval_step(\"subsidized_residential_feasibility\")\n feasibility = orca.get_table(\"feasibility\").to_frame()\n feasibility = feasibility.stack(level=0).\\\n reset_index(level=1, drop=True)\n\n run_subsidized_developer(feasibility,\n parcels,\n buildings,\n households,\n acct,\n settings,\n coffer[acct[\"name\"]],\n year,\n form_to_btype_func,\n add_extra_columns_func,\n summary,\n create_deed_restricted=acct[\n \"subsidize_affordable\"],\n policy_name=acct[\"name\"])\n\n # set to an empty dataframe to save memory\n orca.add_table(\"feasibility\", pd.DataFrame())\n", "id": "7979472", "language": "Python", "matching_score": 5.994385719299316, "max_stars_count": 0, "path": "bayarea_urbansim/baus/subsidies.py" }, { "content": "from urbansim.utils import misc\nimport os\nimport sys\nimport orca\nimport yaml\nimport datasources\nimport variables\nfrom utils import parcel_id_to_geom_id, geom_id_to_parcel_id, add_buildings\nfrom urbansim.utils import networks\nimport pandana.network as pdna\nfrom urbansim_defaults import models\nfrom urbansim_defaults import utils\nfrom urbansim.developer import sqftproforma, developer\nfrom urbansim.developer.developer import Developer as dev\nimport subsidies\nimport summaries\nimport numpy as np\nimport pandas as pd\n\n\[email protected]('rsh_simulate')\ndef rsh_simulate(buildings, aggregations, settings):\n utils.hedonic_simulate(\"rsh.yaml\", buildings, aggregations,\n \"residential_price\", cast=True)\n if \"rsh_simulate\" in settings:\n low = float(settings[\"rsh_simulate\"][\"low\"])\n high = float(settings[\"rsh_simulate\"][\"high\"])\n buildings.update_col(\"residential_price\",\n buildings.residential_price.clip(low, high))\n print \"Clipped rsh_simulate produces\\n\", \\\n buildings.residential_price.describe()\n\n\[email protected]('nrh_simulate')\ndef nrh_simulate(buildings, aggregations):\n return utils.hedonic_simulate(\"nrh.yaml\", buildings, aggregations,\n \"non_residential_price\", cast=True)\n\n\[email protected]('hlcm_simulate')\ndef hlcm_simulate(households, buildings, aggregations, settings, low_income):\n\n fname = misc.config(\"hlcm.yaml\")\n\n print \"\\nAffordable housing HLCM:\\n\"\n\n cfg = yaml.load(open(fname))\n cfg[\"choosers_predict_filters\"] = \"income <= %d\" % low_income\n open(misc.config(\"hlcm_tmp.yaml\"), \"w\").write(yaml.dump(cfg))\n\n # low income into affordable units\n utils.lcm_simulate(\n \"hlcm_tmp.yaml\", households, buildings, aggregations, \"building_id\",\n \"residential_units\", \"vacant_affordable_units\", settings.get(\n \"enable_supply_correction\", None), cast=True)\n\n os.remove(misc.config(\"hlcm_tmp.yaml\"))\n\n print \"\\nMarket rate housing HLCM:\\n\"\n\n # then everyone into market rate units\n utils.lcm_simulate(\n \"hlcm.yaml\", households, buildings, aggregations, \"building_id\",\n \"residential_units\", \"vacant_market_rate_units\", settings.get(\n \"enable_supply_correction\", None), cast=True)\n\n\[email protected]('households_transition')\ndef households_transition(households, household_controls, year, settings):\n s = orca.get_table('households').base_income_quartile.value_counts()\n print \"Distribution by income before:\\n\", (s/s.sum())\n ret = utils.full_transition(households,\n household_controls,\n year,\n settings['households_transition'],\n \"building_id\")\n s = orca.get_table('households').base_income_quartile.value_counts()\n print \"Distribution by income after:\\n\", (s/s.sum())\n return ret\n\n\[email protected]('households_relocation')\ndef 
households_relocation(households, settings, years_per_iter):\n rate = settings['rates']['households_relocation']\n rate = min(rate * years_per_iter, 1.0)\n return utils.simple_relocation(households, rate, \"building_id\")\n\n\[email protected](cache=True)\ndef employment_relocation_rates():\n\n df = pd.read_csv(os.path.join(\"data\", \"employment_relocation_rates.csv\"))\n\n df = df.set_index(\"zone_id\").stack().reset_index()\n\n df.columns = [\"zone_id\", \"empsix\", \"rate\"]\n\n return df\n\n\n# this is a list of parcel_ids which are to be treated as static\[email protected]()\ndef static_parcels(settings, parcels):\n # list of geom_ids to not relocate\n static_parcels = settings[\"static_parcels\"]\n # geom_ids -> parcel_ids\n return geom_id_to_parcel_id(\n pd.DataFrame(index=static_parcels), parcels).index.values\n\n\[email protected]()\ndef jobs_relocation(jobs, employment_relocation_rates, years_per_iter,\n settings, static_parcels, buildings):\n\n # get buildings that are on those parcels\n static_buildings = buildings.index[\n buildings.parcel_id.isin(static_parcels)]\n\n df = pd.merge(jobs.to_frame([\"zone_id\", \"empsix\"]),\n employment_relocation_rates.local,\n on=[\"zone_id\", \"empsix\"],\n how=\"left\")\n\n df.index = jobs.index\n\n # get the move rate for each job\n rate = (df.rate * years_per_iter).clip(0, 1.0)\n # get random floats and move jobs if they're less than the rate\n move = np.random.random(len(rate)) < rate\n\n # also don't move jobs that are on static parcels\n move &= ~jobs.building_id.isin(static_buildings)\n\n # get the index of the moving jobs\n index = jobs.index[move]\n\n # set jobs that are moving to a building_id of -1 (means unplaced)\n jobs.update_col_from_series(\"building_id\",\n pd.Series(-1, index=index))\n\n\n# this deviates from the step in urbansim_defaults only in how it deals with\n# demolished buildings - this version only demolishes when there is a row to\n# demolish in the csv file - this also allows building multiple buildings and\n# just adding capacity on an existing parcel, by adding one building at a time\[email protected](\"scheduled_development_events\")\ndef scheduled_development_events(buildings, development_projects,\n demolish_events, summary, year, parcels,\n settings, years_per_iter, parcels_geography,\n building_sqft_per_job, vmt_fee_categories):\n\n # first demolish\n demolish = demolish_events.to_frame().\\\n query(\"%d <= year_built < %d\" % (year, year + years_per_iter))\n print \"Demolishing/building %d buildings\" % len(demolish)\n l1 = len(buildings)\n buildings = utils._remove_developed_buildings(\n buildings.to_frame(buildings.local_columns),\n demolish,\n unplace_agents=[\"households\", \"jobs\"])\n orca.add_table(\"buildings\", buildings)\n buildings = orca.get_table(\"buildings\")\n print \"Demolished %d buildings\" % (l1 - len(buildings))\n print \" (this number is smaller when parcel has no existing buildings)\"\n\n # then build\n dps = development_projects.to_frame().\\\n query(\"%d <= year_built < %d\" % (year, year + years_per_iter))\n\n if len(dps) == 0:\n return\n\n new_buildings = utils.scheduled_development_events(\n buildings, dps,\n remove_developed_buildings=False,\n unplace_agents=['households', 'jobs'])\n new_buildings[\"form\"] = new_buildings.building_type_id.map(\n settings['building_type_map']).str.lower()\n new_buildings[\"job_spaces\"] = new_buildings.building_sqft / \\\n new_buildings.building_type_id.fillna(-1).map(building_sqft_per_job)\n new_buildings[\"job_spaces\"] = 
new_buildings.job_spaces.astype('int')\n new_buildings[\"geom_id\"] = parcel_id_to_geom_id(new_buildings.parcel_id)\n new_buildings[\"SDEM\"] = True\n new_buildings[\"subsidized\"] = False\n\n new_buildings[\"zone_id\"] = misc.reindex(\n parcels.zone_id, new_buildings.parcel_id)\n new_buildings[\"vmt_res_cat\"] = misc.reindex(\n vmt_fee_categories.res_cat, new_buildings.zone_id)\n del new_buildings[\"zone_id\"]\n new_buildings[\"pda\"] = parcels_geography.pda_id.loc[\n new_buildings.parcel_id].values\n\n summary.add_parcel_output(new_buildings)\n\n\[email protected](\"supply_and_demand_multiplier_func\", autocall=False)\ndef supply_and_demand_multiplier_func(demand, supply):\n s = demand / supply\n settings = orca.get_injectable('settings')\n print \"Number of submarkets where demand exceeds supply:\", len(s[s > 1.0])\n # print \"Raw relationship of supply and demand\\n\", s.describe()\n supply_correction = settings[\"enable_supply_correction\"]\n clip_change_high = supply_correction[\"kwargs\"][\"clip_change_high\"]\n t = s\n t -= 1.0\n t = t / t.max() * (clip_change_high-1)\n t += 1.0\n s.loc[s > 1.0] = t.loc[s > 1.0]\n # print \"Shifters for current iteration\\n\", s.describe()\n return s, (s <= 1.0).all()\n\n\n# this if the function for mapping a specific building that we build to a\n# specific building type\[email protected](\"form_to_btype_func\", autocall=False)\ndef form_to_btype_func(building):\n settings = orca.get_injectable('settings')\n form = building.form\n dua = building.residential_units / (building.parcel_size / 43560.0)\n # precise mapping of form to building type for residential\n if form is None or form == \"residential\":\n if dua < 16:\n return 1\n elif dua < 32:\n return 2\n return 3\n return settings[\"form_to_btype\"][form][0]\n\n\[email protected](\"add_extra_columns_func\", autocall=False)\ndef add_extra_columns(df):\n for col in [\"residential_price\", \"non_residential_price\"]:\n df[col] = 0\n\n if \"deed_restricted_units\" not in df.columns:\n df[\"deed_restricted_units\"] = 0\n else:\n print \"Number of deed restricted units built = %d\" %\\\n df.deed_restricted_units.sum()\n\n df[\"redfin_sale_year\"] = 2012\n\n if \"residential_units\" not in df:\n df[\"residential_units\"] = 0\n\n if \"parcel_size\" not in df:\n df[\"parcel_size\"] = \\\n orca.get_table(\"parcels\").parcel_size.loc[df.parcel_id]\n\n if \"year\" in orca.orca._INJECTABLES and \"year_built\" not in df:\n df[\"year_built\"] = orca.get_injectable(\"year\")\n\n if \"form_to_btype_func\" in orca.orca._INJECTABLES and \\\n \"building_type_id\" not in df:\n form_to_btype_func = orca.get_injectable(\"form_to_btype_func\")\n df[\"building_type_id\"] = df.apply(form_to_btype_func, axis=1)\n\n return df\n\n\[email protected]('alt_feasibility')\ndef alt_feasibility(parcels, settings,\n parcel_sales_price_sqft_func,\n parcel_is_allowed_func):\n kwargs = settings['feasibility']\n config = sqftproforma.SqFtProFormaConfig()\n config.parking_rates[\"office\"] = 1.5\n config.parking_rates[\"retail\"] = 1.5\n\n utils.run_feasibility(parcels,\n parcel_sales_price_sqft_func,\n parcel_is_allowed_func,\n config=config,\n **kwargs)\n\n f = subsidies.policy_modifications_of_profit(\n orca.get_table('feasibility').to_frame(),\n parcels)\n\n orca.add_table(\"feasibility\", f)\n\n\[email protected]('residential_developer')\ndef residential_developer(feasibility, households, buildings, parcels, year,\n settings, summary, form_to_btype_func,\n add_extra_columns_func, parcels_geography,\n limits_settings, 
final_year):\n\n kwargs = settings['residential_developer']\n\n num_units = dev.compute_units_to_build(\n len(households),\n buildings[\"residential_units\"].sum(),\n kwargs['target_vacancy'])\n\n targets = []\n typ = \"Residential\"\n # now apply limits - limits are assumed to be yearly, apply to an\n # entire jurisdiction and be in terms of residential_units or job_spaces\n if typ in limits_settings:\n\n juris_name = parcels_geography.juris_name.\\\n reindex(parcels.index).fillna('Other')\n\n juris_list = limits_settings[typ].keys()\n for juris, limit in limits_settings[typ].items():\n\n # the actual target is the limit times the number of years run\n # so far in the simulation (plus this year), minus the amount\n # built in previous years - in other words, you get rollover\n # and development is lumpy\n\n current_total = parcels.total_residential_units[\n (juris_name == juris) & (parcels.newest_building >= 2010)]\\\n .sum()\n\n target = (year - 2010 + 1) * limit - current_total\n # make sure we don't overshoot the total development of the limit\n # for the horizon year - for instance, in Half Moon Bay we have\n # a very low limit and a single development in a far out year can\n # easily build over the limit for the total simulation\n max_target = (final_year - 2010 + 1) * limit - current_total\n\n if target <= 0:\n continue\n\n targets.append((juris_name == juris, target, max_target, juris))\n num_units -= target\n\n # other cities not in the targets get the remaining target\n targets.append((~juris_name.isin(juris_list), num_units, None, \"none\"))\n\n else:\n # otherwise use all parcels with total number of units\n targets.append((parcels.index == parcels.index,\n num_units, None, \"none\"))\n\n for parcel_mask, target, final_target, juris in targets:\n\n print \"Running developer for %s with target of %d\" % \\\n (str(juris), target)\n\n # this was a fairly heinous bug - have to get the building wrapper\n # again because the buildings df gets modified by the run_developer\n # method below\n buildings = orca.get_table('buildings')\n\n new_buildings = utils.run_developer(\n \"residential\",\n households,\n buildings,\n \"residential_units\",\n parcels.parcel_size[parcel_mask],\n parcels.ave_sqft_per_unit[parcel_mask],\n parcels.total_residential_units[parcel_mask],\n feasibility,\n year=year,\n form_to_btype_callback=form_to_btype_func,\n add_more_columns_callback=add_extra_columns_func,\n num_units_to_build=target,\n **kwargs)\n\n buildings = orca.get_table('buildings')\n\n if new_buildings is not None:\n new_buildings[\"subsidized\"] = False\n\n if final_target is not None and new_buildings is not None:\n # make sure we don't overbuild the target for the whole simulation\n overshoot = new_buildings.net_units.sum() - max_target\n\n if overshoot > 0:\n index = new_buildings.tail(1).index[0]\n index = int(index)\n # make sure we don't get into a negative unit situation\n overshoot = min(overshoot,\n buildings.local.loc[index,\n \"residential_units\"])\n buildings.local.loc[index, \"residential_units\"] -= overshoot\n\n summary.add_parcel_output(new_buildings)\n\n\[email protected]()\ndef retail_developer(jobs, buildings, parcels, nodes, feasibility,\n settings, summary, add_extra_columns_func, net):\n\n dev_settings = settings['non_residential_developer']\n all_units = dev.compute_units_to_build(\n len(jobs),\n buildings.job_spaces.sum(),\n dev_settings['kwargs']['target_vacancy'])\n\n target = all_units * float(dev_settings['type_splits'][\"Retail\"])\n # target here is in sqft\n 
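    # descriptive note (inferred from the surrounding code): the job-space
    # target from compute_units_to_build is converted to square feet on the
    # next line by multiplying by the sqft-per-job factor for building type
    # 10, which appears to be the retail type elsewhere in this file (e.g.
    # developer_reprocess assigns building_type_id = 10 to its ground floor
    # retail records); the loop further down then works this target off in
    # sqft terms, project by project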
target *= settings[\"building_sqft_per_job\"][10]\n\n feasibility = feasibility.to_frame().loc[:, \"retail\"]\n feasibility = feasibility.dropna(subset=[\"max_profit\"])\n\n feasibility[\"non_residential_sqft\"] = \\\n feasibility.non_residential_sqft.astype(\"int\")\n\n feasibility[\"retail_ratio\"] = parcels.retail_ratio\n feasibility = feasibility.reset_index()\n\n # create features\n f1 = feasibility.retail_ratio / feasibility.retail_ratio.max()\n f2 = feasibility.max_profit / feasibility.max_profit.max()\n\n # combine features in probability function - it's like combining expense\n # of building the building with the market in the neighborhood\n p = f1 * 1.5 + f2\n p = p.clip(lower=1.0/len(p)/10)\n\n print \"Attempting to build {:,} retail sqft\".format(target)\n\n # order by weighted random sample\n feasibility = feasibility.sample(frac=1.0, weights=p)\n\n bldgs = buildings.to_frame(buildings.local_columns + [\"general_type\"])\n\n devs = []\n\n for dev_id, d in feasibility.iterrows():\n\n if target <= 0:\n break\n\n # any special logic to filter these devs?\n\n # remove new dev sqft from target\n target -= d.non_residential_sqft\n\n # add redeveloped sqft to target\n filt = \"general_type == 'Retail' and parcel_id == %d\" % \\\n d[\"parcel_id\"]\n target += bldgs.query(filt).non_residential_sqft.sum()\n\n devs.append(d)\n\n if len(devs) == 0:\n return\n\n # record keeping - add extra columns to match building dataframe\n # add the buidings and demolish old buildings, and add to debug output\n devs = pd.DataFrame(devs, columns=feasibility.columns)\n\n print \"Building {:,} retail sqft in {:,} projects\".format(\n devs.non_residential_sqft.sum(), len(devs))\n if target > 0:\n print \" WARNING: retail target not met\"\n\n devs[\"form\"] = \"retail\"\n devs = add_extra_columns_func(devs)\n\n add_buildings(buildings, devs)\n\n summary.add_parcel_output(devs)\n\n\[email protected]()\ndef office_developer(feasibility, jobs, buildings, parcels, year,\n settings, summary, form_to_btype_func, scenario,\n add_extra_columns_func, parcels_geography,\n limits_settings):\n\n dev_settings = settings['non_residential_developer']\n\n # I'm going to try a new way of computing this because the math the other\n # way is simply too hard. 
Basically we used to try and apportion sectors\n # into the demand for office, retail, and industrial, but there's just so\n # much dirtyness to the data, for instance 15% of jobs are in residential\n # buildings, and 15% in other buildings, it's just hard to know how much\n # to build, we I think the right thing to do is to compute the number of\n # job spaces that are required overall, and then to apportion that new dev\n # into the three non-res types with a single set of coefficients\n all_units = dev.compute_units_to_build(\n len(jobs),\n buildings.job_spaces.sum(),\n dev_settings['kwargs']['target_vacancy'])\n\n print \"Total units to build = %d\" % all_units\n if all_units <= 0:\n return\n\n for typ in [\"Office\"]:\n\n print \"\\nRunning for type: \", typ\n\n num_units = all_units * float(dev_settings['type_splits'][typ])\n\n targets = []\n # now apply limits - limits are assumed to be yearly, apply to an\n # entire jurisdiction and be in terms of residential_units or\n # job_spaces\n if year > 2015 and typ in limits_settings:\n\n juris_name = parcels_geography.juris_name.\\\n reindex(parcels.index).fillna('Other')\n\n juris_list = limits_settings[typ].keys()\n for juris, limit in limits_settings[typ].items():\n\n # the actual target is the limit times the number of years run\n # so far in the simulation (plus this year), minus the amount\n # built in previous years - in other words, you get rollover\n # and development is lumpy\n\n current_total = parcels.total_job_spaces[\n (juris_name == juris) & (parcels.newest_building > 2015)]\\\n .sum()\n\n target = (year - 2015 + 1) * limit - current_total\n\n if target <= 0:\n print \"Already met target for juris = %s\" % juris\n print \" target = %d, current_total = %d\" %\\\n (target, current_total)\n continue\n\n targets.append((juris_name == juris, target, juris))\n num_units -= target\n\n # other cities not in the targets get the remaining target\n targets.append((~juris_name.isin(juris_list), num_units, \"none\"))\n\n else:\n # otherwise use all parcels with total number of units\n targets.append((parcels.index == parcels.index, num_units, \"none\"))\n\n for parcel_mask, target, juris in targets:\n\n print \"Running developer for %s with target of %d\" % \\\n (str(juris), target)\n print \"Parcels in play:\\n\", pd.Series(parcel_mask).value_counts()\n\n # this was a fairly heinous bug - have to get the building wrapper\n # again because the buildings df gets modified by the run_developer\n # method below\n buildings = orca.get_table('buildings')\n\n new_buildings = utils.run_developer(\n typ.lower(),\n jobs,\n buildings,\n \"job_spaces\",\n parcels.parcel_size[parcel_mask],\n parcels.ave_sqft_per_unit[parcel_mask],\n parcels.total_job_spaces[parcel_mask],\n feasibility,\n year=year,\n form_to_btype_callback=form_to_btype_func,\n add_more_columns_callback=add_extra_columns_func,\n residential=False,\n num_units_to_build=target,\n **dev_settings['kwargs'])\n\n if new_buildings is not None:\n new_buildings[\"subsidized\"] = False\n\n summary.add_parcel_output(new_buildings)\n\n\[email protected]()\ndef developer_reprocess(buildings, year, years_per_iter, jobs,\n parcels, summary, parcel_is_allowed_func):\n # this takes new units that come out of the developer, both subsidized\n # and non-subsidized and reprocesses them as required - please read\n # comments to see what this means in detail\n\n # 20% of base year buildings which are \"residential\" have job spaces - I\n # mean, there is a ratio of job spaces to res units in residential\n 
# buildings of 1 to 5 - this ratio should be kept for future year\n # buildings\n s = buildings.general_type == \"Residential\"\n res_units = buildings.residential_units[s].sum()\n job_spaces = buildings.job_spaces[s].sum()\n\n to_add = res_units * .05 - job_spaces\n if to_add > 0:\n print \"Adding %d job_spaces\" % to_add\n res_units = buildings.residential_units[s]\n # bias selection of places to put job spaces based on res units\n print res_units.describe()\n print res_units[res_units < 0]\n add_indexes = np.random.choice(res_units.index.values, size=to_add,\n replace=True,\n p=(res_units/res_units.sum()))\n # collect same indexes\n add_indexes = pd.Series(add_indexes).value_counts()\n # this is sqft per job for residential bldgs\n add_sizes = add_indexes * 400\n print \"Job spaces in res before adjustment: \", \\\n buildings.job_spaces[s].sum()\n buildings.local.loc[add_sizes.index,\n \"non_residential_sqft\"] += add_sizes.values\n print \"Job spaces in res after adjustment: \",\\\n buildings.job_spaces[s].sum()\n\n # the second step here is to add retail to buildings that are greater than\n # X stories tall - presumably this is a ground floor retail policy\n old_buildings = buildings.to_frame(buildings.local_columns)\n new_buildings = old_buildings.query(\n '%d == year_built and stories >= 4' % year)\n\n print \"Attempting to add ground floor retail to %d devs\" % \\\n len(new_buildings)\n retail = parcel_is_allowed_func(\"retail\")\n new_buildings = new_buildings[retail.loc[new_buildings.parcel_id].values]\n print \"Disallowing dev on these parcels:\"\n print \" %d devs left after retail disallowed\" % len(new_buildings)\n\n # this is the key point - make these new buildings' nonres sqft equal\n # to one story of the new buildings\n new_buildings.non_residential_sqft = new_buildings.building_sqft / \\\n new_buildings.stories * .8\n\n new_buildings[\"residential_units\"] = 0\n new_buildings[\"residential_sqft\"] = 0\n new_buildings[\"building_sqft\"] = new_buildings.non_residential_sqft\n new_buildings[\"stories\"] = 1\n new_buildings[\"building_type_id\"] = 10\n\n # this is a fairly arbitrary rule, but we're only adding ground floor\n # retail in areas that are underserved right now - this is defined as\n # the location where the retail ratio (ratio of income to retail sqft)\n # is greater than the median\n ratio = parcels.retail_ratio.loc[new_buildings.parcel_id]\n new_buildings = new_buildings[ratio.values > ratio.median()]\n\n print \"Adding %d sqft of ground floor retail in %d locations\" % \\\n (new_buildings.non_residential_sqft.sum(), len(new_buildings))\n\n all_buildings = dev.merge(old_buildings, new_buildings)\n orca.add_table(\"buildings\", all_buildings)\n\n new_buildings[\"form\"] = \"retail\"\n # this is sqft per job for retail use - this is all rather\n # ad-hoc so I'm hard-coding\n new_buildings[\"job_spaces\"] = \\\n (new_buildings.non_residential_sqft / 445.0).astype('int')\n new_buildings[\"net_units\"] = new_buildings.job_spaces\n summary.add_parcel_output(new_buildings)\n\n # got to get the frame again because we just added rows\n buildings = orca.get_table('buildings')\n buildings_df = buildings.to_frame(\n ['year_built', 'building_sqft', 'general_type'])\n sqft_by_gtype = buildings_df.query('year_built >= %d' % year).\\\n groupby('general_type').building_sqft.sum()\n print \"New square feet by general type in millions:\\n\",\\\n sqft_by_gtype / 1000000.0\n\n\ndef proportional_job_allocation(parcel_id):\n # this method takes a parcel and increases the number of 
jobs on the\n # parcel in proportion to the ratio of sectors that existed in the base yr\n # this is because elcms can't get the distribution right in some cases, eg\n # to keep mostly gov't jobs in city hall, etc - these are largely\n # institutions and not subject to the market\n\n # get buildings on this parcel\n buildings = orca.get_table(\"buildings\").to_frame(\n [\"parcel_id\", \"job_spaces\", \"zone_id\", \"year_built\"]).\\\n query(\"parcel_id == %d\" % parcel_id)\n\n # get jobs in those buildings\n all_jobs = orca.get_table(\"jobs\").local\n jobs = all_jobs[\n all_jobs.building_id.isin(buildings.query(\"year_built <= 2015\").index)]\n\n # get job distribution by sector for this parcel\n job_dist = jobs.empsix.value_counts()\n\n # only add jobs to new buildings records\n for index, building in buildings.query(\"year_built > 2015\").iterrows():\n\n num_new_jobs = building.job_spaces - len(\n all_jobs.query(\"building_id == %d\" % index))\n\n if num_new_jobs == 0:\n continue\n\n sectors = np.random.choice(job_dist.index, size=num_new_jobs,\n p=job_dist/job_dist.sum())\n new_jobs = pd.DataFrame({\"empsix\": sectors, \"building_id\": index})\n # make sure index is incrementing\n new_jobs.index = new_jobs.index + 1 + np.max(all_jobs.index.values)\n\n print \"Adding {} new jobs to parcel {} with proportional model\".format(\n num_new_jobs, parcel_id)\n print new_jobs.head()\n all_jobs = all_jobs.append(new_jobs)\n orca.add_table(\"jobs\", all_jobs)\n\n\[email protected]()\ndef static_parcel_proportional_job_allocation(static_parcels):\n for parcel_id in static_parcels:\n proportional_job_allocation(parcel_id)\n\n\ndef make_network(name, weight_col, max_distance):\n st = pd.HDFStore(os.path.join(misc.data_dir(), name), \"r\")\n nodes, edges = st.nodes, st.edges\n net = pdna.Network(nodes[\"x\"], nodes[\"y\"], edges[\"from\"], edges[\"to\"],\n edges[[weight_col]])\n net.precompute(max_distance)\n return net\n\n\ndef make_network_from_settings(settings):\n return make_network(\n settings[\"name\"],\n settings.get(\"weight_col\", \"weight\"),\n settings['max_distance']\n )\n\n\[email protected]('net', cache=True)\ndef build_networks(settings):\n nets = {}\n pdna.reserve_num_graphs(len(settings[\"build_networks\"]))\n\n # yeah, starting to hardcode stuff, not great, but can only\n # do nearest queries on the first graph I initialize due to crummy\n # limitation in pandana\n for key in settings[\"build_networks\"].keys():\n nets[key] = make_network_from_settings(\n settings['build_networks'][key]\n )\n\n return nets\n\n\[email protected]('local_pois')\ndef local_pois(settings):\n # because of the aforementioned limit of one netowrk at a time for the\n # POIS, as well as the large amount of memory used, this is now a\n # preprocessing step\n n = make_network(\n settings['build_networks']['walk']['name'],\n \"weight\", 3000)\n\n n.init_pois(\n num_categories=1,\n max_dist=3000,\n max_pois=1)\n\n cols = {}\n\n locations = pd.read_csv(os.path.join(misc.data_dir(), 'bart_stations.csv'))\n n.set_pois(\"tmp\", locations.lng, locations.lat)\n cols[\"bartdist\"] = n.nearest_pois(3000, \"tmp\", num_pois=1)[1]\n\n locname = 'pacheights'\n locs = orca.get_table('landmarks').local.query(\"name == '%s'\" % locname)\n n.set_pois(\"tmp\", locs.lng, locs.lat)\n cols[\"pacheights\"] = n.nearest_pois(3000, \"tmp\", num_pois=1)[1]\n\n df = pd.DataFrame(cols)\n df.index.name = \"node_id\"\n df.to_csv('local_poi_distances.csv')\n\n\[email protected]('neighborhood_vars')\ndef neighborhood_vars(net):\n nodes = 
networks.from_yaml(net[\"walk\"], \"neighborhood_vars.yaml\")\n nodes = nodes.replace(-np.inf, np.nan)\n nodes = nodes.replace(np.inf, np.nan)\n nodes = nodes.fillna(0)\n\n # nodes2 = pd.read_csv('data/local_poi_distances.csv', index_col=\"node_id\")\n # nodes = pd.concat([nodes, nodes2], axis=1)\n\n print nodes.describe()\n orca.add_table(\"nodes\", nodes)\n\n\[email protected]('regional_vars')\ndef regional_vars(net):\n nodes = networks.from_yaml(net[\"drive\"], \"regional_vars.yaml\")\n nodes = nodes.fillna(0)\n\n nodes2 = pd.read_csv('data/regional_poi_distances.csv',\n index_col=\"tmnode_id\")\n nodes = pd.concat([nodes, nodes2], axis=1)\n\n print nodes.describe()\n orca.add_table(\"tmnodes\", nodes)\n\n\[email protected]('regional_pois')\ndef regional_pois(settings, landmarks):\n # because of the aforementioned limit of one netowrk at a time for the\n # POIS, as well as the large amount of memory used, this is now a\n # preprocessing step\n n = make_network(\n settings['build_networks']['drive']['name'],\n \"CTIMEV\", 75)\n\n n.init_pois(\n num_categories=1,\n max_dist=75,\n max_pois=1)\n\n cols = {}\n for locname in [\"embarcadero\", \"stanford\", \"pacheights\"]:\n locs = landmarks.local.query(\"name == '%s'\" % locname)\n n.set_pois(\"tmp\", locs.lng, locs.lat)\n cols[locname] = n.nearest_pois(75, \"tmp\", num_pois=1)[1]\n\n df = pd.DataFrame(cols)\n print df.describe()\n df.index.name = \"tmnode_id\"\n df.to_csv('regional_poi_distances.csv')\n\n\[email protected]('price_vars')\ndef price_vars(net):\n nodes2 = networks.from_yaml(net[\"walk\"], \"price_vars.yaml\")\n nodes2 = nodes2.fillna(0)\n print nodes2.describe()\n nodes = orca.get_table('nodes')\n nodes = nodes.to_frame().join(nodes2)\n orca.add_table(\"nodes\", nodes)\n\n\n# this is not really simulation - just writing a method to get average\n# appreciation per zone over the past X number of years\[email protected](\"mls_appreciation\")\ndef mls_appreciation(homesales, year, summary):\n buildings = homesales\n\n years = buildings.redfin_sale_year\n zone_ids = buildings.zone_id\n price = buildings.redfin_sale_price\n\n # minimum observations\n min_obs = 10\n\n def aggregate_year(year):\n mask = years == year\n s = price[mask].groupby(zone_ids[mask]).median()\n size = price[mask].groupby(zone_ids[mask]).size()\n return s, size\n\n current, current_size = aggregate_year(2013)\n\n zones = pd.DataFrame(index=zone_ids.unique())\n\n past, past_size = aggregate_year(year)\n appreciation = (current / past).pow(1.0/(2013-year))\n # zero out the zones with too few observations\n appreciation = appreciation * (current_size > min_obs).astype('int')\n appreciation = appreciation * (past_size > min_obs).astype('int')\n zones[\"appreciation\"] = appreciation\n\n print zones.describe()\n\n summary.add_zone_output(zones, \"appreciation\", year)\n summary.write_zone_output()\n\n\[email protected]()\ndef correct_baseyear_data(buildings, parcels, jobs):\n # sonoma county has too much vacancy in the buildings so we're\n # going to lower it a bit to match job totals - I'm doing it here\n # as opposed to in datasources as it requires registered orca\n # variables\n\n '''\n These are the original vacancies\n Alameda 0.607865\n Contra Costa 0.464277\n Marin 0.326655\n Napa 0.427900\n San Francisco 0.714938\n San Mateo 0.285090\n Santa Clara 0.368031\n Solano 0.383663\n Sonoma 0.434263\n '''\n\n '''\n After round one of changes\n Alameda 0.392677\n Contra Costa 0.289937\n Marin 0.183273\n Napa 0.280621\n San Francisco 0.468813\n San Mateo 0.137320\n 
Santa Clara 0.185758\n Solano 0.211494\n Sonoma 0.144422\n '''\n\n '''\n After round two of changes\n '''\n\n # get buildings by county\n buildings_county = misc.reindex(parcels.county, buildings.parcel_id)\n\n # making sure we have no more than 10% vacancy\n # this is the maximum vacancy you can have any a building so it NOT the\n # same thing as setting the vacancy for the entire county\n SURPLUS_VACANCY = buildings_county.map({\n \"Alameda\": .9, # down .2\n \"Contra Costa\": .7, # down .17\n \"Marin\": .5, # down .14\n \"Napa\": .7, # down .15\n \"San Francisco\": .9, # down .25\n \"San Mateo\": .28, # down .15\n \"Santa Clara\": .35, # down .18\n \"Solano\": .7, # down .17\n \"Sonoma\": .25, # down .3 letting this go lower cause it's the problem\n }).fillna(.2)\n\n # count of jobs by building\n job_counts_by_building = jobs.building_id.value_counts().\\\n reindex(buildings.index).fillna(0)\n # with a 10% vacancy\n job_counts_by_building_surplus = \\\n (job_counts_by_building * (SURPLUS_VACANCY+1)).astype('int')\n # min of job spaces and 10% greater than number of jobs\n correct_job_spaces = pd.DataFrame([\n job_counts_by_building_surplus, buildings.job_spaces]).min()\n # convert back to non res sqft because job spaces is computed\n correct_non_res_sqft = correct_job_spaces * buildings.sqft_per_job\n\n buildings.update_col(\"non_residential_sqft\", correct_non_res_sqft)\n\n jobs_county = misc.reindex(buildings_county, jobs.building_id)\n\n print \"Vacancy rate by county:\\n\", \\\n buildings.job_spaces.groupby(buildings_county).sum() / \\\n jobs_county.value_counts() - 1.0\n\n buildings_juris = misc.reindex(parcels.juris, buildings.parcel_id)\n jobs_juris = misc.reindex(buildings_juris, jobs.building_id)\n s = buildings.job_spaces.groupby(buildings_juris).sum() / \\\n jobs_juris.value_counts() - 1.0\n print \"Vacancy rate by juris:\\n\", s.to_string()\n", "id": "281886", "language": "Python", "matching_score": 8.754752159118652, "max_stars_count": 0, "path": "bayarea_urbansim/baus/models.py" }, { "content": "import numpy as np\nimport pandas as pd\nimport os\nfrom urbansim_defaults import datasources\nfrom urbansim_defaults import utils\nfrom urbansim.utils import misc\nimport orca\nfrom utils import geom_id_to_parcel_id, parcel_id_to_geom_id\nfrom utils import nearest_neighbor\n\n\n#####################\n# TABLES AND INJECTABLES\n#####################\n\n\[email protected]('year')\ndef year():\n try:\n return orca.get_injectable(\"iter_var\")\n except:\n pass\n # if we're not running simulation, return base year\n return 2014\n\n\[email protected]()\ndef initial_year():\n return 2010\n\n\[email protected]()\ndef final_year():\n return 2040\n\n\[email protected]('store', cache=True)\ndef hdfstore(settings):\n return pd.HDFStore(\n os.path.join(misc.data_dir(), settings[\"store\"]))\n\n\[email protected](cache=True)\ndef low_income(settings):\n return int(settings[\"low_income_for_hlcm\"])\n\n\[email protected](\"limits_settings\", cache=True)\ndef limits_settings(settings, scenario):\n\n d = settings['development_limits']\n\n if scenario in d.keys():\n print \"Using limits for scenario: %s\" % scenario\n return d[scenario]\n\n if \"default\" in d.keys():\n print \"Using default limits\"\n return d[\"default\"]\n\n # assume there's no scenario-based limits and the dict is the limits\n return d\n\n\[email protected](cache=True)\ndef inclusionary_housing_settings(settings, scenario):\n\n s = settings['inclusionary_housing_settings']\n\n if scenario in s.keys():\n print \"Using 
inclusionary settings for scenario: %s\" % scenario\n s = s[scenario]\n\n elif \"default\" in s.keys():\n print \"Using default inclusionary settings\"\n s = s[\"default\"]\n\n d = {}\n for item in s:\n print \"Setting inclusionary rates for %d cities to %.2f\" %\\\n (len(item[\"values\"]), item[\"amount\"])\n # this is a list of inclusionary rates and the cities they apply\n # to - need to turn it in a map of city names to rates\n for juris in item[\"values\"]:\n d[juris] = item[\"amount\"]\n\n return d\n\n\[email protected]('building_sqft_per_job', cache=True)\ndef building_sqft_per_job(settings):\n return settings['building_sqft_per_job']\n\n\n# key locations in the Bay Area for use as attractions in the models\[email protected]('landmarks', cache=True)\ndef landmarks():\n return pd.read_csv(os.path.join(misc.data_dir(), 'landmarks.csv'),\n index_col=\"name\")\n\n\[email protected]('jobs', cache=True)\ndef jobs(store):\n\n if 'jobs_urbansim_allocated' not in store:\n # if jobs allocation hasn't been done, then do it\n # (this should only happen once)\n orca.run([\"allocate_jobs\"])\n\n return store['jobs_urbansim_allocated']\n\n\n# the way this works is there is an orca step to do jobs allocation, which\n# reads base year totals and creates jobs and allocates them to buildings,\n# and writes it back to the h5. then the actual jobs table above just reads\n# the auto-allocated version from the h5. was hoping to just do allocation\n# on the fly but it takes about 4 minutes so way to long to do on the fly\[email protected]('allocate_jobs')\ndef jobs(store, baseyear_taz_controls, settings, parcels):\n\n # this isn't pretty, but can't use orca table because there would\n # be a circular dependenct - I mean jobs dependent on buildings and\n # buildings on jobs, so we have to grab from the store directly\n buildings = store['buildings']\n buildings[\"non_residential_sqft\"][\n buildings.building_type_id.isin([15, 16])] = 0\n buildings[\"building_sqft\"][buildings.building_type_id.isin([15, 16])] = 0\n buildings[\"zone_id\"] = misc.reindex(parcels.zone_id, buildings.parcel_id)\n\n # we need to do a new assignment from the controls to the buildings\n\n # first disaggregate the job totals\n sector_map = settings[\"naics_to_empsix\"]\n jobs = []\n for taz, row in baseyear_taz_controls.local.iterrows():\n for sector_col, num in row.iteritems():\n\n # not a sector total\n if not sector_col.startswith(\"emp_sec\"):\n continue\n\n # get integer sector id\n sector_id = int(''.join(c for c in sector_col if c.isdigit()))\n sector_name = sector_map[sector_id]\n\n jobs += [[sector_id, sector_name, taz, -1]] * int(num)\n\n # df is now the\n df = pd.DataFrame(jobs, columns=[\n 'sector_id', 'empsix', 'taz', 'building_id'])\n\n # just do random assignment weighted by job spaces - we'll then\n # fill in the job_spaces if overfilled in the next step (code\n # has existed in urbansim for a while)\n for taz, cnt in df.groupby('taz').size().iteritems():\n\n potential_add_locations = buildings.non_residential_sqft[\n (buildings.zone_id == taz) &\n (buildings.non_residential_sqft > 0)]\n\n if len(potential_add_locations) == 0:\n # if no non-res buildings, put jobs in res buildings\n potential_add_locations = buildings.building_sqft[\n buildings.zone_id == taz]\n\n weights = potential_add_locations / potential_add_locations.sum()\n\n print taz, len(potential_add_locations),\\\n potential_add_locations.sum(), cnt\n\n buildings_ids = potential_add_locations.sample(\n cnt, replace=True, weights=weights)\n\n 
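        # descriptive note (inferred from the surrounding code): sampling with
        # replace=True and sqft-proportional weights assigns each of the cnt
        # jobs in this taz a building id, so larger buildings draw
        # proportionally more jobs; overfilling of job_spaces is tolerated at
        # this point per the comment above, and the index of the sampled rows
        # is written back to the jobs frame on the next line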
df[\"building_id\"][df.taz == taz] = buildings_ids.index.values\n\n s = buildings.zone_id.loc[df.building_id].value_counts()\n t = baseyear_taz_controls.emp_tot - s\n # assert we matched the totals exactly\n assert t.sum() == 0\n\n # this is some exploratory diagnostics comparing the job controls to\n # the buildings table - in other words, comparing non-residential space\n # to the number of jobs\n '''\n old_jobs = store['jobs']\n old_jobs_cnt = old_jobs.groupby('taz').si ze()\n\n emp_tot = baseyear_taz_controls.emp_tot\n print buildings.job_spaces.groupby(buildings.building_type_id).sum()\n supply = buildings.job_spaces.groupby(buildings.zone_id).sum()\n non_residential_sqft = buildings.non_residential_sqft.\\\n groupby(buildings.zone_id).sum()\n s = (supply-emp_tot).order()\n df = pd.DataFrame({\n \"job_spaces\": supply,\n \"jobs\": emp_tot,\n \"non_residential_sqft\": non_residential_sqft\n }, index=s.index)\n df[\"vacant_spaces\"] = supply-emp_tot\n df[\"vacancy_rate\"] = df.vacant_spaces/supply.astype('float')\n df[\"old_jobs\"] = old_jobs_cnt\n df[\"old_vacant_spaces\"] = supply-old_jobs_cnt\n df[\"old_vacancy_rate\"] = df.old_vacant_spaces/supply.astype('float')\n df[\"sqft_per_job\"] = df.non_residential_sqft / df.jobs\n df[\"old_sqft_per_job\"] = df.non_residential_sqft / df.old_jobs\n df.index.name = \"zone_id\"\n print df[[\"jobs\", \"old_jobs\", \"job_spaces\", \"non_residential_sqft\"]].corr()\n df.sort(\"sqft_per_job\").to_csv(\"job_demand.csv\")\n '''\n\n store['jobs_urbansim_allocated'] = df\n\n\[email protected](cache=True)\ndef baseyear_taz_controls():\n return pd.read_csv(os.path.join(\"data\",\n \"baseyear_taz_controls.csv\"), index_col=\"taz1454\")\n\n\[email protected](cache=True)\ndef base_year_summary_taz():\n return pd.read_csv(os.path.join('data',\n 'baseyear_taz_summaries_2010.csv'),\n index_col=\"zone_id\")\n\n\n# the estimation data is not in the buildings table - they are the same\[email protected]('homesales', cache=True)\ndef homesales(store):\n # we need to read directly from the store here. Why? The buildings\n # table itself drops a bunch of columns we need - most notably the\n # redfin_sales_price column. Why? 
Because the developer model will\n # append rows (new buildings) to the buildings table and we don't want\n # the developer model to know about redfin_sales_price (which is\n # meaningless for forecast buildings)\n df = store['buildings']\n df = df.dropna(subset=[\"redfin_sale_price\"])\n df[\"price_per_sqft\"] = df.eval('redfin_sale_price / sqft_per_unit')\n df = df.query(\"sqft_per_unit > 200\")\n df = df.dropna(subset=[\"price_per_sqft\"])\n return df\n\n\n# non-residential rent data\[email protected]('costar', cache=True)\ndef costar(store, parcels):\n df = pd.read_csv(os.path.join(misc.data_dir(), '2015_08_29_costar.csv'))\n df[\"PropertyType\"] = df.PropertyType.replace(\"General Retail\", \"Retail\")\n df = df[df.PropertyType.isin([\"Office\", \"Retail\", \"Industrial\"])]\n df[\"costar_rent\"] = df[\"Average Weighted Rent\"].astype('float')\n df[\"year_built\"] = df[\"Year Built\"].fillna(1980)\n df = df.dropna(subset=[\"costar_rent\", \"Latitude\", \"Longitude\"])\n\n # now assign parcel id\n df[\"parcel_id\"] = nearest_neighbor(\n parcels.to_frame(['x', 'y']).dropna(subset=['x', 'y']),\n df[['Longitude', 'Latitude']]\n )\n\n return df\n\n\[email protected](cache=True)\ndef zoning_lookup():\n df = pd.read_csv(os.path.join(misc.data_dir(), \"zoning_lookup.csv\"))\n # this part is a bit strange - we do string matching on the names of zoning\n # in order ot link parcels and zoning and some of the strings have small\n # differences, so we copy the row and have different strings for the same\n # lookup row. for now we drop duplicates of the id field in order to run\n # in urbansim (all the attributes of rows that share an id are the same -\n # only the name is different)\n df = df.drop_duplicates(subset='id').set_index('id')\n return df\n\n\[email protected]('zoning_table_city_lookup', cache=True)\ndef zoning_table_city_lookup():\n df = pd.read_csv(os.path.join(misc.data_dir(),\n \"zoning_table_city_lookup.csv\"),\n index_col=\"juris\")\n return df\n\n\n# zoning for use in the \"baseline\" scenario\n# comes in the hdf5\[email protected]('zoning_baseline', cache=True)\ndef zoning_baseline(parcels, zoning_lookup, settings):\n df = pd.read_csv(os.path.join(misc.data_dir(),\n \"2015_12_21_zoning_parcels.csv\"),\n index_col=\"geom_id\")\n df = pd.merge(df, zoning_lookup.to_frame(),\n left_on=\"zoning_id\", right_index=True)\n df = geom_id_to_parcel_id(df, parcels)\n\n d = {k: \"type%d\" % v for k, v in settings[\"building_type_map2\"].items()}\n\n df.columns = [d.get(x, x) for x in df.columns]\n\n return df\n\n\[email protected]('zoning_scenario', cache=True)\ndef zoning_scenario(parcels_geography, scenario, settings):\n\n scenario_zoning = pd.read_csv(\n os.path.join(misc.data_dir(),\n 'zoning_mods_%s.csv' % scenario),\n dtype={'jurisdiction': 'str'})\n\n d = {k: \"type%d\" % v for k, v in settings[\"building_type_map2\"].items()}\n\n for k, v in d.items():\n scenario_zoning['add-'+v] = scenario_zoning.add_bldg.str.contains(k)\n\n for k, v in d.items():\n scenario_zoning['drop-'+v] = scenario_zoning.drop_bldg.\\\n astype(str).str.contains(k)\n\n return pd.merge(parcels_geography.to_frame().reset_index(),\n scenario_zoning,\n on=['zoningmodcat'],\n how='left').set_index('parcel_id')\n\n\n# this is really bizarre, but the parcel table I have right now has empty\n# zone_ids for a few parcels. 
Not enough to worry about so just filling with\n# the mode\[email protected]('parcels', cache=True)\ndef parcels(store):\n df = store['parcels']\n df[\"zone_id\"] = df.zone_id.replace(0, 1)\n\n cfg = {\n \"fill_nas\": {\n \"zone_id\": {\n \"how\": \"mode\",\n \"type\": \"int\"\n },\n \"shape_area\": {\n \"how\": \"median\",\n \"type\": \"float\"\n }\n }\n }\n df = utils.table_reprocess(cfg, df)\n\n # have to do it this way because otherwise it's a circular reference\n sdem = pd.read_csv(os.path.join(misc.data_dir(),\n \"development_projects.csv\"))\n # mark parcels that are going to be developed by the sdem\n df[\"sdem\"] = df.geom_id.isin(sdem.geom_id).astype('int')\n\n return df\n\n\[email protected]('parcels_zoning_calculations', cache=True)\ndef parcels_zoning_calculations(parcels):\n return pd.DataFrame(data=parcels.to_frame(\n columns=['geom_id',\n 'total_residential_units']),\n index=parcels.index)\n\n\[email protected]('taz')\ndef taz(zones):\n return zones\n\n\[email protected](cache=True)\ndef parcel_rejections():\n url = \"https://forecast-feedback.firebaseio.com/parcelResults.json\"\n return pd.read_json(url, orient=\"index\").set_index(\"geomId\")\n\n\[email protected](cache=True)\ndef parcels_geography(parcels):\n df = pd.read_csv(os.path.join(misc.data_dir(),\n \"02_01_2016_parcels_geography.csv\"),\n index_col=\"geom_id\", dtype={'jurisdiction': 'str'})\n df = geom_id_to_parcel_id(df, parcels)\n\n juris_name = pd.read_csv(os.path.join(misc.data_dir(),\n \"census_id_to_name.csv\"),\n index_col=\"census_id\").name10\n\n df[\"juris_name\"] = df.jurisdiction_id.map(juris_name)\n\n df[\"pda_id\"] = df.pda_id.str.lower()\n\n return df\n\n\[email protected](cache=True)\ndef manual_edits():\n return pd.read_csv(os.path.join(misc.data_dir(), \"manual_edits.csv\"))\n\n\ndef reprocess_dev_projects(df):\n # if dev projects with the same parcel id have more than one build\n # record, we change the later ones to add records - we don't want to\n # constantly be redeveloping projects, but it's a common error for users\n # to make in their development project configuration\n df = df.sort_values([\"geom_id\", \"year_built\"])\n prev_geom_id = None\n for index, rec in df.iterrows():\n if rec.geom_id == prev_geom_id:\n df.loc[index, \"action\"] = \"add\"\n prev_geom_id = rec.geom_id\n\n return df\n\n\[email protected](cache=True)\ndef demolish_events(parcels, settings, scenario):\n df = pd.read_csv(os.path.join(misc.data_dir(), \"development_projects.csv\"))\n df = reprocess_dev_projects(df)\n\n # this filters project by scenario\n if scenario in df:\n # df[scenario] is 1s and 0s indicating whether to include it\n df = df[df[scenario].astype('bool')]\n\n # keep demolish and build records\n df = df[df.action.isin([\"demolish\", \"build\"])]\n\n df = df.dropna(subset=['geom_id'])\n df = df.set_index(\"geom_id\")\n df = geom_id_to_parcel_id(df, parcels).reset_index() # use parcel id\n\n return df\n\n\[email protected](cache=True)\ndef development_projects(parcels, settings, scenario):\n df = pd.read_csv(os.path.join(misc.data_dir(), \"development_projects.csv\"))\n df = reprocess_dev_projects(df)\n\n df = df[df.action.isin([\"add\", \"build\"])]\n\n # this filters project by scenario\n colname = \"scen%s\" % scenario\n # df[colname] is 1s and 0s indicating whether to include it\n # this used to be an optional filter but now I'm going to require it so\n # that we don't accidentally include all the development projects since\n # we've started using scenario-based dev projects pretty extensively\n 
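    # descriptive note (inferred from the surrounding code): colname is
    # "scen%s" % scenario (built above) and holds 1/0 flags per project, so
    # the boolean cast on the next line keeps only the projects explicitly
    # enabled for the scenario being run; projects without the flag set are
    # dropped rather than being included by default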
df = df[df[colname].astype('bool')]\n\n df = df.dropna(subset=['geom_id'])\n\n for fld in ['residential_sqft', 'residential_price',\n 'non_residential_price']:\n df[fld] = 0\n df[\"redfin_sale_year\"] = 2012 # hedonic doesn't tolerate nans\n df[\"stories\"] = df.stories.fillna(1)\n df[\"building_sqft\"] = df.building_sqft.fillna(0)\n df[\"non_residential_sqft\"] = df.non_residential_sqft.fillna(0)\n\n df[\"building_type\"] = df.building_type.replace(\"HP\", \"OF\")\n df[\"building_type\"] = df.building_type.replace(\"GV\", \"OF\")\n df[\"building_type\"] = df.building_type.replace(\"SC\", \"OF\")\n df[\"building_type_id\"] = \\\n df.building_type.map(settings[\"building_type_map2\"])\n\n df = df.dropna(subset=[\"geom_id\"]) # need a geom_id to link to parcel_id\n\n df = df.dropna(subset=[\"year_built\"]) # need a year built to get built\n\n df[\"geom_id\"] = df.geom_id.astype(\"int\")\n df = df.query('residential_units != \"rent\"')\n df[\"residential_units\"] = df.residential_units.fillna(0).astype(\"int\")\n geom_id = df.geom_id\n df = df.set_index(\"geom_id\")\n df = geom_id_to_parcel_id(df, parcels).reset_index() # use parcel id\n df[\"geom_id\"] = geom_id.values # add it back again cause it goes away\n\n # we don't predict prices for schools and hotels right now\n df = df.query(\"building_type_id <= 4 or building_type_id >= 7\")\n\n df[\"deed_restricted_units\"] = 0\n\n print \"Describe of development projects\"\n print df[orca.get_table('buildings').local_columns].describe()\n\n return df\n\n\[email protected]('households', cache=True)\ndef households(store, settings):\n # start with households from urbansim_defaults\n df = datasources.households(store, settings)\n\n # need to keep track of base year income quartiles for use in the\n # transition model - even caching doesn't work because when you add\n # rows via the transitioning, you automatically clear the cache!\n # this is pretty nasty and unfortunate\n df[\"base_income_quartile\"] = pd.Series(pd.qcut(df.income, 4, labels=False),\n index=df.index).add(1)\n df[\"base_income_octile\"] = pd.Series(pd.qcut(df.income, 8, labels=False),\n index=df.index).add(1)\n return df\n\n\[email protected]('buildings', cache=True)\ndef buildings(store, parcels, households, jobs, building_sqft_per_job,\n settings, manual_edits):\n\n # start with buildings from urbansim_defaults\n df = datasources.buildings(store, households, jobs,\n building_sqft_per_job, settings)\n\n df = df.drop(['development_type_id', 'improvement_value',\n 'sqft_per_unit', 'nonres_rent_per_sqft',\n 'res_price_per_sqft', 'redfin_sale_price',\n 'redfin_home_type', 'costar_property_type',\n 'costar_rent'], axis=1)\n\n edits = manual_edits.local\n edits = edits[edits.table == 'buildings']\n for index, row, col, val in \\\n edits[[\"id\", \"attribute\", \"new_value\"]].itertuples():\n df.set_value(row, col, val)\n\n # set the vacancy rate in each building to 5% for testing purposes\n df[\"residential_units\"] = df.residential_units.fillna(0)\n\n # for some reason nonres can be more than total sqft\n df[\"building_sqft\"] = pd.DataFrame({\n \"one\": df.building_sqft,\n \"two\": df.residential_sqft + df.non_residential_sqft}).max(axis=1)\n\n # keeps parking lots from getting redeveloped\n df[\"building_sqft\"][df.building_type_id.isin([15, 16])] = 0\n df[\"non_residential_sqft\"][df.building_type_id.isin([15, 16])] = 0\n\n # don't know what a 0 building type id, set to office\n df[\"building_type_id\"] = df.building_type_id.replace(0, 4)\n\n # we should only be using the 
\"buildings\" table during simulation, and in\n # simulation we want to normalize the prices to 2012 style prices\n df[\"redfin_sale_year\"] = 2012\n\n # hope we get more data on this soon\n df[\"deed_restricted_units\"] = 0\n zone_ids = misc.reindex(parcels.zone_id, df.parcel_id).\\\n reindex(df.index).fillna(-1)\n\n # sample deed restricted units to match current deed restricted unit\n # zone totals\n for taz, row in pd.read_csv('data/deed_restricted_zone_totals.csv',\n index_col='taz_key').iterrows():\n\n cnt = row[\"units\"]\n\n if cnt <= 0:\n continue\n\n potential_add_locations = df.residential_units[\n (zone_ids == taz) &\n (df.residential_units > 0)]\n\n assert len(potential_add_locations) > 0\n\n weights = potential_add_locations / potential_add_locations.sum()\n\n buildings_ids = potential_add_locations.sample(\n cnt, replace=True, weights=weights)\n\n units = pd.Series(buildings_ids.index.values).value_counts()\n df.loc[units.index, \"deed_restricted_units\"] += units.values\n\n print \"Total deed restricted units after random selection: %d\" % \\\n df.deed_restricted_units.sum()\n\n df[\"deed_restricted_units\"] = \\\n df[[\"deed_restricted_units\", \"residential_units\"]].min(axis=1)\n\n print \"Total deed restricted units after truncating to res units: %d\" % \\\n df.deed_restricted_units.sum()\n\n return df\n\n\[email protected]('household_controls_unstacked', cache=True)\ndef household_controls_unstacked():\n df = pd.read_csv(os.path.join(misc.data_dir(), \"household_controls.csv\"))\n return df.set_index('year')\n\n\n# the following overrides household_controls table defined in urbansim_defaults\[email protected]('household_controls', cache=True)\ndef household_controls(household_controls_unstacked):\n df = household_controls_unstacked.to_frame()\n # rename to match legacy table\n df.columns = [1, 2, 3, 4]\n # stack and fill in columns\n df = df.stack().reset_index().set_index('year')\n # rename to match legacy table\n df.columns = ['base_income_quartile', 'total_number_of_households']\n return df\n\n\[email protected]('employment_controls_unstacked', cache=True)\ndef employment_controls_unstacked():\n df = pd.read_csv(os.path.join(misc.data_dir(), \"employment_controls.csv\"))\n return df.set_index('year')\n\n\n# the following overrides employment_controls\n# table defined in urbansim_defaults\[email protected]('employment_controls', cache=True)\ndef employment_controls(employment_controls_unstacked):\n df = employment_controls_unstacked.to_frame()\n # rename to match legacy table\n df.columns = [1, 2, 3, 4, 5, 6]\n # stack and fill in columns\n df = df.stack().reset_index().set_index('year')\n # rename to match legacy table\n df.columns = ['empsix_id', 'number_of_jobs']\n return df\n\n\[email protected]('zone_forecast_inputs', cache=True)\ndef zone_forecast_inputs():\n return pd.read_csv(os.path.join(misc.data_dir(),\n \"zone_forecast_inputs.csv\"),\n index_col=\"zone_id\")\n\n\n# this is the set of categories by zone of sending and receiving zones\n# in terms of vmt fees\[email protected](\"vmt_fee_categories\", cache=True)\ndef vmt_fee_categories():\n return pd.read_csv(os.path.join(misc.data_dir(), \"vmt_fee_zonecats.csv\"),\n index_col=\"taz\")\n\n\[email protected]('taz_geography', cache=True)\ndef taz_geography():\n tg = pd.read_csv(os.path.join(misc.data_dir(),\n \"taz_geography.csv\"), index_col=\"zone\")\n sr = pd.read_csv(os.path.join(misc.data_dir(),\n \"superdistricts.csv\"), index_col=\"number\")\n tg[\"subregion_id\"] = 
sr.subregion.loc[tg.superdistrict].values\n tg[\"subregion\"] = tg.subregion_id.map({\n 1: \"Core\",\n 2: \"Urban\",\n 3: \"Suburban\",\n 4: \"Rural\"\n })\n return tg\n\n\n# these are shapes - \"zones\" in the bay area\n\n\[email protected]('zones', cache=True)\ndef zones(store):\n df = store['zones']\n df = df.sort_index()\n return df\n\n\n# this specifies the relationships between tables\norca.broadcast('parcels_geography', 'buildings', cast_index=True,\n onto_on='parcel_id')\norca.broadcast('tmnodes', 'buildings', cast_index=True, onto_on='tmnode_id')\norca.broadcast('parcels', 'homesales', cast_index=True, onto_on='parcel_id')\norca.broadcast('nodes', 'homesales', cast_index=True, onto_on='node_id')\norca.broadcast('tmnodes', 'homesales', cast_index=True, onto_on='tmnode_id')\norca.broadcast('nodes', 'costar', cast_index=True, onto_on='node_id')\norca.broadcast('tmnodes', 'costar', cast_index=True, onto_on='tmnode_id')\norca.broadcast('logsums', 'homesales', cast_index=True, onto_on='zone_id')\norca.broadcast('logsums', 'costar', cast_index=True, onto_on='zone_id')\norca.broadcast('taz_geography', 'parcels', cast_index=True,\n onto_on='zone_id')\n", "id": "4828424", "language": "Python", "matching_score": 6.216156005859375, "max_stars_count": 0, "path": "bayarea_urbansim/baus/datasources.py" }, { "content": "import numpy as np\nimport pandas as pd\nfrom urbansim.utils import misc\nimport orca\nimport datasources\nfrom utils import nearest_neighbor\nfrom urbansim_defaults import utils\nfrom urbansim_defaults import variables\n\n\n#####################\n# HOUSEHOLDS VARIABLES\n#####################\n\n\n# used to pretent a segmented choice model isn't actually segmented\[email protected]('households', 'ones', cache=True)\ndef income_decile(households):\n return pd.Series(1, households.index)\n\n\[email protected]('households', 'tmnode_id', cache=True)\ndef node_id(households, buildings):\n return misc.reindex(buildings.tmnode_id, households.building_id)\n\n\n#####################\n# HOMESALES VARIABLES\n#####################\n\n\[email protected]('homesales', cache=True)\ndef general_type(buildings):\n return buildings.general_type\n\n\nBUILDING_AGE_BREAK = 40\n\n\[email protected]('homesales', cache=True)\ndef building_age(homesales):\n return 2014 - homesales.year_built\n\n\[email protected]('homesales', cache=True)\ndef building_age_recent(homesales):\n s = homesales.building_age\n return s * (s < BUILDING_AGE_BREAK)\n\n\[email protected]('homesales', cache=True)\ndef building_age_old(homesales):\n s = homesales.building_age\n return s * (s >= BUILDING_AGE_BREAK)\n\n\[email protected]('homesales', 'juris_ave_income', cache=True)\ndef juris_ave_income(parcels, homesales):\n return misc.reindex(parcels.juris_ave_income, homesales.parcel_id)\n\n\[email protected]('homesales', cache=True)\ndef zonal_veryhighinc(homesales, taz):\n return misc.reindex(taz.veryhighinc, homesales.zone_id).\\\n reindex(homesales.index).fillna(0)\n\n\[email protected]('homesales', 'is_sanfran', cache=True)\ndef is_sanfran(parcels, homesales):\n return misc.reindex(parcels.is_sanfran, homesales.parcel_id)\n\n\[email protected]('homesales', 'node_id', cache=True)\ndef node_id(homesales, parcels):\n return misc.reindex(parcels.node_id, homesales.parcel_id)\n\n\[email protected]('homesales', 'tmnode_id', cache=True)\ndef tmnode_id(homesales, parcels):\n return misc.reindex(parcels.tmnode_id, homesales.parcel_id)\n\n\[email protected]('homesales', 'zone_id', cache=True)\ndef zone_id(homesales, parcels):\n return 
misc.reindex(parcels.zone_id, homesales.parcel_id)\n\n\[email protected]('homesales', cache=True)\ndef modern_condo(homesales):\n # this is to try and differentiate between new\n # construction in the city vs in the burbs\n return ((homesales.year_built > 2000) *\n (homesales.building_type_id == 3)).astype('int')\n\n\[email protected]('homesales', cache=True)\ndef new_construction(homesales):\n return (homesales.year_built > 2000).astype('int')\n\n\[email protected]('homesales', cache=True)\ndef historic(homesales):\n return (homesales.year_built < 1940).astype('int')\n\n\[email protected]('homesales', cache=True)\ndef base_price_per_sqft(homesales):\n s = homesales.price_per_sqft.groupby(homesales.zone_id).quantile()\n return misc.reindex(s, homesales.zone_id)\n\n\[email protected]('homesales', cache=True)\ndef transit_type(homesales, parcels_geography):\n return misc.reindex(parcels_geography.tpp_id, homesales.parcel_id).\\\n reindex(homesales.index).fillna('none')\n\n\n#####################\n# COSTAR VARIABLES\n#####################\n\n\[email protected]('costar', 'juris_ave_income', cache=True)\ndef juris_ave_income(parcels, costar):\n return misc.reindex(parcels.juris_ave_income, costar.parcel_id)\n\n\[email protected]('costar', 'is_sanfran', cache=True)\ndef is_sanfran(parcels, costar):\n return misc.reindex(parcels.is_sanfran, costar.parcel_id)\n\n\[email protected]('costar', 'general_type')\ndef general_type(costar):\n return costar.PropertyType\n\n\[email protected]('costar', 'node_id')\ndef node_id(parcels, costar):\n return misc.reindex(parcels.node_id, costar.parcel_id)\n\n\[email protected]('costar', 'tmnode_id')\ndef tmnode_id(parcels, costar):\n return misc.reindex(parcels.tmnode_id, costar.parcel_id)\n\n\[email protected]('costar', 'zone_id')\ndef zone_id(parcels, costar):\n return misc.reindex(parcels.zone_id, costar.parcel_id)\n\n\[email protected]('costar', cache=True)\ndef transit_type(costar, parcels_geography):\n return misc.reindex(parcels_geography.tpp_id, costar.parcel_id).\\\n reindex(costar.index).fillna('none')\n\n\n#####################\n# JOBS VARIABLES\n#####################\n\n\[email protected]('jobs', 'tmnode_id', cache=True)\ndef tmnode_id(jobs, buildings):\n return misc.reindex(buildings.tmnode_id, jobs.building_id)\n\n\[email protected]('jobs', 'naics', cache=True)\ndef naics(jobs):\n return jobs.sector_id\n\n\n# @orca.column('jobs', 'empsix', cache=True)\n# def empsix(jobs, settings):\n# return jobs.naics.map(settings['naics_to_empsix'])\n\n\[email protected]('jobs', 'empsix_id', cache=True)\ndef empsix_id(jobs, settings):\n return jobs.empsix.map(settings['empsix_name_to_id'])\n\n\n#####################\n# BUILDINGS VARIABLES\n#####################\n\n\n# I want to round this cause otherwise we'll be underfilling job spaces\n# in the aggregate because of rounding errors - this way some spaces will\n# be underfilled and othersoverfilled which should yield an average of\n# the sqft_per_job table\[email protected]('buildings', 'job_spaces', cache=False)\ndef job_spaces(buildings):\n return (buildings.non_residential_sqft /\n buildings.sqft_per_job).fillna(0).round().astype('int')\n\n\[email protected]('buildings')\ndef market_rate_units(buildings):\n return buildings.residential_units - buildings.deed_restricted_units\n\n\n# this column can be negative when there are more low income households than\n# deed restricted units, which means some of the low income households are in\n# market rate units - this feature is used below\[email 
protected]('buildings')\ndef vacant_affordable_units_neg(buildings, households, settings, low_income):\n return buildings.deed_restricted_units.sub(\n households.building_id[households.income <= low_income].value_counts(),\n fill_value=0)\n\n\[email protected]('buildings')\ndef vacant_affordable_units(buildings):\n return buildings.vacant_affordable_units_neg.clip(lower=0).\\\n reindex(buildings.index).fillna(0)\n\n\[email protected]('buildings')\ndef vacant_market_rate_units(buildings, households, settings, low_income):\n # this is market rate households per building\n s1 = households.building_id[households.income > low_income].value_counts()\n # this is low income households in market rate units - a negative number\n # in vacant affordable units indicates the number of households in market\n # rate units\n s2 = buildings.vacant_affordable_units_neg.clip(upper=0)*-1\n return buildings.market_rate_units.\\\n sub(s1, fill_value=0).sub(s2, fill_value=0).clip(lower=0)\n\n\[email protected]('buildings', cache=True)\ndef building_age(buildings, year):\n return (year or 2014) - buildings.year_built\n\n\[email protected]('buildings', cache=True)\ndef building_age_recent(buildings):\n s = buildings.building_age\n return s * (s < BUILDING_AGE_BREAK)\n\n\[email protected]('buildings', cache=True)\ndef building_age_old(buildings):\n s = buildings.building_age\n return s * (s >= BUILDING_AGE_BREAK)\n\n\[email protected]('buildings', cache=True)\ndef zonal_veryhighinc(buildings, taz):\n return misc.reindex(taz.veryhighinc, buildings.zone_id).\\\n reindex(buildings.index).fillna(0)\n\n\[email protected]('buildings', cache=True)\ndef transit_type(buildings, parcels_geography):\n return misc.reindex(parcels_geography.tpp_id, buildings.parcel_id).\\\n reindex(buildings.index).fillna('none')\n\n\[email protected]('buildings', cache=False)\ndef unit_price(buildings):\n return buildings.residential_price * buildings.sqft_per_unit\n\n\[email protected]('buildings', cache=True)\ndef base_price_per_sqft(homesales, buildings):\n s = homesales.price_per_sqft.groupby(homesales.zone_id).quantile()\n return misc.reindex(s, buildings.zone_id).reindex(buildings.index)\\\n .fillna(s.quantile())\n\n\[email protected]('buildings', 'tmnode_id', cache=True)\ndef tmnode_id(buildings, parcels):\n return misc.reindex(parcels.tmnode_id, buildings.parcel_id)\n\n\[email protected]('buildings', 'juris_ave_income', cache=True)\ndef juris_ave_income(parcels, buildings):\n return misc.reindex(parcels.juris_ave_income, buildings.parcel_id)\n\n\[email protected]('buildings', 'is_sanfran', cache=True)\ndef is_sanfran(parcels, buildings):\n return misc.reindex(parcels.is_sanfran, buildings.parcel_id)\n\n\[email protected]('buildings', 'sqft_per_unit', cache=True)\ndef unit_sqft(buildings):\n return (buildings.building_sqft /\n buildings.residential_units.replace(0, 1)).clip(400, 6000)\n\n\[email protected]('buildings', cache=True)\ndef modern_condo(buildings):\n # this is to try and differentiate between new construction\n # in the city vs in the burbs\n return ((buildings.year_built > 2000) * (buildings.building_type_id == 3))\\\n .astype('int')\n\n\[email protected]('buildings', cache=True)\ndef new_construction(buildings):\n return (buildings.year_built > 2000).astype('int')\n\n\[email protected]('buildings', cache=True)\ndef historic(buildings):\n return (buildings.year_built < 1940).astype('int')\n\n\[email protected]('buildings', cache=True)\ndef vmt_res_cat(buildings, vmt_fee_categories):\n return 
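# --- editorial aside: illustrative usage sketch, not part of the original ---
# The column definitions registered with orca in this module are lazy; nothing
# is computed until a table is materialised.  The column names pulled below
# are just examples taken from this file.
def _orca_usage_sketch():
    buildings = orca.get_table('buildings')
    # to_frame() evaluates the decorated column functions on demand, injecting
    # the other registered tables/injectables they ask for as arguments
    return buildings.to_frame(['residential_units', 'vacant_market_rate_units'])
# ----------------------------------------------------------------------------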
misc.reindex(vmt_fee_categories.res_cat, buildings.zone_id)\n\n\n#####################\n# NODES VARIABLES\n#####################\n\n\n# these are computed outcomes of accessibility variables\[email protected]('nodes')\ndef retail_ratio(nodes):\n # then compute the ratio of income to retail sqft - a high number here\n # indicates an underserved market\n return nodes.sum_income_3000 / nodes.retail_sqft_3000.clip(lower=1)\n\n\n#####################\n# PARCELS VARIABLES\n#####################\n\n\[email protected]('parcels')\ndef retail_ratio(parcels, nodes):\n return misc.reindex(nodes.retail_ratio, parcels.node_id)\n\n\n# the stories attributes on parcels will be the max story\n# attribute on the buildings\[email protected]('parcels', cache=True)\ndef stories(buildings):\n return buildings.stories.groupby(buildings.parcel_id).max()\n\n\[email protected]('parcels', cache=True)\ndef height(parcels):\n return parcels.stories * 12\n\n\[email protected]('parcels', cache=True)\ndef vmt_res_cat(parcels, vmt_fee_categories):\n return misc.reindex(vmt_fee_categories.res_cat, parcels.zone_id)\n\n\[email protected]('parcels', cache=True)\ndef vmt_res_fees(parcels, settings):\n vmt_settings = settings[\"acct_settings\"][\"vmt_settings\"]\n return parcels.vmt_res_cat.map(vmt_settings[\"res_fee_amounts\"])\n\n\[email protected]('parcels', cache=True)\ndef vmt_com_fees(parcels, settings):\n vmt_settings = settings[\"acct_settings\"][\"vmt_settings\"]\n return parcels.vmt_res_cat.map(vmt_settings[\"com_fee_amounts\"])\n\n\n# compute the fees per unit for each parcel\n# (since feees are specified spatially)\[email protected]('parcels', cache=True)\ndef fees_per_unit(parcels, settings, scenario):\n s = pd.Series(0, index=parcels.index)\n\n if scenario == \"3\":\n s += parcels.vmt_res_fees\n\n return s\n\n\n# since this is by sqft this implies commercial\[email protected]('parcels', cache=True)\ndef fees_per_sqft(parcels, settings, scenario):\n s = pd.Series(0, index=parcels.index)\n\n if scenario == \"1\" or scenario == \"4\":\n s += parcels.vmt_com_fees\n\n return s\n\n\[email protected]('parcels', cache=True)\ndef pda(parcels, parcels_geography):\n return parcels_geography.pda_id.reindex(parcels.index)\n\n\[email protected]('parcels', cache=True)\ndef superdistrict(parcels, taz):\n return misc.reindex(taz.sd, parcels.zone_id)\n\n\n# perffoot is a dummy indicating the FOOTprint for the PERFormance targets\[email protected]('parcels', cache=True)\ndef urban_footprint(parcels, parcels_geography):\n return parcels_geography.perffoot.reindex(parcels.index)\n\n\n# perfzone is a dummy for geography for a performance target\[email protected]('parcels', cache=True)\ndef performance_zone(parcels, parcels_geography):\n return parcels_geography.perfarea.reindex(parcels.index)\n\n\[email protected]('parcels', cache=True)\ndef juris(parcels, parcels_geography):\n s = parcels_geography.juris_name.reindex(parcels.index)\n s.loc[2054504] = \"Marin County\"\n s.loc[2054505] = \"Santa Clara County\"\n s.loc[2054506] = \"Marin County\"\n s.loc[572927] = \"Contra Costa County\"\n # assert no empty juris values\n assert True not in s.isnull().value_counts()\n return s\n\n\[email protected]('parcels', 'ave_sqft_per_unit', cache=True)\ndef ave_sqft_per_unit(parcels, zones, settings):\n s = misc.reindex(zones.ave_unit_sqft, parcels.zone_id)\n\n clip = settings.get(\"ave_sqft_per_unit_clip\", None)\n if clip is not None:\n s = s.clip(lower=clip['lower'], upper=clip['upper'])\n\n cfg = 
settings.get(\"clip_sqft_per_unit_based_on_dua\", None)\n if cfg is not None:\n for clip in cfg:\n s[parcels.max_dua >= clip[\"threshold\"]] = clip[\"max\"]\n\n return s\n\n\n# these are actually functions that take parameters, but are parcel-related\n# so are defined here\[email protected]('parcel_average_price', autocall=False)\ndef parcel_average_price(use, quantile=.5):\n # I'm testing out a zone aggregation rather than a network aggregation\n # because I want to be able to determine the quantile of the distribution\n # I also want more spreading in the development and not keep it localized\n if use == \"residential\":\n\n # get node price average and put it on parcels\n s = misc.reindex(orca.get_table('nodes')[use],\n orca.get_table('parcels').node_id) * 1.3\n\n # apply shifters\n cost_shifters = orca.get_table(\"parcels\").cost_shifters\n price_shifters = orca.get_table(\"parcels\").price_shifters\n s = s / cost_shifters * price_shifters\n\n # just to make sure\n s = s.fillna(0).clip(150, 1250)\n return s\n\n if 'nodes' not in orca.list_tables():\n return pd.Series(0, orca.get_table('parcels').index)\n\n return misc.reindex(orca.get_table('nodes')[use],\n orca.get_table('parcels').node_id)\n\n\[email protected]('parcel_sales_price_sqft_func', autocall=False)\ndef parcel_sales_price_sqft(use):\n s = parcel_average_price(use)\n if use == \"residential\":\n s *= 1.0\n return s\n\n\[email protected](\"parcels\")\ndef residential_sales_price_sqft(parcel_sales_price_sqft_func):\n return parcel_sales_price_sqft_func(\"residential\")\n\n\n#############################\n# Functions for Checking\n# Allowed Uses and Building\n# Types on Parcels\n#############################\n\n\[email protected]('parcel_is_allowed_func', autocall=False)\ndef parcel_is_allowed(form):\n settings = orca.get_injectable('settings')\n form_to_btype = settings[\"form_to_btype\"]\n # we have zoning by building type but want\n # to know if specific forms are allowed\n allowed = [orca.get_table('zoning_baseline')\n ['type%d' % typ] > 0 for typ in form_to_btype[form]]\n\n # also check if the scenario based zoning adds the building type\n allowed2 = [orca.get_table('zoning_scenario')\n ['add-type%d' % typ] > 0 for typ in form_to_btype[form]]\n\n allowed = allowed + allowed2\n\n allowed = pd.concat(allowed, axis=1).max(axis=1).\\\n reindex(orca.get_table('parcels').index).fillna(False)\n\n # also check if the scenario based zoning drops the building type\n # NOTE THAT DROPPING OVERRIDES ADDING!\n disallowed = [orca.get_table('zoning_scenario')\n ['drop-type%d' % typ] > 0 for typ in form_to_btype[form]]\n\n disallowed = pd.concat(disallowed, axis=1).max(axis=1).\\\n reindex(orca.get_table('parcels').index).fillna(False)\n\n allowed = allowed.astype('bool') & ~disallowed\n\n settings = orca.get_injectable(\"settings\")\n if \"eliminate_retail_zoning_from_juris\" in settings and form == \"retail\":\n allowed *= ~orca.get_table(\"parcels\").juris.isin(\n settings[\"eliminate_retail_zoning_from_juris\"])\n\n return allowed.astype(\"bool\")\n\n\[email protected]('parcels', 'first_building_type_id')\ndef first_building_type_id(buildings, parcels):\n df = buildings.to_frame(\n columns=['building_type_id', 'parcel_id'])\n return df.groupby('parcel_id').building_type_id.first()\n\n\[email protected]('parcel_first_building_type_is', autocall=False)\ndef parcel_first_building_type_is(form):\n settings = orca.get_injectable('settings')\n form_to_btype = settings[\"form_to_btype\"]\n parcels = orca.get_table('parcels')\n s = 
parcels.first_building_type_id.isin(form_to_btype[form])\n return s\n\n#############################\n# Summary by TAZ for\n# Output to Travel Model\n#############################\n\n\[email protected]('zones')\ndef ave_unit_sqft(buildings):\n return buildings.sqft_per_unit.groupby(buildings.zone_id).quantile(.6)\n\n\[email protected]('taz', 'gqpop')\ndef gqpop(zones, zone_forecast_inputs, year):\n # need the following conditional b/c `year` is used to pull a column from\n # a csv of group quarter population based on a string of the year\n # and 2009 is the 'base'/pre-simulation year, as is the 2010 value\n # in the csv. this value, gqpop is small, esp between one single sim years\n year = 2010 if year == 2009 else year\n str1 = \"gqpop\" + str(year)[-2:]\n s = zone_forecast_inputs[str1]\n return s\n\n\[email protected]('taz', 'totacre')\ndef totacre(zone_forecast_inputs):\n s = zone_forecast_inputs.totacre_abag\n return s\n\n\[email protected]('taz', 'shpop62p')\ndef shpop62p(zone_forecast_inputs):\n s = zone_forecast_inputs.sh_62plus\n return s\n\n\[email protected]('buildings_subset')\ndef buildings_subset(buildings):\n df = buildings.to_frame(columns=['zone_id',\n 'building_type_id',\n 'residential_units',\n 'building_sqft',\n 'lot_size_per_unit'])\n return df\n\n\[email protected]('taz', 'newdevacres')\ndef newdevacres(buildings_subset):\n df = buildings_subset.to_frame()\n s = (df.query(\"building_sqft > 0\").\n groupby('zone_id').lot_size_per_unit.sum()) / 43560\n return s\n\n\[email protected]('taz', 'resunits')\ndef resunits(buildings_subset):\n df = buildings_subset.to_frame()\n s = df.groupby('zone_id').residential_units.sum()\n return s\n\n\[email protected]('taz', 'mfdu')\ndef mfdu(buildings_subset):\n df = buildings_subset.to_frame()\n s = df.query(\"building_type_id == 3 or building_type_id == 12\").\\\n groupby('zone_id').residential_units.sum()\n return s\n\n\[email protected]('taz', 'sfdu')\ndef sfdu(buildings_subset):\n df = buildings_subset.to_frame()\n s = df.query(\"building_type_id == 1 or building_type_id == 2\").\\\n groupby('zone_id').residential_units.sum()\n return s\n\n\[email protected]('households_subset')\ndef households_subset(households):\n return households.to_frame(columns=['zone_id',\n 'base_income_quartile',\n 'income',\n 'persons'])\n\n\[email protected]('taz', 'hhinq1')\ndef hhinq1(households_subset):\n df = households_subset.to_frame()\n s = df.query(\"base_income_quartile == 1\").\\\n groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'hhinq2')\ndef hhinq2(households_subset):\n df = households_subset.to_frame()\n s = df.query(\"base_income_quartile == 2\").\\\n groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'hhinq3')\ndef hhinq3(households_subset):\n df = households_subset.to_frame()\n s = df.query(\"base_income_quartile == 3\").\\\n groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'hhinq4')\ndef hhinq4(households_subset):\n df = households_subset.to_frame()\n s = df.query(\"base_income_quartile == 4\").\\\n groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'veryhighinc')\ndef hhinq4(households_subset, taz):\n df = households_subset.to_frame()\n s = df.query(\"income >= 75000\").\\\n groupby('zone_id').size()\n return s / taz.tothh.replace(0, 1)\n\n\[email protected]('taz', 'tothh')\ndef tothh(households_subset):\n df = households_subset.to_frame()\n s = df.groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'hhpop')\ndef hhpop(households_subset):\n df = 
households_subset.to_frame()\n s = df.groupby('zone_id').persons.sum()\n return s\n\n\[email protected]('taz', 'resvacancy')\ndef resvacancy(taz):\n s = (taz.resunits - taz.tothh) / \\\n taz.resunits.replace(0, 1)\n return s\n\n\[email protected]('jobs_subset')\ndef jobs_subset(jobs):\n zone_id = jobs.zone_id\n empsix = jobs.empsix\n df = pd.DataFrame(data={'zone_id': zone_id, 'empsix': empsix})\n return df\n\n\[email protected]('taz', 'totemp')\ndef totemp(jobs_subset):\n df = jobs_subset.to_frame()\n s = df.groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'agrempn')\ndef agrempn(jobs_subset):\n df = jobs_subset.to_frame()\n s = df.query(\"empsix == 'AGREMPN'\").\\\n groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'mwtempn')\ndef mwtempn(jobs_subset):\n df = jobs_subset.to_frame()\n s = df.query(\"empsix == 'MWTEMPN'\").\\\n groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'retempn')\ndef retempn(jobs_subset):\n df = jobs_subset.to_frame()\n s = df.query(\"empsix == 'RETEMPN'\").\\\n groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'fsempn')\ndef fsempn(jobs_subset):\n df = jobs_subset.to_frame()\n s = df.query(\"empsix == 'FPSEMPN'\").\\\n groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'herempn')\ndef herempn(jobs_subset):\n df = jobs_subset.to_frame()\n s = df.query(\"empsix == 'HEREMPN'\").\\\n groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'othempn')\ndef othempn(jobs_subset):\n df = jobs_subset.to_frame()\n s = df.query(\"empsix == 'OTHEMPN'\").\\\n groupby('zone_id').size()\n return s\n\n\[email protected]('taz', 'sd')\ndef sd(taz_geography):\n s = taz_geography.superdistrict\n return s\n\n\[email protected]('taz', 'county')\ndef sd(taz_geography):\n s = taz_geography.county\n return s\n\n# @orca.column('taz','totpop')\n# def totpop(taz):\n# s = taz.gqpop\n# s1 = taz.hhpop\n# s2 = s1+s\n# return s2\n\n\[email protected]('taz', 'density')\ndef density(taz):\n s = (taz.totpop + (2.5 * taz.totemp)) / taz.totacre\n return s\n\n\[email protected]('taz', 'areatype')\ndef density(taz):\n import numpy as np\n s = pd.cut(taz.density, bins=[0, 6, 30, 55,\n 100, 300, np.inf], labels=[5, 4, 3, 2, 1, 0])\n return s\n\n\[email protected]('taz', 'ciacre')\ndef ciacre(parcels, taz):\n f = orca.get_injectable('parcel_first_building_type_is')\n s = f('select_non_residential')\n s1 = parcels.get_column('zone_id')\n s2 = parcels.parcel_acres * s\n df = pd.DataFrame(data={'zone_id': s1, 'ciacre': s2})\n s3 = df.groupby('zone_id').ciacre.sum()\n return s3\n\n\[email protected]('taz', 'resacre')\ndef resacre(parcels):\n f = orca.get_injectable('parcel_first_building_type_is')\n s = f('residential') | f('mixedresidential')\n s1 = parcels.get_column('zone_id')\n s2 = parcels.parcel_acres * s\n df = pd.DataFrame(data={'zone_id': s1, 'residential_acres': s2})\n s3 = df.groupby('zone_id').residential_acres.sum()\n return s3\n\n\[email protected]('parcels', 'juris_ave_income', cache=True)\ndef juris_ave_income(households, buildings, parcels_geography, parcels):\n h = orca.merge_tables(\"households\",\n [households, buildings, parcels_geography],\n columns=[\"jurisdiction_id\", \"income\"])\n s = h.groupby(h.jurisdiction_id).income.quantile(.5)\n return misc.reindex(s, parcels_geography.jurisdiction_id).\\\n reindex(parcels.index).fillna(s.median()).apply(np.log1p)\n\n\n# returns the newest building on the land and fills missing values with 1800 -\n# for use with development limits\[email 
protected]('parcels', 'newest_building')\ndef newest_building(parcels, buildings):\n return buildings.year_built.groupby(buildings.parcel_id).max().\\\n reindex(parcels.index).fillna(1800)\n\n\[email protected]('parcels', cache=True)\ndef manual_nodev(parcel_rejections, parcels):\n df1 = parcels.to_frame(['x', 'y']).dropna(subset=['x', 'y'])\n df2 = parcel_rejections.to_frame(['lng', 'lat'])\n df2 = df2[parcel_rejections.state == \"denied\"]\n df2 = df2[[\"lng\", \"lat\"]] # need to change the order\n ind = nearest_neighbor(df1, df2)\n\n s = pd.Series(False, parcels.index)\n s.loc[ind.flatten()] = True\n return s.astype('int')\n\n\[email protected]('parcels', 'oldest_building_age')\ndef oldest_building_age(parcels, year):\n return year - parcels.oldest_building.replace(9999, 0)\n\n\[email protected]('parcels', 'is_sanfran', cache=True)\ndef is_sanfran(parcels_geography, buildings, parcels):\n return (parcels_geography.juris_name == \"San Francisco\").\\\n reindex(parcels.index).fillna(False).astype('int')\n\n\[email protected]('parcels')\ndef built_far(parcels):\n # compute the actually built farn on a parcel\n s = parcels.total_sqft / parcels.parcel_size\n # if the parcel size is too small to get an accurate reading, remove it\n s[parcels.parcel_acres < .1] = np.nan\n return s\n\n\n# actual columns start here\[email protected]('parcels')\ndef max_far(parcels_zoning_calculations, parcels, scenario, settings):\n # first we combine the zoning columns\n s = parcels_zoning_calculations.effective_max_far * ~parcels.nodev\n\n if scenario in [\"2\", \"3\", \"4\", \"5\"]:\n # we had trouble with the zoning outside of the footprint\n # make sure we have rural zoning outside of the footprint\n s2 = parcels.urban_footprint.map({0: 0, 1: np.nan})\n s = pd.concat([s, s2], axis=1).min(axis=1)\n\n if settings[\"dont_build_most_dense_building\"]:\n # in this case we shrink the zoning such that we don't built the\n # tallest building in a given zone\n # if there no building in the zone currently, we make the max_far = .2\n s2 = parcels.built_far.groupby(parcels.zone_id).max()\n s2 = misc.reindex(s2, parcels.zone_id).fillna(.2)\n s = pd.concat([s, s2], axis=1).min(axis=1)\n\n return s\n\n\n# returns a vector where parcels are ALLOWED to be built\[email protected]('parcels')\ndef parcel_rules(parcels):\n # removes parcels with buildings < 1940,\n # and single family homes on less then half an acre\n s = (parcels.oldest_building < 1940) | \\\n ((parcels.total_residential_units == 1) &\n (parcels.parcel_acres < .5)) | \\\n (parcels.parcel_size < 2000)\n s = (~s.reindex(parcels.index).fillna(False)).astype('int')\n return s\n\n\[email protected]('parcels', 'total_non_residential_sqft', cache=True)\ndef total_non_residential_sqft(parcels, buildings):\n return buildings.non_residential_sqft.groupby(buildings.parcel_id).sum().\\\n reindex(parcels.index).fillna(0)\n\n\[email protected]('parcels')\ndef nodev(zoning_baseline, parcels, static_parcels):\n # nodev from zoning\n s1 = zoning_baseline.nodev.reindex(parcels.index).\\\n fillna(0).astype('bool')\n # nodev from static parcels\n s2 = parcels.index.isin(static_parcels)\n return s1 | s2\n\n\[email protected]('parcels')\ndef built_dua(parcels):\n # compute the actually built dua on a parcel\n s = parcels.total_residential_units / parcels.parcel_acres\n # if the parcel size is too small to get an accurate reading, remove it\n s[parcels.parcel_acres < .1] = np.nan\n return s\n\n\[email protected]('parcels', 'max_dua')\ndef max_dua(parcels_zoning_calculations, 
parcels, scenario, settings):\n # first we combine the zoning columns\n s = parcels_zoning_calculations.effective_max_dua * ~parcels.nodev\n\n if scenario in [\"2\", \"3\", \"4\", \"5\"]:\n # we had trouble with the zoning outside of the footprint\n # make sure we have rural zoning outside of the footprint\n s2 = parcels.urban_footprint.map({0: .01, 1: np.nan})\n s = pd.concat([s, s2], axis=1).min(axis=1)\n\n if settings[\"dont_build_most_dense_building\"]:\n # in this case we shrink the zoning such that we don't built the\n # tallest building in a given zone\n # if there no building in the zone currently, we make the max_dua = 4\n s2 = parcels.built_dua.groupby(parcels.zone_id).max()\n s2 = misc.reindex(s2, parcels.zone_id).fillna(4)\n s = pd.concat([s, s2], axis=1).min(axis=1)\n\n return s\n\n\n# these next two are just indicators put into the output\[email protected]('parcels', 'residential_purchase_price_sqft')\ndef residential_purchase_price_sqft(parcels):\n return parcels.building_purchase_price_sqft\n\n\[email protected]('parcels', 'residential_sales_price_sqft')\ndef residential_sales_price_sqft(parcel_sales_price_sqft_func):\n return parcel_sales_price_sqft_func(\"residential\")\n\n\[email protected]('parcels', 'general_type')\ndef general_type(parcels, buildings):\n s = buildings.general_type.groupby(buildings.parcel_id).first()\n return s.reindex(parcels.index).fillna(\"Vacant\")\n\n\n# for debugging reasons this is split out into its own function\[email protected]('parcels', 'building_purchase_price_sqft')\ndef building_purchase_price_sqft(parcels):\n price = pd.Series(0, parcels.index)\n gentype = parcels.general_type\n for form in [\"Office\", \"Retail\", \"Industrial\", \"Residential\"]:\n # convert to yearly\n factor = 1.4 if form == \"Residential\" else 20.0\n # raise cost to convert from industrial\n if form == \"Industrial\":\n factor *= 3.0\n if form == \"Retail\":\n factor *= 2.0\n if form == \"Office\":\n factor *= 1.4\n tmp = parcel_average_price(form.lower())\n price += tmp * (gentype == form) * factor\n\n return price.clip(150, 2500)\n\n\[email protected]('parcels', 'building_purchase_price')\ndef building_purchase_price(parcels):\n # the .8 is because we assume the building only charges for 80% of the\n # space - when a good portion of the building is parking, this probably\n # overestimates the part of the building that you can charge for\n # the second .85 is because the price is likely to be lower for existing\n # buildings than for one that is newly built\n return (parcels.total_sqft * parcels.building_purchase_price_sqft *\n .8 * .85).reindex(parcels.index).fillna(0)\n\n\[email protected]('parcels', 'land_cost')\ndef land_cost(parcels):\n s = (parcels.building_purchase_price_sqft / 40).clip(5, 20)\n # industrial is an exception as cleanup is likely to be done - would\n # be nice to have data on superfund sites and such\n s[parcels.general_type == \"Industrial\"] = 100\n return parcels.building_purchase_price + parcels.parcel_size * s\n\n\[email protected]('parcels', 'county')\ndef county(parcels, settings):\n return parcels.county_id.map(settings[\"county_id_map\"])\n\n\[email protected]('parcels', 'cost_shifters')\ndef cost_shifters(parcels, settings):\n return parcels.county.map(settings[\"cost_shifters\"])\n\n\[email protected]('parcels', 'price_shifters')\ndef price_shifters(parcels, settings):\n return parcels.pda.map(settings[\"pda_price_shifters\"]).fillna(1.0)\n\n\[email protected]('parcels', 'node_id', cache=True)\ndef node_id(parcels, net):\n s = 
net[\"walk\"].get_node_ids(parcels.x, parcels.y)\n fill_val = s.value_counts().index[0]\n s = s.reindex(parcels.index).fillna(fill_val).astype('int')\n return s\n\n\[email protected]('parcels', 'tmnode_id', cache=True)\ndef node_id(parcels, net):\n s = net[\"drive\"].get_node_ids(parcels.x, parcels.y)\n fill_val = s.value_counts().index[0]\n s = s.reindex(parcels.index).fillna(fill_val).astype('int')\n return s\n\n\[email protected]('parcels', 'subregion', cache=True)\ndef subregion(taz_geography, parcels):\n return misc.reindex(taz_geography.subregion, parcels.zone_id)\n\n\[email protected]('parcels', 'vmt_res_cat', cache=True)\ndef vmt_code(parcels, vmt_fee_categories):\n return misc.reindex(vmt_fee_categories.res_cat, parcels.zone_id)\n\n\n# This is an all computed table which takes calculations from the below and\n# puts it in a computed dataframe. The catch here is that UrbanSim only\n# needs one scenario's zoning at a time. This dataframe gives you the\n# zoning for all 4 scenarios at the same time for comparison sake. Therefore\n# it switches scenarios, clears the cache and recomputes the columns - this\n# is not really normal UrbanSim operation but it immensely useful for debugging\[email protected]('parcels_zoning_by_scenario')\ndef parcels_zoning_by_scenario(parcels, parcels_zoning_calculations,\n zoning_baseline):\n\n df = pd.DataFrame(index=parcels.index)\n df[\"baseline_dua\"] = zoning_baseline.max_dua\n df[\"baseline_far\"] = zoning_baseline.max_far\n df[\"baseline_height\"] = zoning_baseline.max_height\n df[\"zoning_name\"] = zoning_baseline[\"name\"]\n df[\"zoning_source\"] = zoning_baseline[\"tablename\"]\n\n for scenario in [str(i) for i in range(4)]:\n orca.clear_cache()\n orca.add_injectable(\"scenario\", scenario)\n z = orca.get_table(\"parcels_zoning_calculations\")\n df[\"max_dua_%s\" % scenario] = z.effective_max_dua\n df[\"max_far_%s\" % scenario] = z.effective_max_far\n df[\"du_underbuild_%s\" % scenario] = z.zoned_du_underbuild\n df[\"non_res_cat_%s\" % scenario] = z.non_res_categories\n\n return df\n\n\nGROSS_AVE_UNIT_SIZE = 1000.0\nPARCEL_USE_EFFICIENCY = .8\nHEIGHT_PER_STORY = 12.0\n\n###################################\n# Zoning Capacity Variables\n###################################\n\n\[email protected]('parcels_zoning_calculations', 'zoned_du', cache=True)\ndef zoned_du(parcels, parcels_zoning_calculations):\n return parcels_zoning_calculations.effective_max_dua * parcels.parcel_acres\n\n\[email protected]('parcels_zoning_calculations', 'effective_max_dua', cache=True)\ndef effective_max_dua(zoning_baseline, parcels, scenario):\n\n max_dua_from_far = zoning_baseline.max_far * 43560 / GROSS_AVE_UNIT_SIZE\n\n max_far_from_height = (zoning_baseline.max_height / HEIGHT_PER_STORY) * \\\n PARCEL_USE_EFFICIENCY\n\n max_dua_from_height = max_far_from_height * 43560 / GROSS_AVE_UNIT_SIZE\n\n s = pd.concat([\n zoning_baseline.max_dua,\n max_dua_from_far,\n max_dua_from_height\n ], axis=1).min(axis=1)\n\n # take the max dua IFF the upzone value is greater than the current value\n # i.e. don't let the upzoning operation accidentally downzone\n\n scenario_max_dua = orca.get_table(\"zoning_scenario\").dua_up\n\n s = pd.concat([\n s,\n scenario_max_dua\n ], axis=1).max(axis=1)\n\n # take the min dua IFF the upzone value is less than the current value\n # i.e. 
don't let the downzoning operation accidentally upzone\n\n scenario_min_dua = orca.get_table(\"zoning_scenario\").dua_down\n\n s = pd.concat([\n s,\n scenario_min_dua\n ], axis=1).min(axis=1)\n\n s3 = parcel_is_allowed('residential')\n\n return (s.fillna(0) * s3).reindex(parcels.index).fillna(0).astype('float')\n\n\[email protected]('parcels_zoning_calculations',\n 'effective_max_far', cache=True)\ndef effective_max_far(zoning_baseline, parcels, scenario):\n\n max_far_from_height = (zoning_baseline.max_height / HEIGHT_PER_STORY) * \\\n PARCEL_USE_EFFICIENCY\n\n s = pd.concat([\n zoning_baseline.max_far,\n max_far_from_height\n ], axis=1).min(axis=1)\n\n # take the max far IFF the upzone value is greater than the current value\n # i.e. don't let the upzoning operation accidentally downzone\n\n scenario_max_far = orca.get_table(\"zoning_scenario\").far_up\n\n s = pd.concat([\n s,\n scenario_max_far\n ], axis=1).max(axis=1)\n\n # take the max far IFF the downzone value is less than the current value\n # i.e. don't let the downzoning operation accidentally upzone\n\n scenario_min_far = orca.get_table(\"zoning_scenario\").far_down\n\n s = pd.concat([\n s,\n scenario_min_far\n ], axis=1).min(axis=1)\n\n return s.reindex(parcels.index).fillna(0).astype('float')\n\n\[email protected]('parcels_zoning_calculations',\n 'effective_max_office_far', cache=True)\ndef effective_max_office_far(parcels_zoning_calculations):\n return parcels_zoning_calculations.effective_max_far * \\\n parcel_is_allowed('office')\n\n\n########################################\n# there are a number of variables\n# here that try to get at zoned capacity\n########################################\n\n\[email protected]('parcels_zoning_calculations', 'zoned_du_underbuild')\ndef zoned_du_underbuild(parcels, parcels_zoning_calculations):\n # subtract from zoned du, the total res units, but also the equivalent\n # of non-res sqft in res units\n s = (parcels_zoning_calculations.zoned_du -\n parcels.total_residential_units -\n parcels.total_non_residential_sqft /\n GROSS_AVE_UNIT_SIZE).clip(lower=0)\n ratio = (s / parcels.total_residential_units).replace(np.inf, 1)\n # if the ratio of additional units to existing units is not at least .5\n # we don't build it - I mean we're not turning a 10 story building into an\n # 11 story building\n s = s[ratio > .5].reindex(parcels.index).fillna(0)\n return s.astype('int')\n\n\[email protected]('parcels_zoning_calculations')\ndef zoned_du_build_ratio(parcels, parcels_zoning_calculations):\n # ratio of existing res built space to zoned res built space\n s = parcels.total_residential_units / \\\n (parcels_zoning_calculations.effective_max_dua * parcels.parcel_acres)\n return s.replace(np.inf, 1).clip(0, 1)\n\n\[email protected]('parcels_zoning_calculations')\ndef zoned_far_build_ratio(parcels, parcels_zoning_calculations):\n # ratio of existing nonres built space to zoned nonres built space\n s = parcels.total_non_residential_sqft / \\\n (parcels_zoning_calculations.effective_max_far *\n parcels.parcel_size)\n return s.replace(np.inf, 1).clip(0, 1)\n\n\[email protected]('parcels_zoning_calculations')\ndef zoned_build_ratio(parcels_zoning_calculations):\n # add them together in order to get the sum of residential and commercial\n # build space\n return parcels_zoning_calculations.zoned_du_build_ratio + \\\n parcels_zoning_calculations.zoned_far_build_ratio\n\n\[email protected]('parcels_zoning_calculations')\ndef zoned_du_underbuild_nodev(parcels, parcels_zoning_calculations):\n return 
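# --- editorial aside: illustrative sketch, not part of the original file ----
# effective_max_dua / effective_max_far above combine competing zoning limits
# with the same pandas pattern: row-wise min of the caps, then an upzoning
# column may only *raise* the result (max) and a downzoning column may only
# *lower* it (min).  Invented numbers, purely for illustration:
def _zoning_cap_sketch():
    base = pd.Series([10.0, 20.0, 30.0])       # cap from max_dua
    from_far = pd.Series([12.0, 15.0, 40.0])   # cap implied by max_far
    up = pd.Series([25.0, None, None])         # scenario upzoning (dua_up)
    down = pd.Series([None, 10.0, None])       # scenario downzoning (dua_down)
    cap = pd.concat([base, from_far], axis=1).min(axis=1)   # 10, 15, 30
    cap = pd.concat([cap, up], axis=1).max(axis=1)          # 25, 15, 30
    cap = pd.concat([cap, down], axis=1).min(axis=1)        # 25, 10, 30
    return cap
# ----------------------------------------------------------------------------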
(parcels_zoning_calculations.zoned_du_underbuild *\n parcels.parcel_rules).astype('int')\n\n\[email protected]('parcels_zoning_calculations', 'office_allowed')\ndef office_allowed(parcels):\n office_allowed = parcel_is_allowed('office')\n return office_allowed\n\n\[email protected]('parcels_zoning_calculations', 'retail_allowed')\ndef retail_allowed(parcels):\n retail_allowed = parcel_is_allowed('retail')\n return retail_allowed\n\n\[email protected]('parcels_zoning_calculations', 'industrial_allowed')\ndef industrial_allowed(parcels):\n industrial_allowed = parcel_is_allowed('industrial')\n return industrial_allowed\n\n\[email protected]('parcels_zoning_calculations', 'cat_r')\ndef cat_r(parcels_zoning_calculations):\n s = ~parcels_zoning_calculations.office_allowed &\\\n parcels_zoning_calculations.retail_allowed\n s2 = pd.Series(index=parcels_zoning_calculations.index).fillna('R')\n return s * s2\n\n\[email protected]('parcels_zoning_calculations', 'cat_ind')\ndef cat_ind(parcels_zoning_calculations):\n s = ~parcels_zoning_calculations.office_allowed &\\\n ~parcels_zoning_calculations.retail_allowed &\\\n parcels_zoning_calculations.industrial_allowed\n s2 = pd.Series(index=parcels_zoning_calculations.index).fillna('I')\n return s * s2\n\n\[email protected]('parcels_zoning_calculations', 'office_high')\ndef office_high(parcels_zoning_calculations):\n s = parcels_zoning_calculations.effective_max_office_far > 4\n s2 = pd.Series(index=parcels_zoning_calculations.index).fillna('OH')\n s3 = s * s2\n return s3\n\n\[email protected]('parcels_zoning_calculations', 'office_medium')\ndef office_medium(parcels_zoning_calculations):\n s = parcels_zoning_calculations.effective_max_office_far > 1\n s2 = parcels_zoning_calculations.effective_max_office_far <= 4\n s3 = pd.Series(index=parcels_zoning_calculations.index).fillna('OM')\n return (s & s2) * s3\n\n\[email protected]('parcels_zoning_calculations', 'office_low')\ndef office_low(parcels_zoning_calculations):\n s = parcels_zoning_calculations.effective_max_office_far < 1\n s2 = parcels_zoning_calculations.office_allowed\n s3 = pd.Series(index=parcels_zoning_calculations.index).fillna('OL')\n return (s & s2) * s3\n\n\[email protected]('parcels_zoning_calculations', 'non_res_categories')\ndef non_res_categories(parcels_zoning_calculations):\n pzc = parcels_zoning_calculations\n s = pzc.office_high + pzc.office_medium + \\\n pzc.office_low + pzc.cat_r + pzc.cat_ind\n return s\n", "id": "1331152", "language": "Python", "matching_score": 6.621090412139893, "max_stars_count": 0, "path": "bayarea_urbansim/baus/variables.py" }, { "content": "import sys\nimport os\nimport orca\nimport pandas as pd\nimport numpy as np\nfrom utils import random_indexes, round_series_match_target,\\\n scale_by_target, simple_ipf\nfrom urbansim.utils import misc\nfrom scripts.output_csv_utils import format_df\n\n\[email protected](\"topsheet\")\ndef topsheet(households, jobs, buildings, parcels, zones, year,\n run_number, taz_geography, parcels_zoning_calculations,\n summary, settings, parcels_geography):\n\n hh_by_subregion = misc.reindex(taz_geography.subregion,\n households.zone_id).value_counts()\n\n households_df = orca.merge_tables(\n 'households',\n [parcels_geography, buildings, households],\n columns=['pda_id', 'tpp_id', 'income'])\n\n hh_by_inpda = households_df.pda_id.notnull().value_counts()\n\n hhincome_by_intpp = households_df.income.groupby(\n households_df.tpp_id.notnull()).mean()\n # round to nearest 100s\n hhincome_by_intpp = 
(hhincome_by_intpp/100).round()*100\n\n jobs_by_subregion = misc.reindex(taz_geography.subregion,\n jobs.zone_id).value_counts()\n\n jobs_df = orca.merge_tables(\n 'jobs',\n [parcels, buildings, jobs],\n columns=['pda'])\n\n jobs_by_inpda = jobs_df.pda.notnull().value_counts()\n\n capacity = parcels_zoning_calculations.\\\n zoned_du_underbuild_nodev.groupby(parcels.subregion).sum()\n\n if year == 2010:\n # save some info for computing growth measures\n orca.add_injectable(\"base_year_measures\", {\n \"hh_by_subregion\": hh_by_subregion,\n \"jobs_by_subregion\": jobs_by_subregion,\n \"hh_by_inpda\": hh_by_inpda,\n \"jobs_by_inpda\": jobs_by_inpda,\n \"hhincome_by_intpp\": hhincome_by_intpp,\n \"capacity\": capacity\n })\n\n # if year != 2040:\n # return\n\n base_year_measures = orca.get_injectable(\"base_year_measures\")\n\n f = open(os.path.join(\"runs\", \"run%d_topsheet_%d.log\" %\n (run_number, year)), \"w\")\n\n def write(s):\n # print s\n f.write(s + \"\\n\\n\")\n\n def norm_and_round(s):\n # normalize and round a series\n return str((s/s.sum()).round(2))\n\n nhh = len(households)\n write(\"Number of households = %d\" % nhh)\n nj = len(jobs)\n write(\"Number of jobs = %d\" % nj)\n\n n = len(households.building_id[households.building_id == -1])\n write(\"Number of unplaced households = %d\" % n)\n\n n = len(jobs.building_id[jobs.building_id == -1])\n write(\"Number of unplaced jobs = %d\" % n)\n\n du = buildings.residential_units.sum()\n write(\"Number of residential units = %d\" % du)\n write(\"Residential vacancy rate = %.2f\" % (1-0 - float(nhh)/du))\n\n du = buildings.deed_restricted_units.sum()\n write(\"Number of deed restricted units = %d\" % du)\n\n write(\"Base year mean income by whether household is in tpp:\\n%s\" %\n base_year_measures[\"hhincome_by_intpp\"])\n\n write(\"Horizon year mean income by whether household is in tpp:\\n%s\" %\n hhincome_by_intpp)\n\n jsp = buildings.job_spaces.sum()\n write(\"Number of job spaces = %d\" % jsp)\n write(\"Non-residential vacancy rate = %.2f\" % (1-0 - float(nj)/jsp))\n\n tmp = base_year_measures[\"hh_by_subregion\"]\n write(\"Households base year share by subregion:\\n%s\" %\n norm_and_round(tmp))\n\n write(\"Households share by subregion:\\n%s\" %\n norm_and_round(hh_by_subregion))\n diff = hh_by_subregion - base_year_measures[\"hh_by_subregion\"]\n\n write(\"Households pct of regional growth by subregion:\\n%s\" %\n norm_and_round(diff))\n\n tmp = base_year_measures[\"jobs_by_subregion\"]\n write(\"Jobs base year share by subregion:\\n%s\" %\n norm_and_round(tmp))\n\n write(\"Jobs share by subregion:\\n%s\" %\n norm_and_round(jobs_by_subregion))\n diff = jobs_by_subregion - base_year_measures[\"jobs_by_subregion\"]\n\n write(\"Jobs pct of regional growth by subregion:\\n%s\" %\n norm_and_round(diff))\n\n tmp = base_year_measures[\"hh_by_inpda\"]\n write(\"Households base year share in pdas:\\n%s\" %\n norm_and_round(tmp))\n\n write(\"Households share in pdas:\\n%s\" %\n norm_and_round(hh_by_inpda))\n diff = hh_by_inpda - base_year_measures[\"hh_by_inpda\"]\n\n write(\"Households pct of regional growth in pdas:\\n%s\" %\n norm_and_round(diff))\n\n tmp = base_year_measures[\"jobs_by_inpda\"]\n write(\"Jobs base year share in pdas:\\n%s\" %\n norm_and_round(tmp))\n\n write(\"Jobs share in pdas:\\n%s\" %\n norm_and_round(jobs_by_inpda))\n diff = jobs_by_inpda - base_year_measures[\"jobs_by_inpda\"]\n\n write(\"Jobs pct of regional growth in pdas:\\n%s\" %\n norm_and_round(diff))\n\n write(\"Base year dwelling unit raw capacity:\\n%s\" 
%\n base_year_measures[\"capacity\"])\n\n write(\"Dwelling unit raw capacity:\\n%s\" % capacity)\n\n if summary.parcel_output is not None:\n df = summary.parcel_output\n # we mark greenfield as a parcel with less than 500 current sqft\n greenfield = df.total_sqft < 500\n\n write(\"Current share of projects which are greenfield development:\\n%s\"\n % norm_and_round(greenfield.value_counts()))\n\n write(\"Current share of units which are greenfield development:\\n%s\" %\n norm_and_round(df.residential_units.groupby(greenfield).sum()))\n\n cmap = settings[\"county_id_tm_map\"]\n jobs_by_county = jobs.zone_id.map(taz_geography.county)\\\n .map(cmap).value_counts()\n households_by_county = households.zone_id.map(taz_geography.county)\\\n .map(cmap).value_counts()\n jobs_by_housing = jobs_by_county / households_by_county.replace(0, 1)\n write(\"Jobs/housing balance:\\n\" + str(jobs_by_housing))\n\n f.close()\n\n\[email protected](\"diagnostic_output\")\ndef diagnostic_output(households, buildings, parcels, taz, jobs,\n zones, year, summary, run_number):\n households = households.to_frame()\n buildings = buildings.to_frame()\n parcels = parcels.to_frame()\n zones = zones.to_frame()\n\n zones['ave_unit_sqft'] = zones.ave_unit_sqft\n\n zones['zoned_du'] = parcels.groupby('zone_id').zoned_du.sum()\n zones['zoned_du_underbuild'] = parcels.groupby('zone_id').\\\n zoned_du_underbuild.sum()\n zones['zoned_du_underbuild_ratio'] = zones.zoned_du_underbuild /\\\n zones.zoned_du\n\n zones['residential_units'] = buildings.groupby('zone_id').\\\n residential_units.sum()\n zones['job_spaces'] = buildings.groupby('zone_id').\\\n job_spaces.sum()\n tothh = households.zone_id.value_counts().reindex(zones.index).fillna(0)\n zones['residential_vacancy'] = \\\n 1.0 - tothh / zones.residential_units.replace(0, 1)\n zones['non_residential_sqft'] = buildings.groupby('zone_id').\\\n non_residential_sqft.sum()\n totjobs = jobs.zone_id.value_counts().reindex(zones.index).fillna(0)\n zones['non_residential_vacancy'] = \\\n 1.0 - totjobs / zones.job_spaces.replace(0, 1)\n\n zones['retail_sqft'] = buildings.query('general_type == \"Retail\"').\\\n groupby('zone_id').non_residential_sqft.sum()\n zones['office_sqft'] = buildings.query('general_type == \"Office\"').\\\n groupby('zone_id').non_residential_sqft.sum()\n zones['industrial_sqft'] = buildings.query(\n 'general_type == \"Industrial\"').\\\n groupby('zone_id').non_residential_sqft.sum()\n\n zones['average_income'] = households.groupby('zone_id').income.quantile()\n zones['household_size'] = households.groupby('zone_id').persons.quantile()\n\n zones['building_count'] = buildings.\\\n query('general_type == \"Residential\"').groupby('zone_id').size()\n zones['residential_price'] = buildings.\\\n query('general_type == \"Residential\"').groupby('zone_id').\\\n residential_price.quantile()\n zones['retail_rent'] = buildings[buildings.general_type == \"Retail\"].\\\n groupby('zone_id').non_residential_price.quantile()\n zones['office_rent'] = buildings[buildings.general_type == \"Office\"].\\\n groupby('zone_id').non_residential_price.quantile()\n zones['industrial_rent'] = \\\n buildings[buildings.general_type == \"Industrial\"].\\\n groupby('zone_id').non_residential_price.quantile()\n\n zones['retail_sqft'] = buildings[buildings.general_type == \"Retail\"].\\\n groupby('zone_id').non_residential_sqft.sum()\n\n zones['retail_to_res_units_ratio'] = \\\n zones.retail_sqft / zones.residential_units.replace(0, 1)\n\n summary.add_zone_output(zones, \"diagnostic_outputs\", 
year)\n\n # save the dropped buildings to a csv\n if \"dropped_buildings\" in orca.orca._TABLES:\n df = orca.get_table(\"dropped_buildings\").to_frame()\n print \"Dropped buildings\", df.describe()\n df.to_csv(\n \"runs/run{}_dropped_buildings.csv\".format(run_number)\n )\n\n\[email protected]()\ndef geographic_summary(parcels, households, jobs, buildings, taz_geography,\n run_number, year, summary, final_year):\n # using the following conditional b/c `year` is used to pull a column\n # from a csv based on a string of the year in add_population()\n # and in add_employment() and 2009 is the\n # 'base'/pre-simulation year, as is the 2010 value in the csv.\n if year == 2009:\n year = 2010\n base = True\n else:\n base = False\n\n households_df = orca.merge_tables(\n 'households',\n [parcels, buildings, households],\n columns=['pda', 'zone_id', 'juris', 'superdistrict',\n 'persons', 'income', 'base_income_quartile'])\n\n jobs_df = orca.merge_tables(\n 'jobs',\n [parcels, buildings, jobs],\n columns=['pda', 'superdistrict', 'juris', 'zone_id', 'empsix'])\n\n buildings_df = orca.merge_tables(\n 'buildings',\n [parcels, buildings],\n columns=['pda', 'superdistrict', 'juris', 'building_type_id',\n 'zone_id', 'residential_units', 'building_sqft',\n 'non_residential_sqft'])\n\n parcel_output = summary.parcel_output\n\n # because merge_tables returns multiple zone_id_'s, but not the one we need\n buildings_df = buildings_df.rename(columns={'zone_id_x': 'zone_id'})\n\n geographies = ['superdistrict', 'pda', 'juris']\n\n if year in [2010, 2015, 2020, 2025, 2030, 2035, 2040]:\n\n for geography in geographies:\n\n # create table with household/population summaries\n\n summary_table = pd.pivot_table(households_df,\n values=['persons'],\n index=[geography],\n aggfunc=[np.size])\n\n summary_table.columns = ['tothh']\n\n # income quartile counts\n summary_table['hhincq1'] = \\\n households_df.query(\"base_income_quartile == 1\").\\\n groupby(geography).size()\n summary_table['hhincq2'] = \\\n households_df.query(\"base_income_quartile == 2\").\\\n groupby(geography).size()\n summary_table['hhincq3'] = \\\n households_df.query(\"base_income_quartile == 3\").\\\n groupby(geography).size()\n summary_table['hhincq4'] = \\\n households_df.query(\"base_income_quartile == 4\").\\\n groupby(geography).size()\n\n # residential buildings by type\n summary_table['sfdu'] = buildings_df.\\\n query(\"building_type_id == 1 or building_type_id == 2\").\\\n groupby(geography).residential_units.sum()\n summary_table['mfdu'] = buildings_df.\\\n query(\"building_type_id == 3 or building_type_id == 12\").\\\n groupby(geography).residential_units.sum()\n\n # employees by sector\n summary_table['totemp'] = jobs_df.\\\n groupby(geography).size()\n summary_table['agrempn'] = jobs_df.query(\"empsix == 'AGREMPN'\").\\\n groupby(geography).size()\n summary_table['mwtempn'] = jobs_df.query(\"empsix == 'MWTEMPN'\").\\\n groupby(geography).size()\n summary_table['retempn'] = jobs_df.query(\"empsix == 'RETEMPN'\").\\\n groupby(geography).size()\n summary_table['fpsempn'] = jobs_df.query(\"empsix == 'FPSEMPN'\").\\\n groupby(geography).size()\n summary_table['herempn'] = jobs_df.query(\"empsix == 'HEREMPN'\").\\\n groupby(geography).size()\n summary_table['othempn'] = jobs_df.query(\"empsix == 'OTHEMPN'\").\\\n groupby(geography).size()\n\n # summary columns\n summary_table['occupancy_rate'] = summary_table['tothh'] / \\\n (summary_table['sfdu'] + summary_table['mfdu'])\n summary_table['non_residential_sqft'] = buildings_df.\\\n 
groupby(geography)['non_residential_sqft'].sum()\n summary_table['sq_ft_per_employee'] = \\\n summary_table['non_residential_sqft'] / summary_table['totemp']\n\n if parcel_output is not None:\n parcel_output['subsidized_units'] = \\\n parcel_output.deed_restricted_units - \\\n parcel_output.inclusionary_units\n\n # columns re: affordable housing\n summary_table['deed_restricted_units'] = \\\n parcel_output.groupby(geography).\\\n deed_restricted_units.sum()\n summary_table['inclusionary_units'] = \\\n parcel_output.groupby(geography).inclusionary_units.sum()\n summary_table['subsidized_units'] = \\\n parcel_output.groupby(geography).subsidized_units.sum()\n summary_table['inclusionary_revenue_reduction'] = \\\n parcel_output.groupby(geography).\\\n policy_based_revenue_reduction.sum()\n summary_table['inclusionary_revenue_reduction_per_unit'] = \\\n summary_table.inclusionary_revenue_reduction / \\\n summary_table.inclusionary_units\n summary_table['total_subsidy'] = \\\n parcel_output[parcel_output.subsidized_units > 0].\\\n groupby(geography).max_profit.sum() * -1\n summary_table['subsidy_per_unit'] = \\\n summary_table.total_subsidy / \\\n summary_table.subsidized_units\n\n # fill in 0 values where there are NA's so that summary table\n # outputs are the same over the years otherwise a PDA or summary\n # geography would be dropped if it had no employment or housing\n if geography == 'superdistrict':\n all_summary_geographies = buildings_df[geography].unique()\n else:\n all_summary_geographies = parcels[geography].unique()\n summary_table = \\\n summary_table.reindex(all_summary_geographies).fillna(0)\n\n if base is False:\n summary_csv = \"runs/run{}_{}_summaries_{}.csv\".\\\n format(run_number, geography, year)\n elif base is True:\n summary_csv = \"runs/run{}_{}_summaries_{}.csv\".\\\n format(run_number, geography, 2009)\n summary_table.to_csv(summary_csv)\n\n # ##############################\n # ##############################\n # ##Write Summary of Accounts###\n # ##############################\n # ##############################\n\n if year == final_year:\n for acct_name, acct in orca.get_injectable(\"coffer\").iteritems():\n fname = \"runs/run{}_acctlog_{}_{}.csv\".\\\n format(run_number, acct_name, year)\n acct.to_frame().to_csv(fname)\n\n # ##############################\n # ##############################\n # ####Write Urban Footprint#####\n # #########Summary##############\n # ##############################\n # ##############################\n\n buildings_uf_df = orca.merge_tables(\n 'buildings',\n [parcels, buildings],\n columns=['urban_footprint', 'year_built',\n 'acres', 'residential_units',\n 'non_residential_sqft'])\n\n buildings_uf_df['count'] = 1\n\n s1 = buildings_uf_df['residential_units'] / buildings_uf_df['acres']\n s2 = s1 > 1\n s3 = (buildings_uf_df['urban_footprint'] == 0) * 1\n buildings_uf_df['denser_greenfield'] = s3 * s2\n\n df = buildings_uf_df.\\\n loc[buildings_uf_df['year_built'] > 2010].\\\n groupby('urban_footprint').sum()\n df = df[['count', 'residential_units', 'non_residential_sqft',\n 'acres']]\n\n df2 = buildings_uf_df.\\\n loc[buildings_uf_df['year_built'] > 2010].\\\n groupby('denser_greenfield').sum()\n df2 = df2[['count', 'residential_units', 'non_residential_sqft',\n 'acres']]\n\n formatters = {'count': '{:.0f}',\n 'residential_units': '{:.0f}',\n 'non_residential_sqft': '{:.0f}',\n 'acres': '{:.0f}'}\n\n df = format_df(df, formatters)\n\n df2 = format_df(df2, formatters)\n\n df = df.transpose()\n\n df2 = df2.transpose()\n\n df[2] = df2[1]\n\n 
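    # --- editorial note: descriptive comments added for clarity, not in the
    # --- original file --------------------------------------------------------
    # At this point `df` holds the post-2010 development totals split by the
    # urban_footprint flag (columns 0 and 1 after the transpose) and `df2`
    # holds the same totals split by the denser_greenfield dummy.  The
    # `df[2] = df2[1]` line above grafts df2's "denser greenfield" column onto
    # `df` as a third column, so the single rename below can label all three
    # columns at once.
    # --------------------------------------------------------------------------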
df.columns = ['urban_footprint_0', 'urban_footprint_1',\n 'denser_greenfield']\n uf_summary_csv = \"runs/run{}_urban_footprint_summary_{}.csv\".\\\n format(run_number, year)\n df.to_csv(uf_summary_csv)\n\n\[email protected]()\ndef building_summary(parcels, run_number, year,\n buildings,\n initial_year, final_year):\n\n if year not in [initial_year, final_year]:\n return\n\n df = orca.merge_tables(\n 'buildings',\n [parcels, buildings],\n columns=['performance_zone', 'year_built', 'residential_units',\n 'unit_price', 'zone_id', 'non_residential_sqft',\n 'deed_restricted_units', 'job_spaces', 'x', 'y'])\n\n df.to_csv(\n os.path.join(\"runs\", \"run%d_building_data_%d.csv\" %\n (run_number, year))\n )\n\n\[email protected]()\ndef parcel_summary(parcels, run_number, year,\n parcels_zoning_calculations,\n initial_year, final_year):\n\n if year not in [initial_year, final_year]:\n return\n\n df = parcels.to_frame([\n \"x\", \"y\",\n \"total_residential_units\",\n \"total_job_spaces\",\n \"first_building_type_id\"\n ])\n\n df2 = parcels_zoning_calculations.to_frame([\n \"zoned_du\",\n \"zoned_du_underbuild\",\n \"zoned_du_underbuild_nodev\"\n ])\n\n df = df.join(df2)\n\n df.to_csv(\n os.path.join(\"runs\", \"run%d_parcel_data_%d.csv\" %\n (run_number, year))\n )\n\n if year == final_year:\n\n # do diff with initial year\n\n df2 = pd.read_csv(\n os.path.join(\"runs\", \"run%d_parcel_data_%d.csv\" %\n (run_number, initial_year)), index_col=\"parcel_id\")\n\n for col in df.columns:\n\n if col in [\"x\", \"y\", \"first_building_type_id\"]:\n continue\n\n df[col] = df[col] - df2[col]\n\n df.to_csv(\n os.path.join(\"runs\", \"run%d_parcel_data_diff.csv\" %\n run_number)\n )\n\n\[email protected](\"travel_model_output\")\ndef travel_model_output(parcels, households, jobs, buildings,\n zones, homesales, year, summary, coffer,\n zone_forecast_inputs, run_number,\n taz, base_year_summary_taz):\n # using the following conditional b/c `year` is used to pull a column\n # from a csv based on a string of the year in add_population()\n # and in add_employment() and 2009 is the\n # 'base'/pre-simulation year, as is the 2010 value in the csv.\n if year == 2009:\n year = 2010\n base = True\n else:\n base = False\n\n if year in [2010, 2015, 2020, 2025, 2030, 2035, 2040]:\n df = taz\n taz_df = pd.DataFrame(index=zones.index)\n taz_df[\"sd\"] = df.sd\n taz_df[\"zone\"] = df.index\n taz_df[\"county\"] = df.county\n taz_df[\"agrempn\"] = df.agrempn\n taz_df[\"fpsempn\"] = df.fsempn\n taz_df[\"herempn\"] = df.herempn\n taz_df[\"retempn\"] = df.retempn\n taz_df[\"totemp\"] = df.totemp\n taz_df[\"mwtempn\"] = df.mwtempn\n taz_df[\"othempn\"] = df.othempn\n taz_df[\"hhincq1\"] = df.hhinq1\n taz_df[\"hhincq2\"] = df.hhinq2\n taz_df[\"hhincq3\"] = df.hhinq3\n taz_df[\"hhincq4\"] = df.hhinq4\n taz_df[\"shpop62p\"] = df.shpop62p\n taz_df[\"tothh\"] = df.tothh\n taz_df[\"gqpop\"] = df.gqpop.fillna(0)\n taz_df[\"mfdu\"] = df.mfdu\n taz_df[\"sfdu\"] = df.sfdu\n taz_df[\"area_type\"] = df.areatype\n taz_df[\"ciacre_unweighted\"] = df.ciacre\n taz_df[\"resacre_unweighted\"] = df.resacre\n taz_df[\"ciacre\"] = scaled_ciacre(\n base_year_summary_taz.CIACRE_UNWEIGHTED,\n df.ciacre)\n taz_df[\"resacre\"] = scaled_resacre(\n base_year_summary_taz.RESACRE_UNWEIGHTED,\n df.resacre)\n taz_df[\"totacre\"] = df.totacre\n taz_df[\"totemp\"] = df.totemp\n taz_df[\"tothh\"] = df.tothh\n taz_df[\"zone\"] = df.index\n\n taz_df = add_population(taz_df, year)\n # total population = group quarters plus households population\n taz_df[\"totpop\"] = 
taz_df.hhpop + taz_df.gqpop\n taz_df[\"totpop\"] = taz_df.totpop.fillna(0)\n taz_df = add_employment(taz_df, year)\n taz_df = add_age_categories(taz_df, year)\n\n orca.add_table(\"travel_model_output\", taz_df, year)\n summary.add_zone_output(taz_df, \"travel_model_output\", year)\n if sys.platform != 'win32':\n summary.write_zone_output()\n\n add_xy_config = {\n \"xy_table\": \"parcels\",\n \"foreign_key\": \"parcel_id\",\n \"x_col\": \"x\",\n \"y_col\": \"y\"\n }\n # otherwise it loses precision\n if summary.parcel_output is not None and \\\n \"geom_id\" in summary.parcel_output:\n summary.parcel_output[\"geom_id\"] = \\\n summary.parcel_output.geom_id.astype('str')\n summary.write_parcel_output(add_xy=add_xy_config)\n\n # travel model csv\n if base is False:\n travel_model_csv = \\\n \"runs/run{}_taz_summaries_{}.csv\".format(run_number, year)\n elif base is True:\n travel_model_csv = \\\n \"runs/run{}_taz_summaries_{}.csv\".format(run_number, 2009)\n\n # uppercase columns to match travel model template\n taz_df.columns = \\\n [x.upper() for x in taz_df.columns]\n\n taz_df.fillna(0).to_csv(travel_model_csv)\n\n\ndef scaled_ciacre(mtcc, us_outc):\n zfi = zone_forecast_inputs()\n abgc = zfi.ciacre10_abag\n sim_difference = [us_outc - mtcc][0]\n sim_difference[sim_difference < 0] = 0\n combined_acres = abgc + sim_difference\n return combined_acres\n\n\ndef scaled_resacre(mtcr, us_outr):\n zfi = zone_forecast_inputs()\n abgr = zfi.resacre10_abag\n sim_difference = [us_outr - mtcr][0]\n sim_difference[sim_difference < 0] = 0\n combined_acres = abgr + sim_difference\n return combined_acres\n\n\ndef zone_forecast_inputs():\n return pd.read_csv(os.path.join('data', 'zone_forecast_inputs.csv'),\n index_col=\"zone_id\")\n\n\ndef regional_controls():\n return pd.read_csv(os.path.join('data', 'regional_controls.csv'),\n index_col=\"year\")\n\n\ndef add_population(df, year):\n rc = regional_controls()\n target = rc.totpop.loc[year] - df.gqpop.sum()\n\n zfi = zone_forecast_inputs()\n s = df.tothh * zfi.meanhhsize\n\n s = scale_by_target(s, target, .15)\n\n df[\"hhpop\"] = round_series_match_target(s, target, 0)\n df[\"hhpop\"] = df.hhpop.fillna(0)\n return df\n\n# add employemnt to the dataframe - this uses a regression with\n# estimated coefficients done by @mkreilly\n\n\ndef add_employment(df, year):\n\n hhs_by_inc = df[[\"hhincq1\", \"hhincq2\", \"hhincq3\", \"hhincq4\"]]\n hh_shares = hhs_by_inc.divide(hhs_by_inc.sum(axis=1), axis=\"index\")\n\n zfi = zone_forecast_inputs()\n\n empshare = 0.46381 * hh_shares.hhincq1 + 0.49361 * hh_shares.hhincq2 +\\\n 0.56938 * hh_shares.hhincq3 + 0.29818 * hh_shares.hhincq4 +\\\n zfi.zonal_emp_sh_resid10\n\n # I really don't think more than 70% of people should be employed\n # in a given zone - this also makes sure that the employed residents\n # is less then the total population (after scaling) - if the\n # assertion below is triggered you can fix it by reducing this\n # .7 even a little bit more\n empshare = empshare.fillna(0).clip(.3, .7)\n\n empres = empshare * df.totpop\n\n rc = regional_controls()\n target = rc.empres.loc[year]\n\n empres = scale_by_target(empres, target)\n\n df[\"empres\"] = round_series_match_target(empres, target, 0)\n\n # this should really make the assertion below pass, but this now\n # only occurs very infrequently\n df[\"empres\"] = df[[\"empres\", \"totpop\"]].min(axis=1)\n\n # make sure employed residents is less than total residents\n assert (df.empres <= df.totpop).all()\n\n return df\n\n\n# add age categories necessary for the 
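# --- editorial aside: illustrative sketch, not part of the original file ----
# add_population() and add_employment() above lean on scale_by_target() and
# round_series_match_target() imported from utils and not shown in this dump.
# The helpers below are hedged guesses at the behaviour their names and call
# sites suggest; the real implementations may differ (e.g. the extra argument
# passed to scale_by_target is presumably a bound on how far the scaling may
# move the series).
def _scale_by_target_sketch(s, target):
    # rescale so the series sums to the regional control total
    return s * (target / s.sum())


def _round_match_target_sketch(s, target):
    # round to integers while preserving the exact total; assumes s already
    # sums to roughly `target` (largest-remainder style rounding)
    floored = np.floor(s)
    leftover = int(round(target - floored.sum()))
    winners = (s - floored).sort_values(ascending=False).index[:leftover]
    floored.loc[winners] += 1
    return floored.astype(int)
# ----------------------------------------------------------------------------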
TM\ndef add_age_categories(df, year):\n zfi = zone_forecast_inputs()\n rc = regional_controls()\n\n seed_matrix = zfi[[\"sh_age0004\", \"sh_age0519\", \"sh_age2044\",\n \"sh_age4564\", \"sh_age65p\"]].\\\n mul(df.totpop, axis='index').as_matrix()\n\n row_marginals = df.totpop.values\n agecols = [\"age0004\", \"age0519\", \"age2044\", \"age4564\", \"age65p\"]\n col_marginals = rc[agecols].loc[year].values\n\n target = df.totpop.sum()\n col_marginals = scale_by_target(pd.Series(col_marginals),\n target).round().astype('int')\n\n seed_matrix[seed_matrix == 0] = .1\n seed_matrix[row_marginals == 0, :] = 0\n\n mat = simple_ipf(seed_matrix, col_marginals, row_marginals)\n agedf = pd.DataFrame(mat)\n agedf.columns = [col.upper() for col in agecols]\n agedf.index = zfi.index\n\n for ind, row in agedf.iterrows():\n target = df.totpop.loc[ind]\n row = row.round()\n agedf.loc[ind] = round_series_match_target(row, target, 0)\n\n for col in agedf.columns:\n df[col] = agedf[col]\n\n return df\n", "id": "8029984", "language": "Python", "matching_score": 3.865678548812866, "max_stars_count": 0, "path": "bayarea_urbansim/baus/summaries.py" }, { "content": "import pandas as pd\nfrom pandas import ExcelWriter\n\ncounties_numbers_to_names = {\n 3: \"Santa Clara\",\n 4: \"Alameda\",\n 5: \"Contra Costa\",\n 2: \"San Mateo\",\n 8: \"Sonoma\",\n 1: \"San Francisco\",\n 6: \"Solano\",\n 9: \"Marin\",\n 7: \"Napa\"\n}\n\ncounties_map = pd.read_csv(\"data/taz_geography.csv\", index_col=\"zone\").\\\n\tcounty.map(counties_numbers_to_names)\n\nwriter = ExcelWriter('county_output.xlsx')\n\nparcels_to_counties = pd.HDFStore(\"data/2015_09_01_bayarea_v3.h5\", \"r\").\\\n\tparcels.zone_id.map(counties_map)\n\nfor run in range(1308, 1312):\n\n\tdf = pd.read_csv(\"http://urbanforecast.com/runs/\"\\\n\t\t\"run%d_parcel_output.csv\" % run)\n\tdf[\"county\"] = df.parcel_id.map(parcels_to_counties)\n\tgrowthinpdas = df[(df.building_type_id <= 3) & (df.pda.notnull())].\\\n\t groupby(\"county\").net_units.sum()\n\tgrowthnotinpdas = df[(df.building_type_id <= 3) & (df.pda.isnull())].\\\n\t groupby(\"county\").net_units.sum()\n\tpctgrowthinpdas = growthinpdas / (growthnotinpdas+growthinpdas)\n\tprint pctgrowthinpdas\n\n\tbaseyear = pd.read_csv(\"output/baseyear_taz_summaries_2010.csv\")\n\tbaseyear[\"county\"] = baseyear.zone_id.map(counties_map)\n\n\toutyear = pd.read_csv(\"http://urbanforecast.com/runs/\"\\\n\t\t\"run%d_taz_summaries_2040.csv\" % run)\n\toutyear[\"county\"] = outyear.zone_id.map(counties_map)\n\n\thhpctgrowth = outyear.groupby(\"county\").TOTPOP.sum() / \\\n\t\tbaseyear.groupby(\"county\").TOTPOP.sum() - 1\n\n\ts = outyear.groupby(\"county\").TOTPOP.sum() - \\\n\t\tbaseyear.groupby(\"county\").TOTPOP.sum()\n\thhgrowthshare = s / s.sum()\n\n\temppctgrowth = outyear.groupby(\"county\").TOTEMP.sum() / \\\n\t\tbaseyear.groupby(\"county\").TOTEMP.sum() - 1\n\n\ts = outyear.groupby(\"county\").TOTEMP.sum() - \\\n\t\tbaseyear.groupby(\"county\").TOTEMP.sum()\n\tempgrowthshare = s / s.sum()\n\n\tgrowthinunits = outyear.eval(\"SFDU + MFDU\").groupby(outyear.county).sum() - \\\n\t\tbaseyear.eval(\"SFDU + MFDU\").groupby(baseyear.county).sum()\n\n\tgrowthinmultifamily = outyear.groupby(outyear.county).MFDU.sum() - \\\n\t\tbaseyear.groupby(baseyear.county).MFDU.sum()\n\tpct_multifamily_growth = growthinmultifamily / growthinunits\n\n\tdf = pd.DataFrame({\n\t\t\"pct_growth_in_pdas\": pctgrowthinpdas,\n\t\t\"hh_pct_growth\": hhpctgrowth,\n\t\t\"hh_growth_share\": hhgrowthshare,\n\t\t\"emp_pct_growth\": 
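# --- editorial aside: illustrative sketch, not part of the original file ----
# (Refers to add_age_categories in baus/summaries.py above, which balances the
# TAZ age matrix with simple_ipf from utils -- not shown in this dump.)
# A minimal sketch of plain iterative proportional fitting; the name, argument
# order and the strictly-positive-seed assumption are all editorial.
def _ipf_sketch(seed, row_targets, col_targets, iters=25):
    import numpy as np
    mat = np.array(seed, dtype=float)
    rows = np.asarray(row_targets, dtype=float)
    cols = np.asarray(col_targets, dtype=float)
    for _ in range(iters):
        mat *= (rows / mat.sum(axis=1))[:, None]   # fit row sums
        mat *= (cols / mat.sum(axis=0))[None, :]   # fit column sums
    return mat  # rows and columns approach both sets of marginals
# ----------------------------------------------------------------------------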
emppctgrowth,\n\t\t\"emp_growth_share\": empgrowthshare,\n\t\t\"growth_in_units\": growthinunits.astype('int'),\n\t\t\"pct_multifamily_growth\": pct_multifamily_growth.clip(upper=1.0)\n\t})\n\n\tdf.index.name = None\n\n\tdf.to_excel(writer, 'run%d' % run, float_format=\"%.2f\")\n\n", "id": "9093586", "language": "Python", "matching_score": 1.201983094215393, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/county_summaries.py" }, { "content": "import osmnx as ox\nimport os\nimport requests\nimport zipfile\nimport geopandas as gpd\n\nox.config(use_cache=True, log_console=True, data_folder='../data')\n\n# point to the shapefile for counties\ncounties_shapefile_url = 'http://www2.census.gov/geo/tiger/' + \\\n 'GENZ2016/shp/cb_2016_us_county_500k.zip'\n\n# identify bay area counties by fips code\nbayarea = {'Alameda': '001',\n 'Contra Costa': '013',\n 'Marin': '041',\n 'Napa': '055',\n 'San Francisco': '075',\n 'San Mateo': '081',\n 'Santa Clara': '085',\n 'Solano': '095',\n 'Sonoma': '097'}\n\ncounties_shapefile_zip = counties_shapefile_url[\n counties_shapefile_url.rfind('/') + 1:]\ncounties_shapefile_dir = counties_shapefile_zip[\n : counties_shapefile_zip.rfind('.zip')]\nif not os.path.exists(counties_shapefile_dir):\n response = requests.get(counties_shapefile_url)\n with open(counties_shapefile_zip, 'wb') as f:\n f.write(response.content)\n with zipfile.ZipFile(counties_shapefile_zip, 'r') as zip_file:\n zip_file.extractall(counties_shapefile_dir)\n os.remove(counties_shapefile_zip)\n\ncounties = gpd.read_file(counties_shapefile_dir)\n\n# retain only those tracts that are in the bay area counties\nmask = (counties['STATEFP'] == '06') & (counties['COUNTYFP'].isin(\n bayarea.values()))\ngdf_bay = counties[mask]\n\nbayarea_polygon = gdf_bay.unary_union\n\n# get the convex hull, otherwise we'll cut out bridges over the bay\nbayarea_polygon = bayarea_polygon.convex_hull\nbayarea_polygon_proj, crs = ox.project_geometry(bayarea_polygon)\n\n# get the simplified graph for the drive network\nG = ox.graph_from_polygon(\n bayarea_polygon, network_type='drive', simplify=False)\n\n# filter way types\ntypes = ['motorway', 'motorway_link', 'trunk', 'trunk_link',\n 'primary', 'primary_link', 'secondary', 'secondary_link',\n 'tertiary', 'tertiary_link', 'unclassified', 'road']\n\nminor_streets = [(u, v, k) for u, v, k, d in G.edges(\n keys=True, data=True) if d['highway'] not in types]\n\n# remove minor streets and retain only the largest connected component subgraph\nG_ter = G\nG_ter.remove_edges_from(minor_streets)\nG_ter = ox.remove_isolated_nodes(G_ter)\nG_ter_connected = ox.get_largest_component(G_ter, strongly=True)\n\n# then simplify the graph now that we have only the edge types we want\nG_ter_simp = ox.simplify_graph(G_ter_connected, strict=True)\n\n\n# create a unique ID for each edge because osmid can\n# hold multiple values due to topology simplification\ni = 0\nfor u, v, k, d in G_ter_simp.edges(data=True, keys=True):\n d['uniqueid'] = i\n i += 1\n\n# convert to two-way\nH = ox.get_undirected(G_ter_simp)\n\n# save graph as OSM\nox.save_graph_osm(\n H, oneway=False, filename='bay_area_simplified_tertiary_strongly_2_way_network.osm')\n", "id": "5157288", "language": "Python", "matching_score": 1.249389886856079, "max_stars_count": 1, "path": "spring-2019-models/scripts/generate_beam_osm.py" }, { "content": "import pandas as pd\nfrom lxml import etree\nfrom tqdm import tqdm\nimport sys\n\n\nnode_attrs = [\n 'id', 'timestamp', 'uid', 'user', 'version', 'changeset', 'lat', 'lon']\nnode_tags 
= ['highway']\nedge_attrs = ['id', 'timestamp', 'uid', 'user', 'version', 'changeset']\nedge_tags = ['highway', 'lanes', 'maxspeed', 'name', 'oneway']\n\n\ndef pre_process_nodes(nodes):\n\n # convert NaNs to string\n nodes.fillna('', inplace=True)\n\n # rename columns per osm specification\n nodes.rename(columns={'osmid': 'id', 'x': 'lon', 'y': 'lat'}, inplace=True)\n\n # add empty columns for attributes not already in the df\n for attr in node_attrs + node_tags:\n if attr not in nodes.columns:\n nodes[attr] = ''\n\n # convert all datatypes to str\n nodes = nodes.applymap(str)\n\n return nodes\n\n\ndef pre_process_edges(edges):\n\n # convert NaNs to string\n edges.fillna('', inplace=True)\n\n # rename columns per osm specification\n edges.rename(columns={'index': 'id'}, inplace=True)\n\n # add empty columns for attributes/tags not already in the df\n for attr in edge_attrs + edge_tags:\n if attr not in edges.columns:\n edges[attr] = ''\n\n # convert all datatypes to str\n edges = edges.applymap(str)\n\n return edges\n\n\ndef make_osm_root_element():\n root = etree.Element('osm')\n return root\n\n\ndef append_nodes(rootElement, nodes):\n\n for i, row in tqdm(nodes.iterrows(), total=len(nodes)):\n node = etree.SubElement(\n rootElement, 'node', attrib=row[node_attrs].to_dict())\n for tag in node_tags:\n etree.SubElement(\n node, 'tag', attrib={'k': tag, 'v': row[tag]})\n\n\ndef append_edges(rootElement, edges):\n\n for i, row in tqdm(edges.iterrows(), total=len(edges)):\n edge = etree.SubElement(\n rootElement, 'way', attrib=row[edge_attrs].to_dict())\n etree.SubElement(edge, 'nd', attrib={'ref': row['u']})\n etree.SubElement(edge, 'nd', attrib={'ref': row['v']})\n for tag in edge_tags:\n etree.SubElement(\n edge, 'tag', attrib={'k': tag, 'v': row[tag]})\n\n\ndef write_to_osm(rootElement, outpath):\n et = etree.ElementTree(rootElement)\n et.write(outpath, pretty_print=True)\n\n\nif __name__ == \"__main__\":\n\n nodes = pd.read_csv(sys.argv[1])\n edges = pd.read_csv(sys.argv[2]).reset_index()\n outpath = sys.argv[3]\n\n processed_nodes = pre_process_nodes(nodes)\n processed_edges = pre_process_edges(edges)\n root = make_osm_root_element()\n append_nodes(root, processed_nodes)\n append_edges(root, processed_edges)\n\n write_to_osm(root, outpath)\n", "id": "9908295", "language": "Python", "matching_score": 0.8746969699859619, "max_stars_count": 1, "path": "spring-2019-models/scripts/network_to_osm.py" }, { "content": "\n# phase one just gets rid of a bunch of extra field names and writes to geojson\n'''\nimport geopandas as gpd\nfrom fiona.crs import from_epsg\ngdf = gpd.GeoDataFrame.from_file('/home/ubuntu/data/avgload5period.shp')\ngdf.crs = from_epsg(3740)\ngdf = gdf.to_crs(epsg=4326)\n\ngdf = gdf[[\"A\", \"B\", \"CTIMAM\", \"CTIMEA\", \"CTIMEV\", \"CTIMMD\",\n \"CTIMPM\", \"geometry\"]]\n\nprint gdf.columns\n\nopen(\"out.json\", \"w\").write(gdf.to_json())\n'''\n\n# phase 2 reads in the json as json and converts to the expected hdf format\n# docs for format http://udst.github.io/pandana/tutorial.html\nimport pandas as pd\nimport json\n\nstore = pd.HDFStore('tmnet.h5')\n\nnodes = {}\nedges = []\n\nfor feature in json.load(open('out.json'))['features']:\n\n edge = feature[\"properties\"]\n edge[\"from\"] = edge[\"A\"]\n edge[\"to\"] = edge[\"B\"]\n del edge[\"A\"]\n del edge[\"B\"]\n edges.append(edge)\n\n p1 = feature[\"geometry\"][\"coordinates\"][0]\n p2 = feature[\"geometry\"][\"coordinates\"][1]\n\n nodes[edge[\"from\"]] = {\"x\": p1[0], \"y\": p1[1]}\n nodes[edge[\"to\"]] = {\"x\": 
p2[0], \"y\": p2[1]}\n\nstore[\"nodes\"] = pd.DataFrame(nodes.values(), index=nodes.keys())\nstore[\"edges\"] = pd.DataFrame(edges)\n\nprint store[\"nodes\"].describe()\nprint store[\"nodes\"].index\nprint store[\"edges\"].describe()\n", "id": "1525628", "language": "Python", "matching_score": 2.2590296268463135, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/make_net.py" }, { "content": "import pandas as pd\nimport geopandas as gpd\n\ndf = pd.read_csv('/var/www/html/scratchpad/bayarea_softsites.csv')\ndf.set_index('geom_id', inplace=True)\n\ngdf = gpd.GeoDataFrame.from_file('/home/ubuntu/data/sfr.shp')\n\ngdf.set_index('GEOM_ID', inplace=True)\ngdf = gdf.to_crs(epsg=4326)\n\nfor col in df.columns:\n gdf[col] = df[col]\n\ngdf = gdf[gdf.zoned_du > 0]\n\nprint len(gdf)\n\ngdf = gdf.reset_index()\ngdf[\"GEOM_ID\"] = gdf.GEOM_ID.astype('int')\n\nopen('out.json', 'w').write(gdf.to_json())\n\n\n# gdf[gdf.COUNTY_ID == 75].to_file('/home/ubuntu/data/sfr.shp')\n", "id": "5217736", "language": "Python", "matching_score": 0.7931978702545166, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/make_shapefile.py" }, { "content": "import os\nimport logging\nimport gzip\nimport shutil\nimport pandas as pd\n\nlogger = logging.getLogger(__name__)\n\n\ndef copy_plans_from_asim(settings, year, replanning_iteration_number=0):\n asim_output_data_dir = settings['asim_local_output_folder']\n beam_scenario_folder = os.path.join(\n settings['beam_local_input_folder'],\n settings['region'],\n settings['beam_scenario_folder'])\n\n def copy_with_compression_asim_file_to_beam(asim_file_name, beam_file_name):\n asim_file_path = os.path.join(asim_output_data_dir, asim_file_name)\n beam_file_path = os.path.join(beam_scenario_folder, beam_file_name)\n logger.info(\"Copying asim file %s to beam input scenario file %s\", asim_file_path, beam_file_path)\n\n with open(asim_file_path, 'rb') as f_in, gzip.open(\n beam_file_path, 'wb') as f_out:\n f_out.writelines(f_in)\n\n def merge_only_updated_households(asim_file_path, beam_file_path):\n original = pd.read_csv(beam_file_path)\n updated = pd.read_csv(asim_file_path)\n unchanged = original.loc[~original.household_id.isin(updated.household_id.unique()), :]\n final = pd.concat([updated, unchanged])\n final.to_csv(beam_file_path, compression='gzip')\n\n merge_only_updated_households('final_plans.csv', 'plans.csv.gz')\n if replanning_iteration_number == 0:\n copy_with_compression_asim_file_to_beam('final_households.csv', 'households.csv.gz')\n copy_with_compression_asim_file_to_beam('final_persons.csv', 'persons.csv.gz')\n\n if settings.get('final_asim_plans_folder', False):\n beam_local_plans = os.path.join(beam_scenario_folder, 'plans.csv.gz')\n final_plans_name = f\"final_plans_{year}_{replanning_iteration_number:02d}.csv.gz\"\n final_plans_location = os.path.join(settings['final_asim_plans_folder'], final_plans_name)\n logger.info(\"Copying asim plans %s to final asim folder %s\", beam_local_plans, final_plans_location)\n shutil.copyfile(beam_local_plans, final_plans_location)\n\n return\n", "id": "9629996", "language": "Python", "matching_score": 1.7859095335006714, "max_stars_count": 3, "path": "pilates/beam/preprocessor.py" }, { "content": "import os\nfrom pathlib import PurePath\nimport importlib\nfrom lcog.tests.config import config\n\nprint('Processing and running {} jupyter '\n 'notebook files: {}...'.format(len(config.TEST_NB_LIST),\n config.TEST_NB_LIST))\nfor jupyter_nb_path in config.TEST_NB_LIST:\n # TODO: jupyter_nb file name must not have any spaces, 
if they do process\n # will fail. Consider adding replace whitespace with underscores to\n # support spaces in file names\n nb_file_name = PurePath(jupyter_nb_path).parts[-1]\n if nb_file_name not in config.SKIP_NB:\n config.convert_nb_to_py(input_file=jupyter_nb_path,\n output_dir=config.PY_OUTPUT_DIR)\n input_file_name = nb_file_name.replace('.ipynb', '.py')\n input_file = os.path.join(config.PY_OUTPUT_DIR, input_file_name)\n output_file_name = config.remove_magic_lines(input_file=input_file)\n py_file_to_run = output_file_name.replace('.py', '')\n module_name = 'lcog.{}'.format(py_file_to_run)\n print('----- Running file: {} -----'.format(output_file_name))\n importlib.import_module(module_name, package=None)\n print('----- Completed running file: {} -----'.format(\n output_file_name))\nprint('Completed processing and running jupyter notebook files')\n", "id": "10179271", "language": "Python", "matching_score": 0.39676570892333984, "max_stars_count": 2, "path": "bayarea/convert_run_nb.py" }, { "content": "from urbansim_explorer import sim_explorer as se\nimport sys\nrun_num = int(sys.argv[1])\nse.start(\n 'runs/run%d_simulation_output.json' % run_num,\n 'runs/run%d_parcel_output.csv' % run_num,\n write_static_file='/var/www/html/sim_explorer%d.html' % run_num\n)\n", "id": "11617163", "language": "Python", "matching_score": 2.3660547733306885, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/sim_explorer.py" }, { "content": "from urbansim_explorer import sim_explorer as se\nimport sys\n\nrunnum = int(sys.argv[1])\n\nparcel_output = 'runs/run%d_parcel_output.csv' % runnum\nzone_output = 'runs/run%d_simulation_output.json' % runnum\noutfile = '/var/www/html/sim_explorer%d.html' % runnum\n\nse.start(\n zone_output,\n parcel_output,\n port=8080,\n host='0.0.0.0',\n write_static_file=outfile\n)\n", "id": "3127845", "language": "Python", "matching_score": 0.051864590495824814, "max_stars_count": 0, "path": "bayarea_urbansim/scripts/explorer.py" }, { "content": "from setuptools import setup, find_packages\n\nwith open('requirements.txt') as f:\n requirements = f.readlines()\nrequirements = [item.strip() for item in requirements]\n\nsetup(\n name='activitysynth',\n version='0.1.dev0',\n description='Lightweight activity plan generation',\n author='UAL',\n author_email='<EMAIL>',\n url='https://github.com/ual/activitysynth',\n classifiers=[\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: BSD License'\n ],\n packages=find_packages(),\n install_requires=requirements,\n dependency_links=[\n 'git+https://github.com/udst/choicemodels/archive/master.zip#egg=choicemodels',\n 'git+https://github.com/udst/urbansim_templates/archive/master.zip#egg=urbansim_templates']\n)", "id": "6675547", "language": "Python", "matching_score": 3.389937400817871, "max_stars_count": 1, "path": "setup.py" }, { "content": "# Install setuptools if not installed.\ntry:\n import setuptools\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n\nfrom setuptools import setup, find_packages\n\n\n# read README as the long description\nwith open('README.md', 'r') as f:\n long_description = f.read()\n\nwith open('bayarea/requirements.txt') as f:\n requirements_lines = f.readlines()\ninstall_requires = [r.strip() for r in requirements_lines]\n\nsetup(\n name='bayarea',\n version='0.1dev',\n description='Bay Area 
UrbanSim implementation',\n long_description=long_description,\n author='Urban Analytics Lab / UrbanSim Inc.',\n author_email='<EMAIL>',\n license='BSD',\n url='https://github.com/ual/bayarea',\n classifiers=[\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: BSD License'\n ],\n packages=find_packages(exclude=['*.tests']),\n install_require=install_requires\n)\n", "id": "4718351", "language": "Python", "matching_score": 0.6378797888755798, "max_stars_count": 2, "path": "setup.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\n__version__ = version = '0.3dev'\n", "id": "8157414", "language": "Python", "matching_score": 2.2255938053131104, "max_stars_count": 0, "path": "activitysim/activitysim/__init__.py" }, { "content": "# ActivitySim\n# See full license in LICENSE.txt.\n\n__version__ = version = '0.1dev'\n", "id": "1703136", "language": "Python", "matching_score": 2.2255938053131104, "max_stars_count": 0, "path": "activitysim/activitysim/core/__init__.py" } ]
2.756227
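The `add_age_categories` step in the summaries module above balances a seed matrix of zonal age shares against zone populations (row marginals) and regional age controls (column marginals) by calling `simple_ipf`, which lives elsewhere in that repository and is not shown in this record. Below is a minimal sketch of what such an iterative proportional fitting routine typically looks like, assuming NumPy arrays; the name `simple_ipf_sketch`, the tolerance, and the iteration cap are illustrative assumptions, not the actual implementation used by the model.

```python
import numpy as np


def simple_ipf_sketch(seed, col_marginals, row_marginals,
                      tolerance=1e-3, max_iterations=50):
    # Iterative proportional fitting: alternately rescale rows and columns
    # of the seed matrix until both sets of marginals are (roughly) matched.
    mat = seed.astype(float).copy()
    for _ in range(max_iterations):
        # Scale rows to match row marginals (guarding against all-zero rows,
        # which the caller above creates for zones with zero population).
        row_sums = mat.sum(axis=1)
        row_factors = np.where(row_sums > 0, row_marginals / row_sums, 0.0)
        mat = mat * row_factors[:, np.newaxis]

        # Scale columns to match column marginals.
        col_sums = mat.sum(axis=0)
        col_factors = np.where(col_sums > 0, col_marginals / col_sums, 0.0)
        mat = mat * col_factors[np.newaxis, :]

        # Stop once the row marginals are satisfied to within the tolerance.
        if np.abs(mat.sum(axis=1) - row_marginals).max() < tolerance:
            break
    return mat
```

The rounding of each fitted row back to integer zone totals is then handled by `round_series_match_target`, as in the calling code above.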
Bulat-Gumerov
[ { "content": "# Copyright 2021 Cloudera, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function\nfrom typing import Optional\nimport re\n\n__metaclass__ = type\n\n\nclass FilterModule(object):\n\n def filters(self):\n return {\n 'flatten_dict_list': self.flatten_dict_list,\n 'extract_custom_roles': self.extract_custom_roles,\n 'extract_custom_role_groups': self.extract_custom_role_groups,\n 'extract_products_from_manifests': self.extract_products_from_manifests,\n 'extract_role_and_group': self.extract_role_and_group,\n 'format_database_type': self.format_database_type,\n 'get_product_version': self.get_product_version,\n 'get_major_version': self.get_major_version, # Unused\n 'append_database_port': self.append_database_port,\n 'default_database_port': self.default_database_port,\n 'get_database_encoding_mysql': self.get_database_encoding_mysql,\n 'get_database_collation_mysql': self.get_database_collation_mysql,\n 'filter_null_configs': self.filter_null_configs,\n 'to_ldap_type_enum': self.to_ldap_type_enum,\n 'extract_parcel_urls': self.extract_parcel_urls,\n 'cluster_service_role_hosts': self.cluster_service_role_hosts,\n 'find_clusters': self.find_clusters\n }\n\n def flatten_dict_list(self, item, level=2, sep='_', show_index=False):\n \"\"\" flatten a structure of dicts and lists into a flat array\n\n e.g. 
{ \"a\": [1, 2, 3], \"b\": { \"c\": \"d\", \"e\": \"f\" } }\n with level=2\n becomes [\"a_1\", \"a_2\", \"a_3\", \"b_c\", \"b_d\"]\n \"\"\"\n\n state = []\n\n def _flatten_dict_list(i, l, parents):\n if l > 0:\n if isinstance(i, dict):\n for key, value in i.items():\n _flatten_dict_list(value, l - 1, parents + [str(key)])\n\n elif isinstance(i, list):\n for index, value in enumerate(i):\n if show_index:\n _flatten_dict_list(value, l, parents + [str(index)])\n else:\n _flatten_dict_list(value, l, parents)\n\n else:\n state.append(sep.join(parents + [str(i)]))\n\n if l == 0 and len(parents) > 0:\n state.append(sep.join(parents))\n\n _flatten_dict_list(item, level, [])\n\n return state\n\n def extract_products_from_manifests(self, manifests, os_distribution: Optional[str] = None):\n products = dict()\n for manifest in manifests:\n for parcel in manifest[\"parcels\"]:\n # fetch the full parcel name from the manifest\n full_parcel_name = str(parcel[\"parcelName\"])\n # the parcel OS distribution is between the last \"-\" and the \".parcel\" extension\n parcel_os_distribution = full_parcel_name[\n full_parcel_name.rindex(\"-\")\n + 1: full_parcel_name.rindex(\".parcel\")\n ]\n # take first parcel, strip off OS name and file extension\n parcel_name = re.sub(r\"-[a-z0-9]+\\.parcel$\", \"\", full_parcel_name)\n # the product name is before the first dash\n product = parcel_name[: parcel_name.index(\"-\")]\n if product not in products and (\n os_distribution == parcel_os_distribution or os_distribution is None\n ):\n # the version string is everything after the first dash\n version = parcel_name[parcel_name.index(\"-\") + 1:]\n products[product] = version\n return products\n\n def extract_parcel_urls(self, manifest_results):\n parcels = list()\n for result in manifest_results:\n manifest_url = result['invocation']['module_args']['url']\n base_url = '/'.join(manifest_url.rsplit('/')[:-1])\n parcel_names = [x['parcelName'] for x in result['json']['parcels']]\n parcels += ['/'.join([str(base_url), str(y)]) for y in parcel_names]\n return parcels\n\n def format_database_type(self, database_type):\n if database_type == \"mariadb\":\n return \"mysql\"\n return database_type.lower()\n\n def get_product_version(self, products, product_name):\n for product in products:\n if product['product'] == product_name:\n version = product['version']\n return version[:version.index('-')] if \"-\" in version else version\n\n def get_major_version(self, products, product_name):\n version = self.get_product_version(products, product_name)\n if version:\n return version.split('.')[0]\n\n def append_database_port(self, database_host, database_port=None):\n if \":\" not in database_host and database_port:\n return database_host + \":\" + database_port\n return database_host\n\n def default_database_port(self, database_type):\n if database_type == \"postgresql\":\n return 5432\n if database_type == \"mysql\" or database_type == \"mariadb\":\n return 3306\n if database_type == \"oracle\":\n return 1521\n return None\n\n def get_database_encoding_mysql(self, service_name):\n # workaround for https://jira.cloudera.com/browse/CDPD-9290\n if service_name == \"RANGER\":\n database_encoding = \"latin1\"\n else:\n database_encoding = \"utf8\"\n return database_encoding\n\n def get_database_collation_mysql(self, service_name):\n # workaround for https://jira.cloudera.com/browse/CDPD-9290\n if service_name == \"RANGER\":\n database_collation = \"latin1_swedish_ci\"\n else:\n database_collation = \"utf8_general_ci\"\n return 
database_collation\n\n def filter_null_configs(self, configs, existing_configs):\n filtered_configs = dict(configs)\n for item, value in configs.items():\n if item not in existing_configs and not value:\n del filtered_configs[item]\n return filtered_configs\n\n def to_ldap_type_enum(self, s):\n if s == \"AD\":\n return \"ACTIVE_DIRECTORY\"\n return s.replace(\" \", \"_\").upper()\n\n def cluster_service_role_hosts(self, cluster, hostvars, service, roles=None):\n candidate_templates = []\n\n if 'host_templates' in cluster:\n templates = cluster['host_templates']\n\n if roles:\n for role in roles:\n for t_name, t_services in templates.items():\n if service in t_services and role in t_services[service]:\n if t_name not in candidate_templates:\n candidate_templates.append(t_name)\n\n else:\n for t_name, t_services in templates.items():\n if service in t_services:\n candidate_templates.append(t_name)\n\n hosts = []\n for t_name in candidate_templates:\n t_hosts = [\n host\n for host, hostvar in hostvars.items()\n if host not in hosts\n if hostvar.get('host_template') == t_name]\n\n hosts = hosts + t_hosts\n\n return hosts\n\n def find_clusters(self, clusters, name):\n return [\n cluster\n for cluster in clusters\n if cluster.get('name') == name]\n\n def extract_role_and_group(self, role_spec):\n role = None\n template_group = \"BASE\"\n if '/' in role_spec:\n role = role_spec[:role_spec.index('/')]\n template_group = role_spec[role_spec.index('/')+1:]\n else:\n role = role_spec\n return (role, template_group)\n\n def extract_custom_roles(self, host_templates, service):\n custom_roles = set([])\n for role_mapping in host_templates.values():\n if service in role_mapping:\n for custom_role in filter(lambda x: '/' in x, role_mapping[service]):\n custom_roles.add(custom_role)\n return list(custom_roles)\n\n def extract_custom_role_groups(self, host_templates):\n custom_role_groups = set([])\n for role_mapping in host_templates.values():\n for (service, roles) in role_mapping.items():\n for custom_role in filter(lambda x: '/' in x, roles):\n custom_role_groups.add(\"-\".join([service.lower()] + custom_role.split(\"/\")))\n return list(custom_role_groups)\n", "id": "12822746", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "plugins/filter/filters.py" } ]
0
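The filter plugin above is written for Ansible, but each filter is a plain Python method on `FilterModule`, so its behaviour can be checked directly without an Ansible run. The driver below is a hypothetical sketch: the import path `filters` is an assumption (inside a collection the module is loaded as a filter plugin, not imported this way), and the printed results are traced from the implementation shown above.

```python
from filters import FilterModule  # hypothetical import path for a local check

f = FilterModule().filters()

# Flatten a nested dict/list structure into underscore-joined tokens.
print(f['flatten_dict_list']({"a": [1, 2, 3], "b": {"c": "d", "e": "f"}}, level=2))
# -> ['a_1', 'a_2', 'a_3', 'b_c', 'b_e']

# Split a role spec of the form ROLE/GROUP, defaulting the group to BASE.
print(f['extract_role_and_group']('DATANODE'))         # ('DATANODE', 'BASE')
print(f['extract_role_and_group']('NAMENODE/custom'))  # ('NAMENODE', 'custom')

# Map database types to their default ports.
print(f['default_database_port']('postgresql'))  # 5432
print(f['default_database_port']('mariadb'))     # 3306
```

Tracing `flatten_dict_list`, dict subtrees that bottom out at level 0 contribute their key paths, so the `"b"` branch yields `'b_c'` and `'b_e'`.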
predator-1-ml
[ { "content": "import keras\r\nfrom keras.datasets import mnist\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten\r\nfrom keras.layers import Conv2D, MaxPooling2D\r\nfrom keras import backend as K\r\n\r\n# the data, split between train and test sets\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\r\n\r\nprint(x_train.shape, y_train.shape)\r\n\r\nx_train = x_train.reshape(x_train.shape[0], 28, 28, 1)\r\nx_test = x_test.reshape(x_test.shape[0], 28, 28, 1)\r\ninput_shape = (28, 28, 1)\r\n\r\n# convert class vectors to binary class matrices\r\ny_train = keras.utils.to_categorical(y_train, 10)\r\ny_test = keras.utils.to_categorical(y_test, 10)\r\n\r\nx_train = x_train.astype('float32')\r\nx_test = x_test.astype('float32')\r\nx_train /= 255\r\nx_test /= 255\r\nprint('x_train shape:', x_train.shape)\r\nprint(x_train.shape[0], 'train samples')\r\nprint(x_test.shape[0], 'test samples')\r\n\r\nbatch_size = 128\r\nnum_classes = 10\r\nepochs = 10\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(32, kernel_size=(5, 5),activation='relu',input_shape=input_shape))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(128, activation='relu'))\r\nmodel.add(Dropout(0.3))\r\nmodel.add(Dense(64, activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(num_classes, activation='softmax'))\r\n\r\nmodel.compile(loss=keras.losses.categorical_crossentropy,optimizer=keras.optimizers.Adadelta(),metrics=['accuracy'])\r\n\r\nhist = model.fit(x_train, y_train,batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(x_test, y_test))\r\nprint(\"The model has successfully trained\")\r\n\r\nscore = model.evaluate(x_test, y_test, verbose=0)\r\nprint('Test loss:', score[0])\r\nprint('Test accuracy:', score[1])\r\n\r\nmodel.save('mnist.h5')\r\nprint(\"Saving the model as mnist.h5\")\r\n\r\n", "id": "7332414", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "train_digit_recognizer.py" } ]
0
the-bets
[ { "content": "# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nimport functools\nimport http\nimport itertools\nimport json\nimport random\nimport sys\nfrom urllib.parse import quote, unquote_plus\n\nimport pytest\n\nimport falcon\nfrom falcon import media\nfrom falcon import testing\nfrom falcon import util\nfrom falcon.constants import MEDIA_JSON, MEDIA_MSGPACK, MEDIA_URLENCODED, MEDIA_YAML\nfrom falcon.util import deprecation, misc, structures, uri\n\nfrom _util import create_app, to_coroutine # NOQA\n\n\[email protected]\ndef app(asgi):\n return create_app(asgi)\n\n\ndef _arbitrary_uris(count, length):\n return (\n ''.join([random.choice(uri._ALL_ALLOWED) for _ in range(length)])\n for __ in range(count)\n )\n\n\[email protected](params=['bytearray', 'join_list'])\ndef decode_approach(request, monkeypatch):\n method = uri._join_tokens_list\n if request.param == 'bytearray':\n method = uri._join_tokens_bytearray\n monkeypatch.setattr(uri, '_join_tokens', method)\n return method\n\n\nclass TrackingJSONHandler(media.JSONHandler):\n def __init__(self):\n super().__init__()\n self.deserialize_count = 0\n\n def deserialize(self, *args, **kwargs):\n result = super().deserialize(*args, **kwargs)\n self.deserialize_count += 1\n return result\n\n async def deserialize_async(self, *args, **kwargs):\n result = await super().deserialize_async(*args, **kwargs)\n self.deserialize_count += 1\n return result\n\n\nclass TrackingMessagePackHandler(media.MessagePackHandler):\n def __init__(self):\n super().__init__()\n self.deserialize_count = 0\n\n def deserialize(self, *args, **kwargs):\n result = super().deserialize(*args, **kwargs)\n self.deserialize_count += 1\n return result\n\n async def deserialize_async(self, *args, **kwargs):\n result = await super().deserialize_async(*args, **kwargs)\n self.deserialize_count += 1\n return result\n\n\nclass TrackingFormHandler(media.URLEncodedFormHandler):\n def __init__(self):\n super().__init__()\n self.deserialize_count = 0\n\n def deserialize(self, *args, **kwargs):\n result = super().deserialize(*args, **kwargs)\n self.deserialize_count += 1\n return result\n\n async def deserialize_async(self, *args, **kwargs):\n result = await super().deserialize_async(*args, **kwargs)\n self.deserialize_count += 1\n return result\n\n\nclass TestFalconUtils:\n def setup_method(self, method):\n # NOTE(cabrera): for DRYness - used in uri.[de|en]code tests\n # below.\n self.uris = _arbitrary_uris(count=100, length=32)\n\n def test_deprecated_decorator(self):\n msg = 'Please stop using this thing. 
It is going away.'\n\n @util.deprecated(msg)\n def old_thing():\n pass\n\n with pytest.warns(UserWarning) as rec:\n old_thing()\n\n warn = rec.pop()\n assert msg in str(warn.message)\n\n def test_http_now(self):\n expected = datetime.utcnow()\n actual = falcon.http_date_to_dt(falcon.http_now())\n\n delta = actual - expected\n delta_sec = abs(delta.days * 86400 + delta.seconds)\n\n assert delta_sec <= 1\n\n def test_dt_to_http(self):\n assert (\n falcon.dt_to_http(datetime(2013, 4, 4)) == 'Thu, 04 Apr 2013 00:00:00 GMT'\n )\n\n assert (\n falcon.dt_to_http(datetime(2013, 4, 4, 10, 28, 54))\n == 'Thu, 04 Apr 2013 10:28:54 GMT'\n )\n\n def test_http_date_to_dt(self):\n assert falcon.http_date_to_dt('Thu, 04 Apr 2013 00:00:00 GMT') == datetime(\n 2013, 4, 4\n )\n\n assert falcon.http_date_to_dt('Thu, 04 Apr 2013 10:28:54 GMT') == datetime(\n 2013, 4, 4, 10, 28, 54\n )\n\n with pytest.raises(ValueError):\n falcon.http_date_to_dt('Thu, 04-Apr-2013 10:28:54 GMT')\n\n assert falcon.http_date_to_dt(\n 'Thu, 04-Apr-2013 10:28:54 GMT', obs_date=True\n ) == datetime(2013, 4, 4, 10, 28, 54)\n\n with pytest.raises(ValueError):\n falcon.http_date_to_dt('Sun Nov 6 08:49:37 1994')\n\n with pytest.raises(ValueError):\n falcon.http_date_to_dt('Nov 6 08:49:37 1994', obs_date=True)\n\n assert falcon.http_date_to_dt(\n 'Sun Nov 6 08:49:37 1994', obs_date=True\n ) == datetime(1994, 11, 6, 8, 49, 37)\n\n assert falcon.http_date_to_dt(\n 'Sunday, 06-Nov-94 08:49:37 GMT', obs_date=True\n ) == datetime(1994, 11, 6, 8, 49, 37)\n\n def test_pack_query_params_none(self):\n assert falcon.to_query_str({}) == ''\n\n def test_pack_query_params_one(self):\n assert falcon.to_query_str({'limit': 10}) == '?limit=10'\n\n assert falcon.to_query_str({'things': [1, 2, 3]}) == '?things=1,2,3'\n\n assert falcon.to_query_str({'things': ['a']}) == '?things=a'\n\n assert falcon.to_query_str({'things': ['a', 'b']}) == '?things=a,b'\n\n expected = (\n '?things=a&things=b&things=&things=None'\n '&things=true&things=false&things=0'\n )\n\n actual = falcon.to_query_str(\n {'things': ['a', 'b', '', None, True, False, 0]},\n comma_delimited_lists=False,\n )\n\n assert actual == expected\n\n def test_pack_query_params_several(self):\n garbage_in = {'limit': 17, 'echo': True, 'doit': False, 'x': 'val', 'y': 0.2}\n\n query_str = falcon.to_query_str(garbage_in)\n fields = query_str[1:].split('&')\n\n garbage_out = {}\n for field in fields:\n k, v = field.split('=')\n garbage_out[k] = v\n\n expected = {\n 'echo': 'true',\n 'limit': '17',\n 'x': 'val',\n 'y': '0.2',\n 'doit': 'false',\n }\n\n assert expected == garbage_out\n\n @pytest.mark.parametrize('csv', [True, False])\n @pytest.mark.parametrize(\n 'params',\n [\n {'a & b': 'a and b', 'b and c': 'b & c'},\n {'apples and oranges': '🍏 & 🍊'},\n {'garbage': ['&', '&+&', 'a=1&b=2', 'c=4&'], 'one': '1'},\n {'&': '&amp;', '™': '&trade;', '&&&': ['&amp;', '&amp;', '&amp;']},\n {'&': '%26', '&&': '%26', '&&&': ['%26', '%2', '%']},\n ],\n )\n def test_to_query_str_encoding(self, params, csv):\n query_str = falcon.to_query_str(params, comma_delimited_lists=csv, prefix=False)\n\n assert uri.parse_query_string(query_str, csv=csv) == params\n\n def test_uri_encode(self):\n url = 'http://example.com/v1/fizbit/messages?limit=3&echo=true'\n assert uri.encode(url) == url\n\n url = 'http://example.com/v1/fiz bit/messages'\n expected = 'http://example.com/v1/fiz%20bit/messages'\n assert uri.encode(url) == expected\n\n url = 'http://example.com/v1/fizbit/messages?limit=3&e\\u00e7ho=true'\n expected = 
'http://example.com/v1/fizbit/messages?limit=3&e%C3%A7ho=true'\n assert uri.encode(url) == expected\n\n # NOTE(minesja): Addresses #1872\n assert uri.encode('%26') == '%2526'\n assert uri.decode(uri.encode('%26')) == '%26'\n\n def test_uri_encode_double(self):\n url = 'http://example.com/v1/fiz bit/messages'\n expected = 'http://example.com/v1/fiz%20bit/messages'\n assert uri.encode_check_escaped(uri.encode_check_escaped(url)) == expected\n\n url = 'http://example.com/v1/fizbit/messages?limit=3&e\\u00e7ho=true'\n expected = 'http://example.com/v1/fizbit/messages?limit=3&e%C3%A7ho=true'\n assert uri.encode_check_escaped(uri.encode_check_escaped(url)) == expected\n\n url = 'http://example.com/v1/fiz%bit/mess%ages/%'\n expected = 'http://example.com/v1/fiz%25bit/mess%25ages/%25'\n assert uri.encode_check_escaped(uri.encode_check_escaped(url)) == expected\n\n url = 'http://example.com/%%'\n expected = 'http://example.com/%25%25'\n assert uri.encode_check_escaped(uri.encode_check_escaped(url)) == expected\n\n # NOTE(kgriffs): Specific example cited in GH issue\n url = 'http://something?redirect_uri=http%3A%2F%2Fsite'\n assert uri.encode_check_escaped(url) == url\n\n hex_digits = 'abcdefABCDEF0123456789'\n for c1 in hex_digits:\n for c2 in hex_digits:\n url = 'http://example.com/%' + c1 + c2\n encoded = uri.encode_check_escaped(uri.encode_check_escaped(url))\n assert encoded == url\n\n def test_uri_encode_value(self):\n assert uri.encode_value('abcd') == 'abcd'\n assert uri.encode_value('abcd') == 'abcd'\n assert uri.encode_value('ab cd') == 'ab%20cd'\n assert uri.encode_value('\\u00e7') == '%C3%A7'\n assert uri.encode_value('\\u00e7\\u20ac') == '%C3%A7%E2%82%AC'\n assert uri.encode_value('ab/cd') == 'ab%2Fcd'\n assert uri.encode_value('ab+cd=42,9') == 'ab%2Bcd%3D42%2C9'\n\n # NOTE(minesja): Addresses #1872\n assert uri.encode_value('%26') == '%2526'\n assert uri.decode(uri.encode_value('%26')) == '%26'\n\n def test_uri_decode(self, decode_approach):\n assert uri.decode('abcd') == 'abcd'\n assert uri.decode('ab%20cd') == 'ab cd'\n\n assert uri.decode('This thing is %C3%A7') == 'This thing is \\u00e7'\n\n assert (\n uri.decode('This thing is %C3%A7%E2%82%AC') == 'This thing is \\u00e7\\u20ac'\n )\n\n assert uri.decode('ab%2Fcd') == 'ab/cd'\n\n assert (\n uri.decode('http://example.com?x=ab%2Bcd%3D42%2C9')\n == 'http://example.com?x=ab+cd=42,9'\n )\n\n @pytest.mark.parametrize(\n 'encoded,expected',\n [\n ('ab%2Gcd', 'ab%2Gcd'),\n ('ab%2Fcd: 100% coverage', 'ab/cd: 100% coverage'),\n ('%s' * 100, '%s' * 100),\n ],\n )\n def test_uri_decode_bad_coding(self, encoded, expected, decode_approach):\n assert uri.decode(encoded) == expected\n\n @pytest.mark.parametrize(\n 'encoded,expected',\n [\n ('+%80', ' �'),\n ('+++%FF+++', ' � '), # impossible byte\n ('%fc%83%bf%bf%bf%bf', '������'), # overlong sequence\n ('%ed%ae%80%ed%b0%80', '������'), # paired UTF-16 surrogates\n ],\n )\n def test_uri_decode_bad_unicode(self, encoded, expected, decode_approach):\n assert uri.decode(encoded) == expected\n\n def test_uri_decode_unquote_plus(self, decode_approach):\n assert uri.decode('/disk/lost+found/fd0') == '/disk/lost found/fd0'\n assert uri.decode('/disk/lost+found/fd0', unquote_plus=True) == (\n '/disk/lost found/fd0'\n )\n assert uri.decode('/disk/lost+found/fd0', unquote_plus=False) == (\n '/disk/lost+found/fd0'\n )\n\n assert uri.decode('http://example.com?x=ab%2Bcd%3D42%2C9') == (\n 'http://example.com?x=ab+cd=42,9'\n )\n assert uri.decode(\n 'http://example.com?x=ab%2Bcd%3D42%2C9', unquote_plus=True\n ) 
== ('http://example.com?x=ab+cd=42,9')\n assert uri.decode(\n 'http://example.com?x=ab%2Bcd%3D42%2C9', unquote_plus=False\n ) == ('http://example.com?x=ab+cd=42,9')\n\n def test_prop_uri_encode_models_stdlib_quote(self):\n equiv_quote = functools.partial(quote, safe=uri._ALL_ALLOWED)\n for case in self.uris:\n expect = equiv_quote(case)\n actual = uri.encode(case)\n assert expect == actual\n\n def test_prop_uri_encode_value_models_stdlib_quote_safe_tilde(self):\n equiv_quote = functools.partial(quote, safe='~')\n for case in self.uris:\n expect = equiv_quote(case)\n actual = uri.encode_value(case)\n assert expect == actual\n\n def test_prop_uri_decode_models_stdlib_unquote_plus(self):\n stdlib_unquote = unquote_plus\n for case in self.uris:\n case = uri.encode_value(case)\n\n expect = stdlib_unquote(case)\n actual = uri.decode(case)\n assert expect == actual\n\n def test_unquote_string(self):\n assert uri.unquote_string('v') == 'v'\n assert uri.unquote_string('not-quoted') == 'not-quoted'\n assert uri.unquote_string('partial-quoted\"') == 'partial-quoted\"'\n assert uri.unquote_string('\"partial-quoted') == '\"partial-quoted'\n assert uri.unquote_string('\"partial-quoted\"') == 'partial-quoted'\n\n def test_parse_query_string(self):\n query_string = (\n 'a=http%3A%2F%2Ffalconframework.org%3Ftest%3D1'\n '&b=%7B%22test1%22%3A%20%22data1%22%'\n '2C%20%22test2%22%3A%20%22data2%22%7D'\n '&c=1,2,3'\n '&d=test'\n '&e=a,,%26%3D%2C'\n '&f=a&f=a%3Db'\n '&%C3%A9=a%3Db'\n )\n decoded_url = 'http://falconframework.org?test=1'\n decoded_json = '{\"test1\": \"data1\", \"test2\": \"data2\"}'\n\n result = uri.parse_query_string(query_string)\n assert result['a'] == decoded_url\n assert result['b'] == decoded_json\n assert result['c'] == ['1', '2', '3']\n assert result['d'] == 'test'\n assert result['e'] == ['a', '&=,']\n assert result['f'] == ['a', 'a=b']\n assert result['é'] == 'a=b'\n\n result = uri.parse_query_string(query_string, True)\n assert result['a'] == decoded_url\n assert result['b'] == decoded_json\n assert result['c'] == ['1', '2', '3']\n assert result['d'] == 'test'\n assert result['e'] == ['a', '', '&=,']\n assert result['f'] == ['a', 'a=b']\n assert result['é'] == 'a=b'\n\n @pytest.mark.parametrize(\n 'query_string,keep_blank,expected',\n [\n ('', True, {}),\n ('', False, {}),\n ('flag1&&&&&flag2&&&', True, {'flag1': '', 'flag2': ''}),\n ('flag1&&&&&flag2&&&', False, {}),\n ('malformed=%FG%1%Hi%%%a', False, {'malformed': '%FG%1%Hi%%%a'}),\n ('=', False, {}),\n ('==', False, {'': '='}),\n (\n '%==+==&&&&&&&&&%%==+=&&&&&&%0g%=%=',\n False,\n {'%': '= ==', '%%': '= =', '%0g%': '%='},\n ),\n ('%=&%%=&&%%%=', False, {}),\n ('%=&%%=&&%%%=', True, {'%': '', '%%': '', '%%%': ''}),\n ('+=&%+=&&%++=', True, {' ': '', '% ': '', '% ': ''}),\n ('=x=&=x=+1=x=&%=x', False, {'': ['x=', 'x= 1=x='], '%': 'x'}),\n (\n ''.join(\n itertools.chain.from_iterable(itertools.permutations('%=+&', 4))\n ),\n False,\n {\n '': ['%', ' %', '%', ' ', ' =%', '%', '% ', ' %'],\n ' ': ['=% ', ' %', '%'],\n '%': [' ', ' ', ' '],\n },\n ),\n # NOTE(vytas): Sanity check that we do not accidentally use C-strings\n # anywhere in the cythonized variant.\n ('%%%\\x00%\\x00==\\x00\\x00==', True, {'%%%\\x00%\\x00': '=\\x00\\x00=='}),\n ('spade=♠&spade=♠', False, {'spade': ['♠', '♠']}), # Unicode query\n ],\n )\n def test_parse_query_string_edge_cases(self, query_string, keep_blank, expected):\n assert uri.parse_query_string(query_string, keep_blank=keep_blank) == (expected)\n\n def test_parse_host(self):\n assert 
uri.parse_host('::1') == ('::1', None)\n assert uri.parse_host('2001:ODB8:AC10:FE01::') == (\n '2001:ODB8:AC10:FE01::',\n None,\n )\n assert uri.parse_host('2001:ODB8:AC10:FE01::', default_port=80) == (\n '2001:ODB8:AC10:FE01::',\n 80,\n )\n\n ipv6_addr = 'fc00:e968:6179::de52:7100'\n\n assert uri.parse_host(ipv6_addr) == (ipv6_addr, None)\n assert uri.parse_host('[' + ipv6_addr + ']') == (ipv6_addr, None)\n assert uri.parse_host('[' + ipv6_addr + ']:28080') == (ipv6_addr, 28080)\n assert uri.parse_host('[' + ipv6_addr + ']:8080') == (ipv6_addr, 8080)\n assert uri.parse_host('[' + ipv6_addr + ']:123') == (ipv6_addr, 123)\n assert uri.parse_host('[' + ipv6_addr + ']:42') == (ipv6_addr, 42)\n\n assert uri.parse_host('172.16.31.10') == ('172.16.31.10', None)\n assert uri.parse_host('172.16.31.10', default_port=80) == (\n '172.16.31.10',\n 80,\n )\n assert uri.parse_host('172.16.31.10:27070') == ('172.16.31.10', 27070)\n assert uri.parse_host('172.16.31.10:123') == ('172.16.31.10', 123)\n assert uri.parse_host('172.16.31.10:42') == ('172.16.31.10', 42)\n\n assert uri.parse_host('example.com') == ('example.com', None)\n assert uri.parse_host('example.com', default_port=443) == ('example.com', 443)\n assert uri.parse_host('falcon.example.com') == ('falcon.example.com', None)\n assert uri.parse_host('falcon.example.com:9876') == ('falcon.example.com', 9876)\n assert uri.parse_host('falcon.example.com:42') == ('falcon.example.com', 42)\n\n def test_get_http_status_warns(self):\n with pytest.warns(UserWarning, match='Please use falcon'):\n falcon.get_http_status(400)\n\n @pytest.mark.filterwarnings('ignore')\n def test_get_http_status(self):\n assert falcon.get_http_status(404) == falcon.HTTP_404\n assert falcon.get_http_status(404.3) == falcon.HTTP_404\n assert falcon.get_http_status('404.3') == falcon.HTTP_404\n assert falcon.get_http_status(404.9) == falcon.HTTP_404\n assert falcon.get_http_status('404') == falcon.HTTP_404\n assert falcon.get_http_status(123) == '123 Unknown'\n with pytest.raises(ValueError):\n falcon.get_http_status('not_a_number')\n with pytest.raises(ValueError):\n falcon.get_http_status(0)\n with pytest.raises(ValueError):\n falcon.get_http_status(0)\n with pytest.raises(ValueError):\n falcon.get_http_status(99)\n with pytest.raises(ValueError):\n falcon.get_http_status(-404.3)\n with pytest.raises(ValueError):\n falcon.get_http_status('-404')\n with pytest.raises(ValueError):\n falcon.get_http_status('-404.3')\n assert falcon.get_http_status(123, 'Go Away') == '123 Go Away'\n\n @pytest.mark.parametrize(\n 'v_in,v_out',\n [\n (703, falcon.HTTP_703),\n (404, falcon.HTTP_404),\n (404.9, falcon.HTTP_404),\n (falcon.HTTP_200, falcon.HTTP_200),\n (falcon.HTTP_307, falcon.HTTP_307),\n (falcon.HTTP_404, falcon.HTTP_404),\n (123, '123 Unknown'),\n ('123 Wow Such Status', '123 Wow Such Status'),\n (b'123 Wow Such Status', '123 Wow Such Status'),\n (b'200 OK', falcon.HTTP_OK),\n (http.HTTPStatus(200), falcon.HTTP_200),\n (http.HTTPStatus(307), falcon.HTTP_307),\n (http.HTTPStatus(401), falcon.HTTP_401),\n (http.HTTPStatus(410), falcon.HTTP_410),\n (http.HTTPStatus(429), falcon.HTTP_429),\n (http.HTTPStatus(500), falcon.HTTP_500),\n ],\n )\n def test_code_to_http_status(self, v_in, v_out):\n assert falcon.code_to_http_status(v_in) == v_out\n\n @pytest.mark.parametrize('v', [0, 13, 99, 1000, 1337.01, -99, -404.3, -404, -404.3])\n def test_code_to_http_status_value_error(self, v):\n with pytest.raises(ValueError):\n falcon.code_to_http_status(v)\n\n @pytest.mark.parametrize(\n 
'v_in,v_out',\n [\n # NOTE(kgriffs): Include some codes not used elsewhere so that\n # we get past the LRU.\n (http.HTTPStatus(505), 505),\n (712, 712),\n ('712', 712),\n (b'404 Not Found', 404),\n (b'712 NoSQL', 712),\n ('404 Not Found', 404),\n ('123 Wow Such Status', 123),\n # NOTE(kgriffs): Test LRU\n (http.HTTPStatus(505), 505),\n ('123 Wow Such Status', 123),\n ],\n )\n def test_http_status_to_code(self, v_in, v_out):\n assert falcon.http_status_to_code(v_in) == v_out\n\n @pytest.mark.parametrize('v', ['', ' ', '1', '12', '99', 'catsup', b'', 5.2])\n def test_http_status_to_code_neg(self, v):\n with pytest.raises(ValueError):\n falcon.http_status_to_code(v)\n\n def test_etag_dumps_to_header_format(self):\n etag = structures.ETag('67ab43')\n\n assert etag.dumps() == '\"67ab43\"'\n\n etag.is_weak = True\n assert etag.dumps() == 'W/\"67ab43\"'\n\n assert structures.ETag('67a b43').dumps() == '\"67a b43\"'\n\n def test_etag_strong_vs_weak_comparison(self):\n strong_67ab43_one = structures.ETag.loads('\"67ab43\"')\n strong_67ab43_too = structures.ETag.loads('\"67ab43\"')\n strong_67aB43 = structures.ETag.loads('\"67aB43\"')\n weak_67ab43_one = structures.ETag.loads('W/\"67ab43\"')\n weak_67ab43_two = structures.ETag.loads('W/\"67ab43\"')\n weak_67aB43 = structures.ETag.loads('W/\"67aB43\"')\n\n assert strong_67aB43 == strong_67aB43\n assert weak_67aB43 == weak_67aB43\n assert strong_67aB43 == weak_67aB43\n assert weak_67aB43 == strong_67aB43\n assert strong_67ab43_one == strong_67ab43_too\n assert weak_67ab43_one == weak_67ab43_two\n\n assert strong_67aB43 != strong_67ab43_one\n assert strong_67ab43_one != strong_67aB43\n\n assert strong_67aB43.strong_compare(strong_67aB43)\n assert strong_67ab43_one.strong_compare(strong_67ab43_too)\n assert not strong_67aB43.strong_compare(strong_67ab43_one)\n assert not strong_67ab43_one.strong_compare(strong_67aB43)\n\n assert not strong_67ab43_one.strong_compare(weak_67ab43_one)\n assert not weak_67ab43_one.strong_compare(strong_67ab43_one)\n\n assert not weak_67aB43.strong_compare(weak_67aB43)\n assert not weak_67ab43_one.strong_compare(weak_67ab43_two)\n\n assert not weak_67ab43_one.strong_compare(weak_67aB43)\n assert not weak_67aB43.strong_compare(weak_67ab43_one)\n\n @pytest.mark.parametrize(\n 'filename,expected',\n [\n ('.', '_'),\n ('..', '_.'),\n ('hello.txt', 'hello.txt'),\n ('Ąžuolai žaliuos.jpeg', 'A_z_uolai_z_aliuos.jpeg'),\n ('/etc/shadow', '_etc_shadow'),\n ('. 
⬅ a dot', '____a_dot'),\n ('C:\\\\Windows\\\\kernel32.dll', 'C__Windows_kernel32.dll'),\n ],\n )\n def test_secure_filename(self, filename, expected):\n assert misc.secure_filename(filename) == expected\n\n def test_secure_filename_empty_value(self):\n with pytest.raises(ValueError):\n misc.secure_filename('')\n\n @pytest.mark.parametrize(\n 'string,expected_ascii',\n [\n ('', True),\n ('/', True),\n ('/api', True),\n ('/data/items/something?query=apples%20and%20oranges', True),\n ('/food?item=ð\\x9f\\x8d\\x94', False),\n ('\\x00\\x00\\x7F\\x00\\x00\\x7F\\x00', True),\n ('\\x00\\x00\\x7F\\x00\\x00\\x80\\x00', False),\n ],\n )\n def test_misc_isascii(self, string, expected_ascii):\n if expected_ascii:\n assert misc.isascii(string)\n else:\n assert not misc.isascii(string)\n\n\[email protected](\n 'protocol,method',\n zip(\n ['https'] * len(falcon.HTTP_METHODS) + ['http'] * len(falcon.HTTP_METHODS),\n falcon.HTTP_METHODS * 2,\n ),\n)\ndef test_simulate_request_protocol(asgi, protocol, method):\n sink_called = [False]\n\n def sink(req, resp):\n sink_called[0] = True\n assert req.protocol == protocol\n\n if asgi:\n sink = to_coroutine(sink)\n\n app = create_app(asgi)\n app.add_sink(sink, '/test')\n\n client = testing.TestClient(app)\n\n try:\n simulate = client.getattr('simulate_' + method.lower())\n simulate('/test', protocol=protocol)\n assert sink_called[0]\n except AttributeError:\n # NOTE(kgriffs): simulate_* helpers do not exist for all methods\n pass\n\n\[email protected](\n 'simulate',\n [\n testing.simulate_get,\n testing.simulate_head,\n testing.simulate_post,\n testing.simulate_put,\n testing.simulate_options,\n testing.simulate_patch,\n testing.simulate_delete,\n ],\n)\ndef test_simulate_free_functions(asgi, simulate):\n sink_called = [False]\n\n def sink(req, resp):\n sink_called[0] = True\n\n if asgi:\n sink = to_coroutine(sink)\n\n app = create_app(asgi)\n app.add_sink(sink, '/test')\n\n simulate(app, '/test')\n assert sink_called[0]\n\n\nclass TestFalconTestingUtils:\n \"\"\"Verify some branches not covered elsewhere.\"\"\"\n\n def test_path_escape_chars_in_create_environ(self):\n env = testing.create_environ('/hello%20world%21')\n assert env['PATH_INFO'] == '/hello world!'\n\n def test_no_prefix_allowed_for_query_strings_in_create_environ(self):\n with pytest.raises(ValueError):\n testing.create_environ(query_string='?foo=bar')\n\n def test_plus_in_path_in_create_environ(self):\n env = testing.create_environ('/mnt/grub2/lost+found/inode001')\n assert env['PATH_INFO'] == '/mnt/grub2/lost+found/inode001'\n\n def test_none_header_value_in_create_environ(self):\n env = testing.create_environ('/', headers={'X-Foo': None})\n assert env['HTTP_X_FOO'] == ''\n\n def test_decode_empty_result(self, app):\n client = testing.TestClient(app)\n response = client.simulate_request(path='/')\n assert response.json == falcon.HTTPNotFound().to_dict()\n\n def test_httpnow_alias_for_backwards_compat(self):\n assert testing.httpnow is util.http_now\n\n def test_default_headers(self, app):\n resource = testing.SimpleTestResource()\n app.add_route('/', resource)\n\n headers = {\n 'Authorization': 'Bearer 123',\n }\n\n client = testing.TestClient(app, headers=headers)\n\n client.simulate_get()\n assert resource.captured_req.auth == headers['Authorization']\n\n client.simulate_get(headers=None)\n assert resource.captured_req.auth == headers['Authorization']\n\n def test_default_headers_with_override(self, app):\n resource = testing.SimpleTestResource()\n app.add_route('/', resource)\n\n override_before 
= 'something-something'\n override_after = 'something-something'[::-1]\n\n headers = {\n 'Authorization': 'Bearer XYZ',\n 'Accept': 'application/vnd.siren+json',\n 'X-Override-Me': override_before,\n }\n\n client = testing.TestClient(app, headers=headers)\n client.simulate_get(headers={'X-Override-Me': override_after})\n\n assert resource.captured_req.auth == headers['Authorization']\n assert resource.captured_req.accept == headers['Accept']\n assert resource.captured_req.get_header('X-Override-Me') == override_after\n\n def test_status(self, app):\n resource = testing.SimpleTestResource(status=falcon.HTTP_702)\n app.add_route('/', resource)\n client = testing.TestClient(app)\n\n result = client.simulate_get()\n assert result.status == falcon.HTTP_702\n\n def test_wsgi_iterable_not_closeable(self):\n result = testing.Result([], falcon.HTTP_200, [])\n assert not result.content\n assert result.json is None\n\n def test_path_must_start_with_slash(self, app):\n app.add_route('/', testing.SimpleTestResource())\n client = testing.TestClient(app)\n with pytest.raises(ValueError):\n client.simulate_get('foo')\n\n def test_cached_text_in_result(self, app):\n app.add_route('/', testing.SimpleTestResource(body='test'))\n client = testing.TestClient(app)\n\n result = client.simulate_get()\n assert result.text == result.text\n\n @pytest.mark.parametrize(\n 'resource_type',\n [\n testing.SimpleTestResource,\n testing.SimpleTestResourceAsync,\n ],\n )\n def test_simple_resource_body_json_xor(self, resource_type):\n with pytest.raises(ValueError):\n resource_type(body='', json={})\n\n def test_query_string(self, app):\n class SomeResource:\n def on_get(self, req, resp):\n doc = {}\n\n doc['oid'] = req.get_param_as_int('oid')\n doc['detailed'] = req.get_param_as_bool('detailed')\n doc['things'] = req.get_param_as_list('things', int)\n doc['query_string'] = req.query_string\n\n resp.text = json.dumps(doc)\n\n app.req_options.auto_parse_qs_csv = True\n app.add_route('/', SomeResource())\n client = testing.TestClient(app)\n\n result = client.simulate_get(query_string='oid=42&detailed=no&things=1')\n assert result.json['oid'] == 42\n assert not result.json['detailed']\n assert result.json['things'] == [1]\n\n params = {'oid': 42, 'detailed': False}\n result = client.simulate_get(params=params)\n assert result.json['oid'] == params['oid']\n assert not result.json['detailed']\n assert result.json['things'] is None\n\n params = {'oid': 1978, 'detailed': 'yes', 'things': [1, 2, 3]}\n result = client.simulate_get(params=params)\n assert result.json['oid'] == params['oid']\n assert result.json['detailed']\n assert result.json['things'] == params['things']\n\n expected_qs = 'things=1&things=2&things=3'\n result = client.simulate_get(params={'things': [1, 2, 3]})\n assert result.json['query_string'] == expected_qs\n\n expected_qs = 'things=1,2,3'\n result = client.simulate_get(params={'things': [1, 2, 3]}, params_csv=True)\n assert result.json['query_string'] == expected_qs\n\n def test_query_string_no_question(self, app):\n app.add_route('/', testing.SimpleTestResource())\n client = testing.TestClient(app)\n with pytest.raises(ValueError):\n client.simulate_get(query_string='?x=1')\n\n def test_query_string_in_path(self, app):\n resource = testing.SimpleTestResource()\n app.add_route('/thing', resource)\n client = testing.TestClient(app)\n\n with pytest.raises(ValueError):\n client.simulate_get(path='/thing?x=1', query_string='things=1,2,3')\n with pytest.raises(ValueError):\n client.simulate_get(path='/thing?x=1', 
params={'oid': 1978})\n with pytest.raises(ValueError):\n client.simulate_get(\n path='/thing?x=1', query_string='things=1,2,3', params={'oid': 1978}\n )\n\n client.simulate_get(path='/thing?detailed=no&oid=1337')\n assert resource.captured_req.path == '/thing'\n assert resource.captured_req.query_string == 'detailed=no&oid=1337'\n\n @pytest.mark.parametrize(\n 'document',\n [\n # NOTE(vytas): using an exact binary fraction here to avoid special\n # code branch for approximate equality as it is not the focus here\n 16.0625,\n 123456789,\n True,\n '',\n 'I am a \\u1d0a\\ua731\\u1d0f\\u0274 string.',\n [1, 3, 3, 7],\n {'message': '\\xa1Hello Unicode! \\U0001F638'},\n {\n 'count': 4,\n 'items': [\n {'number': 'one'},\n {'number': 'two'},\n {'number': 'three'},\n {'number': 'four'},\n ],\n 'next': None,\n },\n ],\n )\n def test_simulate_json_body(self, asgi, document):\n resource = (\n testing.SimpleTestResourceAsync() if asgi else testing.SimpleTestResource()\n )\n app = create_app(asgi)\n app.add_route('/', resource)\n\n json_types = ('application/json', 'application/json; charset=UTF-8')\n client = testing.TestClient(app)\n client.simulate_post(\n '/', json=document, headers={'capture-req-body-bytes': '-1'}\n )\n assert json.loads(resource.captured_req_body.decode()) == document\n assert resource.captured_req.content_type in json_types\n\n headers = {\n 'Content-Type': 'x-falcon/peregrine',\n 'X-Falcon-Type': 'peregrine',\n 'capture-req-media': 'y',\n }\n body = 'If provided, `json` parameter overrides `body`.'\n client.simulate_post('/', headers=headers, body=body, json=document)\n assert resource.captured_req_media == document\n assert resource.captured_req.content_type in json_types\n assert resource.captured_req.get_header('X-Falcon-Type') == 'peregrine'\n\n @pytest.mark.parametrize(\n 'remote_addr',\n [\n None,\n '127.0.0.1',\n '8.8.8.8',\n '192.168.3.11',\n 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:6455',\n ],\n )\n def test_simulate_remote_addr(self, app, remote_addr):\n class ShowMyIPResource:\n def on_get(self, req, resp):\n resp.text = req.remote_addr\n resp.content_type = falcon.MEDIA_TEXT\n\n app.add_route('/', ShowMyIPResource())\n\n client = testing.TestClient(app)\n resp = client.simulate_get('/', remote_addr=remote_addr)\n assert resp.status_code == 200\n\n if remote_addr is None:\n assert resp.text == '127.0.0.1'\n else:\n assert resp.text == remote_addr\n\n def test_simulate_hostname(self, app):\n resource = testing.SimpleTestResource()\n app.add_route('/', resource)\n\n client = testing.TestClient(app)\n client.simulate_get('/', protocol='https', host='falcon.readthedocs.io')\n assert resource.captured_req.uri == 'https://falcon.readthedocs.io/'\n\n @pytest.mark.parametrize(\n 'extras,expected_headers',\n [\n (\n {},\n (('user-agent', 'falcon-client/' + falcon.__version__),),\n ),\n (\n {'HTTP_USER_AGENT': 'URL/Emacs', 'HTTP_X_FALCON': 'peregrine'},\n (('user-agent', 'URL/Emacs'), ('x-falcon', 'peregrine')),\n ),\n ],\n )\n def test_simulate_with_environ_extras(self, extras, expected_headers):\n app = falcon.App()\n resource = testing.SimpleTestResource()\n app.add_route('/', resource)\n\n client = testing.TestClient(app)\n client.simulate_get('/', extras=extras)\n\n for header, value in expected_headers:\n assert resource.captured_req.get_header(header) == value\n\n def test_override_method_with_extras(self, asgi):\n app = create_app(asgi)\n app.add_route('/', testing.SimpleTestResource(body='test'))\n client = testing.TestClient(app)\n\n with pytest.raises(ValueError):\n 
if asgi:\n client.simulate_get('/', extras={'method': 'PATCH'})\n else:\n client.simulate_get('/', extras={'REQUEST_METHOD': 'PATCH'})\n\n result = client.simulate_get('/', extras={'REQUEST_METHOD': 'GET'})\n assert result.status_code == 200\n assert result.text == 'test'\n\n @pytest.mark.parametrize(\n 'content_type',\n [\n 'application/json',\n 'application/json; charset=UTF-8',\n 'application/yaml',\n ],\n )\n def test_simulate_content_type(self, content_type):\n class MediaMirror:\n def on_post(self, req, resp):\n resp.media = req.media\n\n app = create_app(asgi=False)\n app.add_route('/', MediaMirror())\n\n client = testing.TestClient(app)\n headers = {'Content-Type': content_type}\n payload = b'{\"hello\": \"world\"}'\n\n resp = client.simulate_post('/', headers=headers, body=payload)\n\n if MEDIA_JSON in content_type:\n assert resp.status_code == 200\n assert resp.json == {'hello': 'world'}\n else:\n # JSON handler should not have been called for YAML\n assert resp.status_code == 415\n\n @pytest.mark.parametrize(\n 'content_type',\n [\n MEDIA_JSON,\n MEDIA_JSON + '; charset=UTF-8',\n MEDIA_YAML,\n MEDIA_MSGPACK,\n MEDIA_URLENCODED,\n ],\n )\n def test_simulate_content_type_extra_handler(self, asgi, content_type):\n class TestResourceAsync(testing.SimpleTestResourceAsync):\n def __init__(self):\n super().__init__()\n\n async def on_post(self, req, resp):\n await super().on_post(req, resp)\n\n resp.media = {'hello': 'back'}\n resp.content_type = content_type\n\n class TestResource(testing.SimpleTestResource):\n def __init__(self):\n super().__init__()\n\n def on_post(self, req, resp):\n super().on_post(req, resp)\n\n resp.media = {'hello': 'back'}\n resp.content_type = content_type\n\n resource = TestResourceAsync() if asgi else TestResource()\n app = create_app(asgi)\n app.add_route('/', resource)\n\n json_handler = TrackingJSONHandler()\n msgpack_handler = TrackingMessagePackHandler()\n form_handler = TrackingFormHandler()\n\n # NOTE(kgriffs): Do not use MEDIA_* so that we can sanity-check that\n # our constants that are used in the pytest parametrization match\n # up to what we expect them to be.\n extra_handlers = {\n 'application/json': json_handler,\n 'application/msgpack': msgpack_handler,\n 'application/x-www-form-urlencoded': form_handler,\n }\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n\n client = testing.TestClient(app)\n headers = {\n 'Content-Type': content_type,\n 'capture-req-media': 'y',\n }\n\n if MEDIA_JSON in content_type:\n payload = b'{\"hello\": \"world\"}'\n elif content_type == MEDIA_MSGPACK:\n payload = b'\\x81\\xa5hello\\xa5world'\n elif content_type == MEDIA_URLENCODED:\n payload = b'hello=world'\n else:\n payload = None\n\n resp = client.simulate_post('/', headers=headers, body=payload)\n\n if MEDIA_JSON in content_type:\n assert resp.status_code == 200\n assert resp.json == {'hello': 'back'}\n\n # Test that our custom deserializer was called\n assert json_handler.deserialize_count == 1\n assert resource.captured_req_media == {'hello': 'world'}\n\n # Verify that other handlers were not called\n assert msgpack_handler.deserialize_count == 0\n assert form_handler.deserialize_count == 0\n\n elif content_type == MEDIA_MSGPACK:\n assert resp.status_code == 200\n assert resp.content == b'\\x81\\xa5hello\\xa4back'\n\n # Test that our custom deserializer was called\n assert msgpack_handler.deserialize_count == 1\n assert resource.captured_req_media == {'hello': 'world'}\n\n # Verify that other 
handlers were not called\n assert json_handler.deserialize_count == 0\n assert form_handler.deserialize_count == 0\n\n elif content_type == MEDIA_URLENCODED:\n assert resp.status_code == 200\n assert resp.content == b'hello=back'\n\n # Test that our custom deserializer was called\n assert form_handler.deserialize_count == 1\n assert resource.captured_req_media == {'hello': 'world'}\n\n # Verify that other handlers were not called\n assert json_handler.deserialize_count == 0\n assert msgpack_handler.deserialize_count == 0\n\n else:\n # YAML should not get handled\n for handler in (json_handler, msgpack_handler):\n assert handler.deserialize_count == 0\n\n assert resource.captured_req_media is None\n assert resp.status_code == 415\n\n\nclass TestNoApiClass(testing.TestCase):\n def test_something(self):\n self.assertTrue(isinstance(self.app, falcon.App))\n\n\nclass TestSetupApi(testing.TestCase):\n def setUp(self):\n super(TestSetupApi, self).setUp()\n with pytest.warns(UserWarning, match='API class may be removed in a future'):\n self.app = falcon.API()\n self.app.add_route('/', testing.SimpleTestResource(body='test'))\n\n def test_something(self):\n self.assertTrue(isinstance(self.app, falcon.API))\n self.assertTrue(isinstance(self.app, falcon.App))\n\n result = self.simulate_get()\n assert result.status_code == 200\n assert result.text == 'test'\n\n\ndef test_get_argnames():\n def foo(a, b, c):\n pass\n\n class Bar:\n def __call__(self, a, b):\n pass\n\n assert misc.get_argnames(foo) == ['a', 'b', 'c']\n assert misc.get_argnames(Bar()) == ['a', 'b']\n assert misc.get_argnames(functools.partial(foo, 42)) == ['b', 'c']\n\n\nclass TestContextType:\n class CustomContextType(structures.Context):\n def __init__(self):\n pass\n\n @pytest.mark.parametrize(\n 'context_type',\n [\n CustomContextType,\n structures.Context,\n ],\n )\n def test_attributes(self, context_type):\n ctx = context_type()\n\n ctx.foo = 'bar'\n ctx.details = None\n ctx._cache = {}\n\n assert ctx.foo == 'bar'\n assert ctx.details is None\n assert ctx._cache == {}\n\n with pytest.raises(AttributeError):\n ctx.cache_strategy\n\n @pytest.mark.parametrize(\n 'context_type',\n [\n CustomContextType,\n structures.Context,\n ],\n )\n def test_items_from_attributes(self, context_type):\n ctx = context_type()\n\n ctx.foo = 'bar'\n ctx.details = None\n ctx._cache = {}\n\n assert ctx['foo'] == 'bar'\n assert ctx['details'] is None\n assert ctx['_cache'] == {}\n\n with pytest.raises(KeyError):\n ctx['cache_strategy']\n\n assert 'foo' in ctx\n assert '_cache' in ctx\n assert 'cache_strategy' not in ctx\n\n @pytest.mark.parametrize(\n 'context_type',\n [\n CustomContextType,\n structures.Context,\n ],\n )\n def test_attributes_from_items(self, context_type):\n ctx = context_type()\n\n ctx['foo'] = 'bar'\n ctx['details'] = None\n ctx['_cache'] = {}\n ctx['cache_strategy'] = 'lru'\n\n assert ctx['cache_strategy'] == 'lru'\n del ctx['cache_strategy']\n\n assert ctx['foo'] == 'bar'\n assert ctx['details'] is None\n assert ctx['_cache'] == {}\n\n with pytest.raises(KeyError):\n ctx['cache_strategy']\n\n @pytest.mark.parametrize(\n 'context_type,type_name',\n [\n (CustomContextType, 'CustomContextType'),\n (structures.Context, 'Context'),\n ],\n )\n def test_dict_interface(self, context_type, type_name):\n ctx = context_type()\n\n ctx['foo'] = 'bar'\n ctx['details'] = None\n ctx[1] = 'one'\n ctx[2] = 'two'\n\n assert ctx == {'foo': 'bar', 'details': None, 1: 'one', 2: 'two'}\n assert ctx != {'bar': 'foo', 'details': None, 1: 'one', 2: 'two'}\n 
assert ctx != {}\n\n copy = ctx.copy()\n assert isinstance(copy, context_type)\n assert copy == ctx\n assert copy == {'foo': 'bar', 'details': None, 1: 'one', 2: 'two'}\n copy.pop('foo')\n assert copy != ctx\n\n assert set(key for key in ctx) == {'foo', 'details', 1, 2}\n\n assert ctx.get('foo') == 'bar'\n assert ctx.get('bar') is None\n assert ctx.get('bar', frozenset('hello')) == frozenset('hello')\n false = ctx.get('bar', False)\n assert isinstance(false, bool)\n assert not false\n\n assert len(ctx) == 4\n assert ctx.pop(3) is None\n assert ctx.pop(3, 'not found') == 'not found'\n assert ctx.pop('foo') == 'bar'\n assert ctx.pop(1) == 'one'\n assert ctx.pop(2) == 'two'\n assert len(ctx) == 1\n\n assert repr(ctx) == type_name + \"({'details': None})\"\n assert str(ctx) == type_name + \"({'details': None})\"\n assert '{}'.format(ctx) == type_name + \"({'details': None})\"\n\n with pytest.raises(TypeError):\n {ctx: ctx}\n\n ctx.clear()\n assert ctx == {}\n assert len(ctx) == 0\n\n ctx['key'] = 'value'\n assert ctx.popitem() == ('key', 'value')\n\n ctx.setdefault('numbers', []).append(1)\n ctx.setdefault('numbers', []).append(2)\n ctx.setdefault('numbers', []).append(3)\n assert ctx['numbers'] == [1, 2, 3]\n\n @pytest.mark.parametrize(\n 'context_type',\n [\n CustomContextType,\n structures.Context,\n ],\n )\n def test_keys_and_values(self, context_type):\n ctx = context_type()\n ctx.update((number, number ** 2) for number in range(1, 5))\n\n assert set(ctx.keys()) == {1, 2, 3, 4}\n assert set(ctx.values()) == {1, 4, 9, 16}\n assert set(ctx.items()) == {(1, 1), (2, 4), (3, 9), (4, 16)}\n\n\nclass TestDeprecatedArgs:\n def test_method(self, recwarn):\n class C:\n @deprecation.deprecated_args(allowed_positional=0)\n def a_method(self, a=1, b=2):\n pass\n\n C().a_method(a=1, b=2)\n assert len(recwarn) == 0\n C().a_method(1, b=2)\n assert len(recwarn) == 1\n\n def test_function(self, recwarn):\n @deprecation.deprecated_args(allowed_positional=0, is_method=False)\n def a_function(a=1, b=2):\n pass\n\n a_function(a=1, b=2)\n assert len(recwarn) == 0\n a_function(1, b=2)\n assert len(recwarn) == 1\n\n\[email protected](\n sys.version_info < (3, 7), reason='module __getattr__ requires python 3.7'\n)\ndef test_json_deprecation():\n with pytest.warns(deprecation.DeprecatedWarning, match='json'):\n util.json\n\n with pytest.raises(AttributeError):\n util.some_imaginary_module\n", "id": "1286623", "language": "Python", "matching_score": 5.556526184082031, "max_stars_count": 8217, "path": "tests/test_utils.py" }, { "content": "import asyncio\nfrom collections import Counter\nimport hashlib\nimport platform\nimport sys\nimport time\n\nimport falcon\nimport falcon.asgi\nimport falcon.errors\nimport falcon.util\n\nSSE_TEST_MAX_DELAY_SEC = 1\n_WIN32 = sys.platform.startswith('win')\n_X86_64 = platform.machine() == 'x86_64'\n\n\nclass Things:\n def __init__(self):\n self._counter = Counter()\n\n async def on_get(self, req, resp):\n await asyncio.sleep(0.01)\n resp.text = req.remote_addr\n\n async def on_post(self, req, resp):\n resp.data = await req.stream.read(req.content_length or 0)\n resp.set_header('X-Counter', str(self._counter['backround:things:on_post']))\n\n async def background_job_async():\n await asyncio.sleep(0.01)\n self._counter['backround:things:on_post'] += 1\n\n def background_job_sync():\n time.sleep(0.01)\n self._counter['backround:things:on_post'] += 1000\n\n resp.schedule(background_job_async)\n resp.schedule_sync(background_job_sync)\n resp.schedule(background_job_async)\n 
resp.schedule_sync(background_job_sync)\n\n async def on_put(self, req, resp):\n # NOTE(kgriffs): Test that reading past the end does\n # not hang.\n\n chunks = []\n for i in range(req.content_length + 1):\n # NOTE(kgriffs): In the ASGI interface, bounded_stream is an\n # alias for req.stream. We'll use the alias here just as\n # a sanity check.\n chunk = await req.bounded_stream.read(1)\n chunks.append(chunk)\n\n # NOTE(kgriffs): body should really be set to a string, but\n # Falcon is lenient and will allow bytes as well (although\n # it is slightly less performant).\n # TODO(kgriffs): Perhaps in Falcon 4.0 be more strict? We would\n # also have to change the WSGI behavior to match.\n resp.text = b''.join(chunks)\n\n # =================================================================\n # NOTE(kgriffs): Test the sync_to_async helpers here to make sure\n # they work as expected in the context of a real ASGI server.\n # =================================================================\n safely_tasks = []\n safely_values = []\n\n def callmesafely(a, b, c=None):\n # NOTE(kgriffs): Sleep to prove that there isn't another instance\n # running in parallel that is able to race ahead.\n time.sleep(0.001)\n safely_values.append((a, b, c))\n\n cms = falcon.util.wrap_sync_to_async(callmesafely, threadsafe=False)\n loop = falcon.util.get_running_loop()\n\n # NOTE(caselit): on windows it takes more time so create less tasks\n # NOTE(vytas): Tests on non-x86 platforms are run using software\n # emulation via single-thread QEMU Docker containers, making them\n # considerably slower as well.\n num_cms_tasks = 100 if _WIN32 or not _X86_64 else 1000\n\n for i in range(num_cms_tasks):\n # NOTE(kgriffs): create_task() is used here, so that the coroutines\n # are scheduled immediately in the order created; under Python\n # 3.6, asyncio.gather() does not seem to always schedule\n # them in order, so we do it this way to make it predictable.\n safely_tasks.append(loop.create_task(cms(i, i + 1, c=i + 2)))\n\n await asyncio.gather(*safely_tasks)\n\n assert len(safely_values) == num_cms_tasks\n for i, val in enumerate(safely_values):\n assert safely_values[i] == (i, i + 1, i + 2)\n\n def callmeshirley(a=42, b=None):\n return (a, b)\n\n assert (42, None) == await falcon.util.sync_to_async(callmeshirley)\n assert (1, 2) == await falcon.util.sync_to_async(callmeshirley, 1, 2)\n assert (5, None) == await falcon.util.sync_to_async(callmeshirley, 5)\n assert (3, 4) == await falcon.util.sync_to_async(callmeshirley, 3, b=4)\n\n\nclass Bucket:\n async def on_post(self, req, resp):\n resp.text = await req.stream.read()\n\n\nclass Feed:\n async def on_websocket(self, req, ws, feed_id):\n await ws.accept()\n await ws.send_text(feed_id)\n\n\nclass Events:\n async def on_get(self, req, resp):\n async def emit():\n s = 0\n while s <= SSE_TEST_MAX_DELAY_SEC:\n yield falcon.asgi.SSEvent(text='hello world')\n await asyncio.sleep(s)\n s += SSE_TEST_MAX_DELAY_SEC / 4\n\n resp.sse = emit()\n\n async def on_websocket(self, req, ws): # noqa: C901\n recv_command = req.get_header('X-Command') == 'recv'\n send_mismatched = req.get_header('X-Mismatch') == 'send'\n recv_mismatched = req.get_header('X-Mismatch') == 'recv'\n\n mismatch_type = req.get_header('X-Mismatch-Type', default='text')\n\n raise_error = req.get_header('X-Raise-Error')\n\n close = req.get_header('X-Close')\n close_code = req.get_header('X-Close-Code')\n if close_code:\n close_code = int(close_code)\n\n accept = req.get_header('X-Accept', default='accept')\n\n if accept == 
'accept':\n subprotocol = req.get_header('X-Subprotocol')\n\n if subprotocol == '*':\n subprotocol = ws.subprotocols[0]\n\n if subprotocol:\n await ws.accept(subprotocol)\n else:\n await ws.accept()\n elif accept == 'reject':\n if close:\n await ws.close()\n return\n\n if send_mismatched:\n if mismatch_type == 'text':\n await ws.send_text(b'fizzbuzz')\n else:\n await ws.send_data('fizzbuzz')\n\n if recv_mismatched:\n if mismatch_type == 'text':\n await ws.receive_text()\n else:\n await ws.receive_data()\n\n start = time.time()\n while time.time() - start < 1:\n try:\n msg = None\n\n if recv_command:\n msg = await ws.receive_media()\n else:\n msg = None\n\n await ws.send_text('hello world')\n print('on_websocket:send_text')\n\n if msg and msg['command'] == 'echo':\n await ws.send_text(msg['echo'])\n\n await ws.send_data(b'hello\\x00world')\n await asyncio.sleep(0.2)\n except falcon.errors.WebSocketDisconnected:\n print('on_websocket:WebSocketDisconnected')\n raise\n\n if raise_error == 'generic':\n raise Exception('Test: Generic Unhandled Error')\n elif raise_error == 'http':\n raise falcon.HTTPBadRequest()\n\n if close:\n # NOTE(kgriffs): Tests that the default is used\n # when close_code is None.\n await ws.close(close_code)\n\n\nclass Multipart:\n async def on_post(self, req, resp):\n parts = {}\n\n form = await req.get_media()\n async for part in form:\n # NOTE(vytas): SHA1 is no longer recommended for cryptographic\n # purposes, but here we are only using it for integrity checking.\n sha1 = hashlib.sha1()\n async for chunk in part.stream:\n sha1.update(chunk)\n\n parts[part.name] = {\n 'filename': part.filename,\n 'sha1': sha1.hexdigest(),\n }\n\n resp.media = parts\n\n\nclass LifespanHandler:\n def __init__(self):\n self.startup_succeeded = False\n self.shutdown_succeeded = False\n\n async def process_startup(self, scope, event):\n assert scope['type'] == 'lifespan'\n assert event['type'] == 'lifespan.startup'\n self.startup_succeeded = True\n\n async def process_shutdown(self, scope, event):\n assert scope['type'] == 'lifespan'\n assert event['type'] == 'lifespan.shutdown'\n self.shutdown_succeeded = True\n\n\nclass TestJar:\n async def on_get(self, req, resp):\n # NOTE(myusko): In the future we shouldn't change the cookie\n # a test depends on the input.\n # NOTE(kgriffs): This is the only test that uses a single\n # cookie (vs. 
multiple) as input; if this input ever changes,\n # a separate test will need to be added to explicitly verify\n # this use case.\n resp.set_cookie('has_permission', 'true')\n\n async def on_post(self, req, resp):\n if req.cookies['has_permission'] == 'true':\n resp.status = falcon.HTTP_200\n else:\n resp.status = falcon.HTTP_403\n\n\ndef create_app():\n app = falcon.asgi.App()\n app.add_route('/', Things())\n app.add_route('/bucket', Bucket())\n app.add_route('/events', Events())\n app.add_route('/forms', Multipart())\n app.add_route('/jars', TestJar())\n app.add_route('/feeds/{feed_id}', Feed())\n lifespan_handler = LifespanHandler()\n app.add_middleware(lifespan_handler)\n\n async def _on_ws_error(req, resp, error, params, ws=None):\n if not ws:\n raise\n\n if ws.unaccepted:\n await ws.accept()\n\n if not ws.closed:\n await ws.send_text(error.__class__.__name__)\n await ws.close()\n\n app.add_error_handler(falcon.errors.OperationNotAllowed, _on_ws_error)\n app.add_error_handler(ValueError, _on_ws_error)\n\n return app\n\n\napplication = create_app()\n", "id": "12615552", "language": "Python", "matching_score": 1.4905598163604736, "max_stars_count": 8217, "path": "tests/asgi/_asgi_test_app.py" }, { "content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Routing utilities.\"\"\"\n\nimport re\n\nfrom falcon import constants\nfrom falcon import responders\n\n\nclass SuffixedMethodNotFoundError(Exception):\n def __init__(self, message):\n super(SuffixedMethodNotFoundError, self).__init__(message)\n self.message = message\n\n\n# NOTE(kgriffs): Published method; take care to avoid breaking changes.\ndef compile_uri_template(template):\n \"\"\"Compile the given URI template string into a pattern matcher.\n\n This function can be used to construct custom routing engines that\n iterate through a list of possible routes, attempting to match\n an incoming request against each route's compiled regular expression.\n\n Each field is converted to a named group, so that when a match\n is found, the fields can be easily extracted using\n :py:meth:`re.MatchObject.groupdict`.\n\n This function does not support the more flexible templating\n syntax used in the default router. Only simple paths with bracketed\n field expressions are recognized. For example::\n\n /\n /books\n /books/{isbn}\n /books/{isbn}/characters\n /books/{isbn}/characters/{name}\n\n Warning:\n If the template contains a trailing slash character, it will be\n stripped.\n\n Note that this is **different** from :ref:`the default behavior\n <trailing_slash_in_path>` of :func:`~falcon.App.add_route` used\n with the default :class:`~falcon.routing.CompiledRouter`.\n\n The :attr:`~falcon.RequestOptions.strip_url_path_trailing_slash`\n request option is not considered by ``compile_uri_template()``.\n\n\n Args:\n template(str): The template to compile. 
Note that field names are\n restricted to ASCII a-z, A-Z, and the underscore character.\n\n Returns:\n tuple: (template_field_names, template_regex)\n \"\"\"\n\n if not isinstance(template, str):\n raise TypeError('uri_template is not a string')\n\n if not template.startswith('/'):\n raise ValueError(\"uri_template must start with '/'\")\n\n if '//' in template:\n raise ValueError(\"uri_template may not contain '//'\")\n\n if template != '/' and template.endswith('/'):\n template = template[:-1]\n\n # template names should be able to start with A-Za-z\n # but also contain 0-9_ in the remaining portion\n expression_pattern = r'{([a-zA-Z]\\w*)}'\n\n # Get a list of field names\n fields = set(re.findall(expression_pattern, template))\n\n # Convert Level 1 var patterns to equivalent named regex groups\n escaped = re.sub(r'[\\.\\(\\)\\[\\]\\?\\*\\+\\^\\|]', r'\\\\\\g<0>', template)\n pattern = re.sub(expression_pattern, r'(?P<\\1>[^/]+)', escaped)\n pattern = r'\\A' + pattern + r'\\Z'\n\n return fields, re.compile(pattern, re.IGNORECASE)\n\n\ndef map_http_methods(resource, suffix=None):\n \"\"\"Map HTTP methods (e.g., GET, POST) to methods of a resource object.\n\n Args:\n resource: An object with *responder* methods, following the naming\n convention *on_\\\\**, that correspond to each method the resource\n supports. For example, if a resource supports GET and POST, it\n should define ``on_get(self, req, resp)`` and\n ``on_post(self, req, resp)``.\n\n Keyword Args:\n suffix (str): Optional responder name suffix for this route. If\n a suffix is provided, Falcon will map GET requests to\n ``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``,\n etc.\n\n Returns:\n dict: A mapping of HTTP methods to explicitly defined resource responders.\n\n \"\"\"\n\n method_map = {}\n\n for method in constants.COMBINED_METHODS:\n try:\n responder_name = 'on_' + method.lower()\n if suffix:\n responder_name += '_' + suffix\n\n responder = getattr(resource, responder_name)\n except AttributeError:\n # resource does not implement this method\n pass\n else:\n # Usually expect a method, but any callable will do\n if callable(responder):\n method_map[method] = responder\n\n # If suffix is specified and doesn't map to any methods, raise an error\n if suffix and not method_map:\n raise SuffixedMethodNotFoundError(\n 'No responders found for the specified suffix'\n )\n\n return method_map\n\n\ndef set_default_responders(method_map, asgi=False):\n \"\"\"Map HTTP methods not explicitly defined on a resource to default responders.\n\n Args:\n method_map: A dict with HTTP methods mapped to responders explicitly\n defined in a resource.\n asgi (bool): ``True`` if using an ASGI app, ``False`` otherwise\n (default ``False``).\n \"\"\"\n\n # Attach a resource for unsupported HTTP methods\n allowed_methods = [\n m for m in sorted(list(method_map.keys())) if m not in constants._META_METHODS\n ]\n\n if 'OPTIONS' not in method_map:\n # OPTIONS itself is intentionally excluded from the Allow header\n opt_responder = responders.create_default_options(allowed_methods, asgi=asgi)\n method_map['OPTIONS'] = opt_responder\n allowed_methods.append('OPTIONS')\n\n na_responder = responders.create_method_not_allowed(allowed_methods, asgi=asgi)\n\n for method in constants.COMBINED_METHODS:\n if method not in method_map:\n method_map[method] = na_responder\n", "id": "11194141", "language": "Python", "matching_score": 0.6587144136428833, "max_stars_count": 3, "path": "falcon/routing/util.py" }, { "content": "#!/usr/bin/env 
python\n\nimport atexit\nimport pathlib\nimport subprocess\n\nimport toml\n\nHERE = pathlib.Path(__file__).resolve().parent\nROOT = HERE.parent\n\n\ndef _write_changelog(target, data):\n with open(ROOT / target, 'wb') as rst:\n rst.write(data)\n\n\ndef get_target_filename():\n with open(ROOT / 'pyproject.toml') as pyproject_toml:\n project = toml.load(pyproject_toml)\n return project['tool']['towncrier']['filename']\n\n\ndef render_draft(target):\n with open(ROOT / target, 'rb') as rst:\n template = rst.read()\n # NOTE(vytas): Restore the template once we are done.\n atexit.register(_write_changelog, target, template)\n\n draft = subprocess.check_output(('towncrier', '--draft'), cwd=ROOT)\n\n # NOTE(vytas): towncrier does not seem to respect our preference for not\n # creating a title, so we remove it manually.\n # (See also: https://github.com/twisted/towncrier/issues/345)\n draft = draft.split(b'=============', 1)[-1]\n draft = draft.lstrip(b'=').lstrip()\n\n # NOTE(vytas): towncrier --draft does not seem to use the template,\n # so we substitute manually.\n rendered = template.replace(b'.. towncrier release notes start', draft, 1)\n _write_changelog(target, rendered)\n\n\ndef build_docs():\n subprocess.check_call(\n (\n 'sphinx-build',\n '-W',\n '-E',\n '-b',\n 'html',\n ROOT / 'docs',\n ROOT / 'docs/_build/html',\n )\n )\n\n\nif __name__ == '__main__':\n target = get_target_filename()\n render_draft(target)\n build_docs()\n", "id": "9033186", "language": "Python", "matching_score": 0.6396142244338989, "max_stars_count": 8217, "path": "tools/towncrier_draft.py" } ]
1.074637
studiogangster
[ { "content": "import hashlib\nimport json\n\nfrom dojo.models import Finding\n\n\nclass GitleaksParser(object):\n \"\"\"\n A class that can be used to parse the Gitleaks JSON report files\n \"\"\"\n\n def get_scan_types(self):\n return [\"Gitleaks Scan\"]\n\n def get_label_for_scan_types(self, scan_type):\n return scan_type\n\n def get_description_for_scan_types(self, scan_type):\n return \"Import Gitleaks Scan findings in JSON format.\"\n\n def get_findings(self, filename, test):\n \"\"\"\n Converts a Gitleaks report to DefectDojo findings\n \"\"\"\n issues = json.load(filename)\n # empty report are just null object\n if issues is None:\n return list()\n\n dupes = dict()\n for issue in issues:\n line = None\n file_path = issue[\"File\"]\n reason = issue[\"RuleID\"]\n titleText = \"Hard Coded \" + reason\n description = \"**Commit:** \" + issue[\"Message\"].rstrip(\"\\n\") + \"\\n\"\n description += \"**Commit Hash:** \" + issue[\"Commit\"] + \"\\n\"\n description += \"**Commit Date:** \" + issue[\"Date\"] + \"\\n\"\n description += \"**Author:** \" + issue[\"Author\"] + \" <\" + issue[\"Email\"] + \">\" + \"\\n\"\n description += \"**Reason:** \" + reason + \"\\n\"\n description += \"**Path:** \" + file_path + \"\\n\"\n if \"StartLine\" in issue:\n description += \"**StartLine:** %i\\n\" % issue[\"StartLine\"]\n line = issue[\"StartLine\"]\n if \"EndLine\" in issue:\n description += \"**EndLine:** %i\\n\" % issue[\"EndLine\"]\n if \"File\" in issue:\n description += \"**File:** \" + issue[\"File\"] + \"\\n\"\n\n description += \"\\n**String Found:**\\n\\n```\\n\" + issue[\"StartLine\"].replace(issue[\"Secret\"], \"REDACTED\") + \"\\n```\"\n\n severity = \"High\"\n if \"Github\" in reason or \"AWS\" in reason or \"Heroku\" in reason:\n severity = \"Critical\"\n\n finding = Finding(\n title=titleText,\n test=test,\n cwe=798,\n description=description,\n severity=severity,\n file_path=file_path,\n line=line,\n dynamic_finding=False,\n static_finding=True,\n )\n # manage tags\n finding.unsaved_tags = issue.get(\"Tags\", [])#.split(', ')\n\n dupe_key = hashlib.sha256((issue[\"Secret\"] + file_path + str(line)).encode(\"utf-8\")).hexdigest()\n\n if dupe_key not in dupes:\n dupes[dupe_key] = finding\n\n return list(dupes.values())\n", "id": "5227314", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "dojo/tools/gitleaks/parser.py" } ]
0
KyleLevi
[ { "content": "\"\"\"\r\nVERSION = 0.1b2\r\n10/23/2018\r\n\"\"\"\r\n\r\n\r\nimport sys\r\nimport os\r\nimport argparse\r\nimport subprocess\r\nimport operator\r\nimport pysam\r\n\r\nclass SamReader:\r\n\r\n def __init__(self, file_or_folder, **kwargs):\r\n \"\"\"\r\n Initialize with the path to a file or a folder. If a file is\r\n :param file_or_folder:\r\n \"\"\"\r\n check_files = kwargs.get('check_files', False)\r\n convert_files = kwargs.get('convert_files', False)\r\n\r\n # Generate a list of files in dir, and convert sam to bam\r\n if not os.path.isdir(file_or_folder):\r\n if file_or_folder.endswith('.sam'):\r\n file_or_folder = self.sam_to_bam(file_or_folder)\r\n input_files = [file_or_folder]\r\n else:\r\n if not file_or_folder.endswith('/'):\r\n file_or_folder = file_or_folder + '/'\r\n # Get the names of every SAM and BAM file in the input dir\r\n input_files = [file_or_folder + file_name for file_name in os.listdir(file_or_folder) if\r\n file_name.endswith(\".sam\") or file_name.endswith('.bam')]\r\n\r\n # Trim sam files from the list that have a bam file of the same name in the list\r\n input_files = [file_name for file_name in input_files if not\r\n (file_name.endswith('.sam') and file_name.replace('.sam','.bam') in input_files)]\r\n\r\n # Convert any sam files to bam files, sort, index and add the new file names to the input_files\r\n if convert_files and check_files:\r\n input_files = [file_name if file_name.endswith('.bam') else self.sam_to_bam(file_name) for file_name in input_files]\r\n\r\n #Finally, save the final list of input files after trimming and remove any .SAM files\r\n self.input_files = [x for x in input_files if not x.endswith('.sam')]\r\n\r\n # Check if every BAM files has an index\r\n if check_files:\r\n all_files = os.listdir(file_or_folder)\r\n for f in self.input_files:\r\n if f.replace('.bam', '') + '.bai' not in all_files:\r\n self.index_bam(f)\r\n\r\n # Check if every file can be opened and record genomes & lengths\r\n genome_lengths = {}\r\n removed_files = []\r\n for f in self.input_files:\r\n try:\r\n bamfile = pysam.AlignmentFile(f, 'rb')\r\n except Exception as e:\r\n sys.stderr.write('File {} could not be opened by pysam because...:\\n{}\\n'.format(f, e))\r\n sys.stderr.write('Removing {} from input list and continuing.\\n'.format(f))\r\n removed_files.append(f)\r\n continue\r\n\r\n for l, r in zip(bamfile.lengths, bamfile.references):\r\n genome_lengths[r] = l\r\n if not check_files:\r\n break\r\n\r\n removed_files = set(removed_files)\r\n self.input_files = [x for x in input_files if x not in removed_files]\r\n self.broken_files = removed_files\r\n self.genome_lengths = genome_lengths\r\n\r\n # Check to see if any files made it, if not, end and warn the user.\r\n print(self.input_files)\r\n if len(self.input_files) < 1:\r\n sys.stderr.write('No input files made it past screening, if this is my fault, use Sam_Reader(\\'my_files/\\', check_files=False, convert_riles=False)')\r\n\r\n def __str__(self):\r\n return \"{} BAM file(s): (use .input_files)\\n{} Organism(s)/Genome_Length {}\\n\".format(len(self.input_files), len(self.genome_lengths.keys()), str(self.genome_lengths))\r\n\r\n def remove_short_reads(self, new_dir = None, min_length = 50):\r\n \"\"\"\r\n #Probably will be absorbed into another def\r\n\r\n Reads in each bamfile and removes an reads less than min length and writes them to a new file\r\n :param min_length:\r\n :return:\r\n \"\"\"\r\n #TODO\r\n pass\r\n\r\n @staticmethod\r\n def sam_to_bam(infile, outdir = None):\r\n \"\"\"\r\n 
Converts a SAM file to a BAM file, sorts it, and Indexes it.\r\n :param infile: path to SAM file\r\n :param outdir: (optional) path to write BAM file to\r\n :return: path to new BAM file\r\n \"\"\"\r\n\r\n if infile.endswith('.sam'):\r\n # Changing the output file name and location\r\n bamfile = infile.replace('.sam', '.bam')\r\n if outdir:\r\n infile = infile.split('/')[-1].replace('.sam', '')\r\n bamfile = outdir + infile + '.bam'\r\n\r\n # These are the commands to be run, edit them here!\r\n convert_to_bam = [\"samtools\", \"view\", \"-bS\", infile]\r\n sort_bamfile = [\"samtools\", \"sort\", bamfile, bamfile.replace('.bam', '')]\r\n index_bamfile = [\"samtools\", \"index\", bamfile, bamfile.replace('.bam', '.bai')]\r\n\r\n sys.stdout.write('Converting {} to BAM file, sorting, and indexing...'.format(infile))\r\n ret_code = subprocess.call(convert_to_bam, stdout=open(bamfile, 'w'))\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(convert_to_bam)))\r\n return None\r\n ret_code = subprocess.call(sort_bamfile)\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(sort_bamfile)))\r\n return None\r\n ret_code = subprocess.call(index_bamfile)\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(index_bamfile)))\r\n return None\r\n\r\n return bamfile\r\n\r\n else:\r\n sys.stderr.write('File: \"{}\" does not end with .sam, cannot convert to .bam'.format(infile))\r\n return None\r\n\r\n @staticmethod\r\n def index_bam(infile):\r\n \"\"\"\r\n Only indexes a BAM file\r\n :param infile: path to BAM file\r\n :param outdir: (optional) path to write .bai file to\r\n :return: path to new .bai file\r\n \"\"\"\r\n\r\n if infile.lower().endswith('.sam'):\r\n sys.stderr.write('index_bam() was called on a SAM file, use sam_to_bam() instead to convert AND index')\r\n sys.exit(1)\r\n\r\n if not infile.lower().endswith('.bam'):\r\n sys.stderr.write('index_bam() was called on a non-BAM file. 
If this file is actually a BAM file, consider naming it correctly.')\r\n sys.exit(1)\r\n\r\n # These are the commands to be run, edit them here!\r\n index_bamfile = [\"samtools\", \"index\", infile, infile.replace('.bam', '.bai')]\r\n\r\n sys.stdout.write('Converting {} to BAM file, sorting, and indexing...'.format(infile))\r\n ret_code = subprocess.call(index_bamfile)\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(index_bamfile)))\r\n return None\r\n return\r\n\r\n\r\n @staticmethod\r\n def read_counts(bam_file_name, n=50):\r\n\r\n bamfile = pysam.AlignmentFile(bam_file_name, 'rb', check_sq=False)\r\n stats_dict = {} # {genome_name: [total_reads_mapped, reads > n base pairs long]}\r\n for read in bamfile.fetch():\r\n if not read.reference_name in stats_dict:\r\n stats_dict[read.reference_name] = [0, 0]# index 0 is count of all reads, index 1 is all reads > n length\r\n total_len = int(sum(read.get_cigar_stats()[0]))\r\n if total_len > n:\r\n stats_dict[read.reference_name][1] += 1\r\n stats_dict[read.reference_name][0] += 1\r\n if stats_dict == {}:\r\n return {'None': [0, 0]}\r\n return stats_dict\r\n\r\n def quick_percent_coverages(self, bam_file_name, organism=None, MIN_POSITIONAL_COVERAGE=1):\r\n \"\"\"\r\n Find the percent coverage of each organism in a single BAM file and returns a dictionary of\r\n {organism1: 0.1, organism2: 50.0, ..}\r\n :param bam_file_name: string\r\n :param organism: if this is specified, only this organism will be considered\r\n :param MIN_POSITIONAL_COVERAGE: does 1 read constitute coverage? if not, raise this number.\r\n :return: dict {org1: %cov1, org2: %cov2, ... }\r\n \"\"\"\r\n\r\n bamfile = pysam.AlignmentFile(bam_file_name, 'rb', check_sq=False)\r\n\r\n # Loop over every read, and calculate coverage an organism if it's the first read found\r\n organism_coverage = {}\r\n for read in bamfile.fetch():\r\n genome_name = read.reference_name\r\n if genome_name in organism_coverage:\r\n # print('exists')\r\n continue\r\n if organism != None and organism != genome_name:\r\n # print('specified and not{}{}'.format(genome_name,organism))\r\n continue\r\n\r\n # Process one organism\r\n base_depth = []\r\n for p in bamfile.pileup(contig=genome_name):\r\n for pilups in p.pileups:\r\n if pilups.query_position:\r\n # Expand array while insert pos is out of list bounds\r\n if p.reference_pos >= len(base_depth):\r\n base_depth += [0] * (p.reference_pos - len(base_depth) + 1)\r\n # while p.reference_pos >= len(base_depth):\r\n # base_depth.append(0)\r\n base_depth[p.reference_pos] += 1\r\n if base_depth[p.reference_pos] > MIN_POSITIONAL_COVERAGE:\r\n continue\r\n\r\n bins_covered = len([x for x in base_depth if x > 0])\r\n organism_coverage[genome_name] = (bins_covered / self.genome_lengths[genome_name]) * 100\r\n if organism_coverage == {}:\r\n return {'None': 0}\r\n return organism_coverage\r\n\r\n def hits(self, **kwargs):\r\n \"\"\"\r\n File | Genome | Percent Coverage | Total Mapped Reads | Mapped Reads > 50 bp\r\n\r\n :param kwargs:\r\n :return:\r\n \"\"\"\r\n # Setting Kwargs and defaults\r\n\r\n organism = kwargs.get('organism', None)\r\n only_this_file = kwargs.get('file_name', None)\r\n min_read_len = kwargs.get('min_read_len', 50)\r\n min_cov_depth = kwargs.get('min_coverage_depth', 1)\r\n\r\n header = ['file', 'genome', 'percent_coverage', 'total reads mapped', 'reads mapped > {} bp'.format(min_read_len)]\r\n results = []\r\n for f in self.input_files:\r\n # if a specific file is specified and this file 
isn't it, continue\r\n if only_this_file != None and f != only_this_file:\r\n continue\r\n f_coverages = self.quick_percent_coverages(f, organism, min_cov_depth)\r\n\r\n for genome, stats in Sam_Reader.read_counts(f, min_read_len).items():\r\n line = [f, genome, round(f_coverages.get(genome,0), 1), stats[0], stats[1]]\r\n results.append(line)\r\n\r\n if kwargs.get('write_file', False):\r\n if len(results) < 1:\r\n print(\"no results?\")\r\n return\r\n\r\n with open(kwargs['write_file'], 'w') as outfile:\r\n outfile.write('\\t'.join(header) + '\\n')\r\n for line in results:\r\n line = [str(x) for x in line]\r\n line = '\\t'.join(line)\r\n outfile.write(line + '\\n')\r\n return results\r\n\r\n def per_base_stats(self, **kwargs):\r\n \"\"\"\r\n Creates a 2d array of every position in the genome, the columns are:\r\n Position | Consensus | Percent | A | C | G | T | N | Gap\r\n --|--|--|--|--|--|--|--|--\r\n 0 | A | 90.0 | 900 | 83 | 8 | 4 | 5 | 0\r\n 1 | C | 100 | 0 | 870 | 0 | 0 | 0 | 0\r\n .. | .. | .. | .. | ..| .. | .. | .. | ..\r\n\r\n :param kwargs:\r\n :return:\r\n \"\"\"\r\n # Setting Kwargs and defaults\r\n kwargs['write_file'] = kwargs.get('write_file', False)\r\n organism = kwargs.get('organism', None)\r\n file_name = kwargs.get('file_name', None)\r\n min_len = kwargs.get('min_read_len', 50)\r\n\r\n if organism == None and len(self.genome_lengths.keys()) > 1:\r\n sys.stderr.write(\"Available organism names are: {}\".format(', '.join(self.genome_lengths.keys())))\r\n # organism = input(\"\\n\\nOrganism name not specified for .per_base_stats(organism=...) and more than one organism is present,\\n\"+\r\n # \"Enter the name of an organism to analyze. (available names listed above):\\n\")\r\n organism = 'NC_000000.1'\r\n else:\r\n organism = list(self.genome_lengths.keys())[0]\r\n\r\n if organism == 'all':\r\n sys.stdout.write(\"All Organisms chosen, this could take a long time and a lot of memory. 
I hope you know what you are doing...\\n\")\r\n all_d = {}\r\n for organism in self.genome_lengths.keys():\r\n all_d[organism] = self.per_base_stats(organism=organism, write_file=kwargs['write_file'])\r\n return all_d\r\n\r\n # Initialize a list for every position in the genome, with an empty dictionary\r\n base_positions = [{\"A\": 0, \"C\": 0, \"G\": 0, \"T\": 0, \"N\": 0, \"Gap\": 0} for i in range(self.genome_lengths[organism])]\r\n is_empty = True\r\n # Loop over each file and add each base to the correct position in base_positions\r\n for f in self.input_files:\r\n try:\r\n # if a specific file is specified and this file isn't it, continue\r\n if file_name != None and f != file_name:\r\n continue\r\n\r\n bamfile = pysam.AlignmentFile(f, 'rb')\r\n for p in bamfile.pileup(contig=organism):\r\n for pilups in p.pileups:\r\n if pilups.query_position:\r\n bp = pilups.alignment.query_sequence[pilups.query_position]\r\n else:\r\n bp = '-'\r\n base_positions[p.reference_pos][bp] = base_positions[p.reference_pos].get(bp, 0) + 1\r\n is_empty = False\r\n except Exception as e:\r\n sys.stderr.write('{}\\nReading file: {} failed for Organism: {} -- skipping.\\n'.format(e, file_name, organism))\r\n continue\r\n\r\n if kwargs['write_file']:\r\n if is_empty:\r\n print('\\n\\nempty')\r\n with open(kwargs['write_file'] + organism + '.csv', 'w') as outfile:\r\n header = \"\\t\".join(['Position', 'Consensus', 'Percent', 'A', 'C', 'G', 'T', 'N', 'Gap\\n'])\r\n outfile.write(header)\r\n for index, pos_dict in enumerate(base_positions):\r\n consensus = max(pos_dict, key=pos_dict.get)\r\n try:\r\n percent = float(pos_dict[consensus]) / sum(list(pos_dict.values()))\r\n except:\r\n percent = 0.0\r\n line = [index, consensus, round(percent * 100, 2), pos_dict['A'], pos_dict['C'], pos_dict['G'],\r\n pos_dict['T'], pos_dict['N'], pos_dict['Gap']]\r\n line = [str(x) for x in line]\r\n line[-1] = line[-1] + '\\n'\r\n outfile.write('\\t'.join(line))\r\n\r\n return base_positions\r\n\r\n def reads(self, **kwargs):\r\n \"\"\"\r\n Yields 1 read at a time across all files.\r\n For a full list of things to do with yielded reads:\r\n http://pysam.readthedocs.io/en/latest/api.html#pysam.AlignedSegment\r\n :param kwargs: organism, min_read_len, only_this_file\r\n :return:\r\n \"\"\"\r\n organism = kwargs.get('organism', None)\r\n only_this_file = kwargs.get('file_name', None)\r\n min_read_len = kwargs.get('min_read_len', None)\r\n verb = kwargs.get('verbose', False)\r\n\r\n for bam_file_name in self.input_files:\r\n if only_this_file != None and bam_file_name != only_this_file:\r\n continue\r\n bamfile = pysam.AlignmentFile(bam_file_name, 'rb', check_sq=False)\r\n if verb:\r\n print('Opening file: {}'.format(bam_file_name))\r\n for read in bamfile.fetch():\r\n if organism is not None and read.reference_name != organism:\r\n continue\r\n if min_read_len != None and read.infer_query_length() < min_read_len:\r\n continue\r\n yield read\r\n\r\n def cat(self, new_filename, **kwargs):\r\n organism = kwargs.get('organism', None)\r\n only_this_file = kwargs.get('file_name', None)\r\n min_read_len = kwargs.get('min_read_len', None)\r\n\r\n out = pysam.Samfile(new_filename, 'w', template=pysam.AlignmentFile(self.input_files[0]))\r\n for read in self.reads(min_len=min_read_len, organism=organism, only_this_file=only_this_file, verbose=True):\r\n if organism is not None and read.reference_name != organism:\r\n continue\r\n if min_read_len != None and read.infer_query_length() < min_read_len:\r\n continue\r\n out.write(read)\r\n\r\n if 
not new_filename.endswith('.sam'):\r\n new_filename = new_filename + '.sam'\r\n bamfile = new_filename.replace('.sam', '.bam')\r\n\r\n # These are the commands to be run, edit them here!\r\n convert_to_bam = [\"samtools\", \"view\", \"-bS\", new_filename]\r\n sort_bamfile = [\"samtools\", \"sort\", bamfile, bamfile.replace('.bam', '')]\r\n index_bamfile = [\"samtools\", \"index\", bamfile, bamfile.replace('.bam', '.bai')]\r\n\r\n sys.stdout.write('Converting {} to BAM file, sorting, and indexing...'.format(infile))\r\n ret_code = subprocess.call(convert_to_bam, stdout=open(bamfile, 'w'))\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(convert_to_bam)))\r\n return None\r\n ret_code = subprocess.call(sort_bamfile)\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(sort_bamfile)))\r\n return None\r\n ret_code = subprocess.call(index_bamfile)\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(index_bamfile)))\r\n return None\r\n\r\n\r\n def primers(self, primer_len, **kwargs):\r\n \"\"\"\r\n First: Creates a pileup of the whole genome using every BAM file, using .per_base_stats()\r\n Second: Calculates the rolling_scores (multiplied, not averaged) conservation of each bas in a window of size 'len'\r\n Third: Returns the most conserved\r\n :param kwargs: primer_len=int, organism=string, min_read_len=int\r\n :return:\r\n \"\"\"\r\n organism = kwargs.get('organism', None)\r\n only_this_file = kwargs.get('file_name', None)\r\n min_read_len = kwargs.get('min_read_len', None)\r\n write_file = kwargs.get('write_file', None)\r\n\r\n # PBS is a 2d array with the columns\r\n # Position | Consensus | Percent | A | C | G | T | N | Gap\r\n\r\n pbs = []\r\n\r\n for index, pos_dict in enumerate(self.per_base_stats(**kwargs)):\r\n consensus = max(pos_dict, key=pos_dict.get)\r\n try:\r\n percent = float(pos_dict[consensus]) / sum(list(pos_dict.values()))\r\n except:\r\n percent = 0.0\r\n line = [index, consensus, round(percent * 100, 2), pos_dict['A'], pos_dict['C'], pos_dict['G'],\r\n pos_dict['T'], pos_dict['N'], pos_dict['Gap']]\r\n line = [str(x) for x in line]\r\n line[-1] = line[-1] + '\\n'\r\n pbs.append(line)\r\n\r\n def score_array(my_list):\r\n \"\"\"\r\n This internal def will only be used to score each primer length based on conservation\r\n :param my_list: a list of ints\r\n :return: an int\r\n \"\"\"\r\n if len(my_list) < 1:\r\n return 0\r\n\r\n my_list = [float(x) for x in my_list]\r\n i = 1.0\r\n for j in my_list:\r\n i = i * j\r\n return i\r\n\r\n # part 2, calculate scores for all primers starting at [0 -> end-primer_len]\r\n conservations = [x[2] for x in pbs]\r\n rolling_scores = []\r\n for i in range(len(pbs) - primer_len):\r\n rolling_scores.append((i, score_array(conservations[i:i+primer_len])))\r\n\r\n rolling_scores.sort(key=operator.itemgetter(1))\r\n\r\n output = []\r\n # format FASTA output two lines at a time\r\n for i in range(100):\r\n score_tup = rolling_scores[i]\r\n least_cons_base = min([x[1] for x in pbs[score_tup[0] : score_tup[0] + primer_len]])\r\n seq = ''.join([pbs[score_tup[0]+j][1] for j in range(0,primer_len)])\r\n output.append('>start_position_{} [score={}][GC_content={}][least_conserved_base={}]'.format(score_tup[0], score_tup[1], seq.count('G') + seq.count('C'), least_cons_base))\r\n output.append(seq)\r\n\r\n if write_file:\r\n with open(write_file, 'r') as outfile:\r\n for line in output:\r\n outfile.write(line + 
'\\n')\r\n\r\n return output\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='')\r\n parser.add_argument('-i', '--input', help='Input File', required=True)\r\n parser.add_argument('-o', '--output', help='output directory')\r\n parser.add_argument('-n', help='Some Number', type=int)\r\n parser.add_argument('-v', help='Verbose', action='store_true')\r\n try:\r\n args = parser.parse_args()\r\n except:\r\n parser.print_help()\r\n sys.exit(1)\r\n\r\n data = SamReader(args.input)\r\n if not args.output:\r\n args.outpt=None\r\n\r\n data.per_base_stats(write_file=args.output)\r\n", "id": "6775108", "language": "Python", "matching_score": 8.425262451171875, "max_stars_count": 1, "path": "sam_reader.py" }, { "content": "import sys\r\nimport os\r\nimport argparse\r\nimport subprocess\r\nimport pysam\r\n\r\nclass Sam_Reader:\r\n\r\n def __init__(self, file_or_folder, **kwargs):\r\n \"\"\"\r\n Initialize with the path to a file or a folder. If a file is\r\n :param file_or_folder:\r\n \"\"\"\r\n convert = kwargs.get('convert', True)\r\n check_files = kwargs.get('check_files', True)\r\n\r\n # Generate a list of files in dir, and convert sam to bam\r\n if not os.path.isdir(file_or_folder):\r\n if file_or_folder.endswith('.sam'):\r\n file_or_folder = self.sam_to_bam(file_or_folder)\r\n input_files = [file_or_folder]\r\n else:\r\n if not file_or_folder.endswith('/'):\r\n file_or_folder = file_or_folder + '/'\r\n # Get the names of every SAM and BAM file in the input dir\r\n input_files = [file_or_folder + file_name for file_name in os.listdir(file_or_folder) if\r\n file_name.endswith(\".sam\") or file_name.endswith('.bam')]\r\n # Trim sam files from the list that have a bam file of the same name in the list\r\n input_files = [file_name for file_name in input_files if not\r\n (file_name.endswith('.sam') and file_name.replace('.sam','.bam') in input_files)]\r\n # Convert any sam files to bam files, sort, index and add the new file names to the input_files\r\n input_files = [file_name if file_name.endswith('.bam') else self.sam_to_bam(file_name) for file_name in input_files]\r\n self.input_files = input_files\r\n\r\n # Check if every BAM files has an index\r\n #TODO\r\n\r\n # Check if every file can be opened and record genomes & lengths\r\n genome_lengths = {}\r\n removed_files = []\r\n\r\n for f in self.input_files:\r\n try:\r\n bamfile = pysam.AlignmentFile(f, 'rb')\r\n except Exception as e:\r\n sys.stderr.write('File {} could not be opened by pysam because...:\\n{}\\n'.format(f, e))\r\n sys.stderr.write('Removing {} from input list and continuing.\\n'.format(f))\r\n removed_files.append(f)\r\n continue\r\n\r\n for l, r in zip(bamfile.lengths, bamfile.references):\r\n genome_lengths[r] = l\r\n if not check_files:\r\n break\r\n self.input_files = list(set(self.input_files)-set(removed_files))\r\n self.broken_files = removed_files\r\n self.genome_lengths = genome_lengths\r\n\r\n def __str__(self):\r\n return \"{} BAM file(s): (use .input_files)\\n{} Organism(s)/Genome_Length {}\\n\".format(len(self.input_files), len(self.genome_lengths.keys()), str(self.genome_lengths))\r\n\r\n def remove_short_reads(self, new_dir = None, min_length = 50):\r\n \"\"\"\r\n Reads in each bamfile and removes an reads less than min length and writes them to a new file\r\n :param min_length:\r\n :return:\r\n \"\"\"\r\n\r\n @staticmethod\r\n def sam_to_bam(infile, outdir = None):\r\n \"\"\"\r\n Converts a SAM file to a BAM file, sorts it, and Indexes it.\r\n :param infile: path 
to SAM file\r\n :param outdir: (optional) path to write BAM file to\r\n :return: path to new BAM file\r\n \"\"\"\r\n\r\n if infile.endswith('.sam'):\r\n # Changing the output file name and location\r\n bamfile = infile.replace('.sam', '.bam')\r\n if outdir:\r\n infile = infile.split('/')[-1].replace('.sam', '')\r\n bamfile = outdir + infile + '.bam'\r\n\r\n # These are the commands to be run, edit them here!\r\n convert_to_bam = [\"samtools\", \"view\", \"-bS\", infile]\r\n sort_bamfile = [\"samtools\", \"sort\", bamfile, bamfile.replace('.bam', '')]\r\n index_bamfile = [\"samtools\", \"index\", bamfile, bamfile.replace('.bam', '')]\r\n\r\n sys.stdout.write('Converting {} to BAM file, sorting, and indexing...'.format(infile))\r\n ret_code = subprocess.call(convert_to_bam, stdout=open(bamfile, 'w'))\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(convert_to_bam)))\r\n return None\r\n ret_code = subprocess.call(sort_bamfile)\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(sort_bamfile)))\r\n return None\r\n ret_code = subprocess.call(index_bamfile)\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(index_bamfile)))\r\n return None\r\n\r\n return bamfile\r\n\r\n else:\r\n sys.stderr.write('File: \"{}\" does not end with .sam, cannot convert to .bam'.format(infile))\r\n return None\r\n\r\n @staticmethod\r\n def read_counts(bam_file_name, n=50):\r\n\r\n bamfile = pysam.AlignmentFile(bam_file_name, 'rb', check_sq=False)\r\n stats_dict = {} # {genome_name: [total_reads_mapped, reads > n base pairs long]}\r\n for read in bamfile.fetch():\r\n if not read.reference_name in stats_dict:\r\n stats_dict[read.reference_name] = [0, 0]# index 0 is count of all reads, index 1 is all reads > n length\r\n total_len = int(sum(read.get_cigar_stats()[0]))\r\n if total_len > n:\r\n stats_dict[read.reference_name][1] += 1\r\n stats_dict[read.reference_name][0] += 1\r\n if stats_dict == {}:\r\n return {'None': [0, 0]}\r\n return stats_dict\r\n\r\n def quick_percent_coverages(self, bam_file_name, organism=None, MIN_POSITIONAL_COVERAGE=1):\r\n bamfile = pysam.AlignmentFile(bam_file_name, 'rb', check_sq=False)\r\n\r\n # Loop over every read, and calculate coverage an organism if it's the first read found\r\n organism_coverage = {}\r\n for read in bamfile.fetch():\r\n genome_name = read.reference_name\r\n if genome_name in organism_coverage:\r\n # print('exists')\r\n continue\r\n if organism != None and organism != genome_name:\r\n # print('specified and not{}{}'.format(genome_name,organism))\r\n continue\r\n\r\n # Process one organism\r\n base_depth = []\r\n for p in bamfile.pileup(contig=genome_name):\r\n for pilups in p.pileups:\r\n if pilups.query_position:\r\n # Expand array while insert pos is out of list bounds\r\n if p.reference_pos >= len(base_depth):\r\n base_depth += [0] * (p.reference_pos - len(base_depth) + 1)\r\n # while p.reference_pos >= len(base_depth):\r\n # base_depth.append(0)\r\n base_depth[p.reference_pos] += 1\r\n if base_depth[p.reference_pos] > MIN_POSITIONAL_COVERAGE:\r\n continue\r\n\r\n bins_covered = len([x for x in base_depth if x > 0])\r\n organism_coverage[genome_name] = (bins_covered / self.genome_lengths[genome_name]) * 100\r\n if organism_coverage == {}:\r\n return {'None': 0}\r\n return organism_coverage\r\n\r\n def hits(self, **kwargs):\r\n \"\"\"\r\n File | Genome | Percent Coverage | Total Mapped Reads | Mapped Reads > 50 bp\r\n\r\n 
:param kwargs:\r\n :return:\r\n \"\"\"\r\n # Setting Kwargs and defaults\r\n\r\n organism = kwargs.get('organism', None)\r\n only_this_file = kwargs.get('file_name', None)\r\n min_read_len = kwargs.get('min_read_length', 50)\r\n min_cov_depth = kwargs.get('min_coverage_depth', 1)\r\n\r\n header = ['file', 'genome', 'percent_coverage', 'total reads mapped', 'reads mapped > {} bp'.format(min_read_len)]\r\n results = []\r\n for f in self.input_files:\r\n # if a specific file is specified and this file isn't it, continue\r\n if only_this_file != None and f != only_this_file:\r\n continue\r\n f_coverages = self.quick_percent_coverages(f, organism, min_cov_depth)\r\n\r\n for genome, stats in Sam_Reader.read_counts(f, min_read_len).items():\r\n line = [f, genome, round(f_coverages.get(genome,0), 1), stats[0], stats[1]]\r\n results.append(line)\r\n\r\n if kwargs.get('write_file', False):\r\n if len(results) < 1:\r\n print(\"no results?\")\r\n return\r\n\r\n with open(kwargs['write_file'], 'w') as outfile:\r\n outfile.write('\\t'.join(header) + '\\n')\r\n for line in results:\r\n line = [str(x) for x in line]\r\n line = '\\t'.join(line)\r\n outfile.write(line + '\\n')\r\n return results\r\n\r\n def per_base_stats(self, **kwargs):\r\n \"\"\"\r\n\r\n :param kwargs:\r\n :return:\r\n \"\"\"\r\n # Setting Kwargs and defaults\r\n kwargs['write_file'] = kwargs.get('write_file', False)\r\n organism = kwargs.get('organism', None)\r\n file_name = kwargs.get('file_name', None)\r\n min_len = kwargs.get('min_len', 50)\r\n\r\n if organism == None and len(self.genome_lengths.keys()) > 1:\r\n sys.stderr.write(\"Organism name not specified for per_base_stats and more than one organism is present,\\n\"\r\n \"Available organism names are: {}\".format(', '.join(self.genome_lengths.keys())))\r\n organism = input(\"\\n\\nOrganism name not specified for .per_base_stats(organism=...) and more than one organism is present,\\n\"+\r\n \"Enter the name of an organism to analyze. (available names listed above):\\n\")\r\n\r\n else:\r\n organism = list(self.genome_lengths.keys())[0]\r\n\r\n if organism == 'all':\r\n sys.stdout.write(\"All Organisms chosen, this could take a long time and a lot of memory. 
I hope you know what you are doing...\\n\")\r\n all_d = {}\r\n for organism in self.genome_lengths.keys():\r\n all_d[organism] = self.per_base_stats(organism=organism, write_file=kwargs['write_file'])\r\n return all_d\r\n\r\n # Initialize a list for every position in the genome, with an empty dictionary\r\n base_positions = [{\"A\": 0, \"C\": 0, \"G\": 0, \"T\": 0, \"N\": 0, \"Gap\": 0} for i in range(self.genome_lengths[organism])]\r\n empty = True\r\n # Loop over each file and add each base to the correct position in base_positions\r\n for f in self.input_files:\r\n try:\r\n # if a specific file is specified and this file isn't it, continue\r\n if file_name != None and f != file_name:\r\n continue\r\n\r\n bamfile = pysam.AlignmentFile(f, 'rb')\r\n for p in bamfile.pileup(contig=organism):\r\n for pilups in p.pileups:\r\n if pilups.query_position:\r\n bp = pilups.alignment.query_sequence[pilups.query_position]\r\n else:\r\n bp = '-'\r\n base_positions[p.reference_pos][bp] = base_positions[p.reference_pos].get(bp, 0) + 1\r\n empty = False\r\n except Exception as e:\r\n sys.stderr.write('{}\\nReading file: {} failed for Organism: {} -- skipping.\\n'.format(e, file_name, organism))\r\n continue\r\n\r\n if kwargs['write_file']:\r\n if empty:\r\n print('\\n\\nempty')\r\n with open(kwargs['write_file'] + organism + '.csv', 'w') as outfile:\r\n header = \"\\t\".join(['Position', 'Consensus', 'Percent', 'A', 'C', 'G', 'T', 'N', 'Gap\\n'])\r\n outfile.write(header)\r\n for index, pos_dict in enumerate(base_positions):\r\n consensus = max(pos_dict, key=pos_dict.get)\r\n try:\r\n percent = float(pos_dict[consensus]) / sum(list(pos_dict.values()))\r\n except:\r\n percent = 0.0\r\n line = [index, consensus, round(percent * 100, 2), pos_dict['A'], pos_dict['C'], pos_dict['G'],\r\n pos_dict['T'], pos_dict['N'], pos_dict['Gap']]\r\n line = [str(x) for x in line]\r\n line[-1] = line[-1] + '\\n'\r\n outfile.write('\\t'.join(line))\r\n\r\n return base_positions\r\n\r\n def reads(self, **kwargs):\r\n \"\"\"\r\n For a full list of things to do with yielded reads:\r\n http://pysam.readthedocs.io/en/latest/api.html#pysam.AlignedSegment\r\n :param kwargs: organism, min_read_len, only_this_file\r\n :return:\r\n \"\"\"\r\n organism = kwargs.get('organism', None)\r\n only_this_file = kwargs.get('file_name', None)\r\n min_read_len = kwargs.get('min_len', None)\r\n verb = kwargs.get('verbose', False)\r\n\r\n for bam_file_name in self.input_files:\r\n if only_this_file != None and bam_file_name != only_this_file:\r\n continue\r\n bamfile = pysam.AlignmentFile(bam_file_name, 'rb', check_sq=False)\r\n if verb:\r\n print('Opening file: {}'.format(bam_file_name))\r\n for read in bamfile.fetch():\r\n if organism is not None and read.reference_name != organism:\r\n continue\r\n if min_read_len != None and read.infer_query_length() < min_read_len:\r\n continue\r\n yield read\r\n\r\n def write_reads(self, new_filename, **kwargs):\r\n organism = kwargs.get('organism', None)\r\n only_this_file = kwargs.get('file_name', None)\r\n min_read_len = kwargs.get('min_len', None)\r\n\r\n out = pysam.Samfile(new_filename, 'w', template=pysam.AlignmentFile(self.input_files[0]))\r\n for read in self.reads(min_len=30, organism=organism, only_this_file=only_this_file, verbose=True):\r\n out.write(read)\r\n\r\n if not new_filename.endswith('.sam'):\r\n new_filename = new_filename + '.sam'\r\n bamfile = new_filename.replace('.sam', '.bam')\r\n\r\n # These are the commands to be run, edit them here!\r\n convert_to_bam = [\"samtools\", \"view\", 
\"-bS\", new_filename]\r\n sort_bamfile = [\"samtools\", \"sort\", bamfile, bamfile.replace('.bam', '')]\r\n index_bamfile = [\"samtools\", \"index\", bamfile, bamfile.replace('.bam', '.bai')]\r\n\r\n sys.stdout.write('Converting {} to BAM file, sorting, and indexing...'.format(infile))\r\n ret_code = subprocess.call(convert_to_bam, stdout=open(bamfile, 'w'))\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(convert_to_bam)))\r\n return None\r\n ret_code = subprocess.call(sort_bamfile)\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(sort_bamfile)))\r\n return None\r\n ret_code = subprocess.call(index_bamfile)\r\n if ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(index_bamfile)))\r\n return None\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='')\r\n parser.add_argument('-i', '--input', help='Input File', required=True)\r\n parser.add_argument('-o', '--output', help='output directory')\r\n parser.add_argument('-n', help='Some Number', type=int)\r\n parser.add_argument('-v', help='Verbose', action='store_true')\r\n try:\r\n args = parser.parse_args()\r\n except:\r\n parser.print_help()\r\n sys.exit(1)\r\n\r\n data = Sam_Reader(args.input)\r\n if not args.output:\r\n args.outpt=None\r\n\r\n data.per_base_stats(write_file=args.output)\r\n", "id": "5842617", "language": "Python", "matching_score": 0.8537791967391968, "max_stars_count": 0, "path": "bin/sam_stats.py" }, { "content": "import numpy as np\nfrom numpy import vstack # VSTACK((a,b)) stacks B on TOP of A\nimport seaborn as sns; sns.set()\nimport matplotlib.pyplot as plt\nimport math\nimport argparse\nimport sys\n\n\ndef csv_to_numpy(csv_file):\n \"\"\"\n Reads in a CSV file and returns a numpy array\n :param csv_file: String, the location and name of the CSV file\n :return: a Numpy array the dimensions of the CSV\n \"\"\"\n with open(csv_file, 'r') as infile:\n lines = infile.readlines()\n stack = None\n for line in lines:\n newline = []\n for x in [int(y) for y in line.split(',')]:\n if x <= 0:\n newline.append(0)\n else:\n newline.append(math.log(x, 10))\n x = np.array(newline)\n if not stack:\n stack = x\n else:\n try:\n stack = vstack((stack, x)) # Double (( )) on purpose\n except Exception as e:\n if False:\n print('Error: cannot vstack len:{}'.format(len(x)))\n return stack\n\n\ndef sort_numpy_array(numpy_array):\n \"\"\"\n Sorts a 2d Numpy array by the sum of each row (highest sum is at the top)\n :param numpy_array: A 2d numpy array\n :return: a sorted 2d numpy array\n \"\"\"\n numpy_array = numpy_array.tolist()\n numpy_array.sort(key=sum, reverse=True)\n numpy_array = np.array(numpy_array)\n return numpy_array\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Reads in a CSV file and outputs a basic heatmap of the data')\n parser.add_argument('-i', '--input', help='The location of the CSV file to be made into a heatmap', required=True)\n parser.add_argument('-o', '--output', help='File name of the figure', required=True)\n\n try:\n args = parser.parse_args()\n except:\n parser.print_help()\n sys.exit(0)\n\n csv_array = csv_to_numpy(args.input)\n csv_array = sort_numpy_array(csv_array)\n\n # This is where you can customize your figure! 
--------\n # Documentation for the seaborn heatmap can be found here:\n # http://seaborn.pydata.org/generated/seaborn.heatmap.html\n # Feel free to change this to suit your needs, csv_array is a 2d numpy array\n # of the data to be plotted, and most graphing modules can read numpy arrays\n ax = sns.heatmap(csv_array, xticklabels=[], yticklabels=[])\n\n\n\n\n # plt.show() # Uncommenting this line will show the figure before saving\n plt.savefig(args.output)\n\n\n\n\n\n\n", "id": "12105085", "language": "Python", "matching_score": 0.5635267496109009, "max_stars_count": 0, "path": "bin/csv_to_heatmap.py" }, { "content": "import argparse\nimport sys\nimport operator\nfrom functools import reduce\nfrom conserved_regions_csv import bam_base_distribution\n\n\ndef score(conservation):\n \"\"\"\n This is the scoring function for sequences, right now it is just the multiplied\n conservation of every base in the sequence. Change this function for custom scoring.\n \"\"\"\n return reduce(lambda x, y: x*y, conservation)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-i', '--input', help='Directory containing bamfiles.')\n parser.add_argument('-l', '--length', help='The length of the sequence to be found.', required=True, type=int)\n parser.add_argument('-n', '--num-seqs', help='The top N sequences you want returned. Default is 50 sequences', type = int)\n parser.add_argument('-d', '--depth', help='Exclude sites with less than -d coverage depth. Default is 10', type = int)\n parser.add_argument('-s', '--start', help='Exclude sites that start before the -s value. Default is 0', type = int)\n parser.add_argument('-e', '--end', help='Exclude sites that start after the -e value. Default is 0', type = int)\n\n try:\n args = parser.parse_args()\n except:\n parser.print_help()\n sys.exit(1)\n if not args.start:\n args.start = 0\n\n if not args.num_seqs:\n args.num_seqs = 50\n if not args.depth:\n args.depth = 10\n\n base_lists = bam_base_distribution(args.input)\n conservation_list = [float(max(base_list.values()))/sum(base_list.values()) if sum(base_list.values()) > 0 else 0 for base_list in base_lists]\n depth_list = [sum(base_list.values()) for base_list in base_lists]\n consensus_genome = [max(base_list.items(), key=operator.itemgetter(1))[0] if len(base_list) > 0 else \"-\" for base_list in base_lists]\n score_list = []\n if not args.end:\n args.end = len(depth_list)\n for i in range(args.start, args.end - args.length):\n if min(depth_list[i:i+args.length]) < args.depth:\n score_list.append(-1)\n else:\n score_list.append(score(conservation_list[i:i+args.length]))\n\n # sorted_scores is a sorted list of tuples [(.999, 100), (.998, 504)...]\n # The first value is the score, the second is the original position in the list (1 less than the genome position)\n sorted_scores = sorted(((e, i) for i, e in enumerate(score_list)), reverse=True)\n\n for i in range(0, args.num_seqs):\n loc = sorted_scores[i][1]\n print(\">Position_{0} [multiplied_conservation={1}] [avg_coverage_depth={2}]\\n{3}\\n\".format(loc, sorted_scores[i][0], sum(depth_list[loc:loc+args.length])/args.length, ''.join(consensus_genome[loc:loc+args.length])))\n\n\n", "id": "60867", "language": "Python", "matching_score": 1.6974077224731445, "max_stars_count": 0, "path": "bin/target_sites.py" }, { "content": "import sys\r\nimport pysam\r\nimport argparse\r\nimport os\r\nimport time\r\n\r\ndef genome_coverage_dict(infile):\r\n \"\"\"\r\n Creates a dictionary where keys are organism 
names, and values are the percent coverage of the genome from 0 to 1\r\n :param infile: Sorted and indexed BAM file\r\n :return: dictionary\r\n \"\"\"\r\n if not infile.endswith('.bam'):\r\n sys.stderr.write('{} does not end with .bam - skipping\\n'.format(infile))\r\n return\r\n try:\r\n bamfile = pysam.AlignmentFile(infile, 'rb')\r\n except Exception as e:\r\n sys.stderr.write(infile + ' Could not be read\\n')\r\n sys.stderr.write(str(e))\r\n return\r\n\r\n # Get a dict of genome lengths from the header\r\n genome_lengths = {}\r\n for l, r in zip(bamfile.lengths, bamfile.references):\r\n genome_lengths[r] = l\r\n\r\n # Loop over every read, and calculate coverage an organism if it's the first read found\r\n organism_coverage = {}\r\n for read in bamfile.fetch():\r\n genome_name = read.reference_name\r\n if genome_name in organism_coverage:\r\n continue\r\n\r\n #Process one genome\r\n base_depth = []\r\n for p in bamfile.pileup(contig=genome_name):\r\n for pilups in p.pileups:\r\n if pilups.query_position:\r\n #Expand array while insert pos is out of list bounds\r\n if p.reference_pos >= len(base_depth):\r\n while p.reference_pos >= len(base_depth):\r\n base_depth.append(0)\r\n base_depth[p.reference_pos] += 1\r\n\r\n bins_covered = len([x for x in base_depth if x > 0])\r\n organism_coverage[genome_name] = bins_covered/genome_lengths[genome_name]\r\n return organism_coverage\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(\r\n description='Writes to standard out \"Genome_Name\\tPercent_Coverage\" for each genome in a BAM file')\r\n parser.add_argument('-i', '--input', help='Name of the input .bam file to be read', required=True)\r\n parser.add_argument('-r', '--dir', help='Add this flag if input is a directory of BAM files', action='store_true')\r\n\r\n try:\r\n args = parser.parse_args()\r\n except:\r\n parser.print_help()\r\n sys.exit(1)\r\n\r\n if not args.dir:\r\n x = genome_coverage_dict(args.input)\r\n if x:\r\n for k,v in x.items():\r\n sys.stdout.write('\\t'.join([k,str(v),'\\n']))\r\n else:\r\n sys.stdout.write('Everything worked with this script but the BAM file is empty :(')\r\n else:\r\n all_files = {}\r\n for fname in os.listdir(args.input):\r\n if fname.endswith('.bai'):\r\n continue\r\n all_files[fname] = genome_coverage_dict(args.input + fname)\r\n for fname, dict in all_files.items():\r\n sys.stdout.write(fname + '\\n')\r\n if dict:\r\n for k,v in dict.items():\r\n sys.stdout.write('\\t'.join(['', k, str(v), '\\n']))\r\n else:\r\n sys.stdout.write('\\tEmpty\\n')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "id": "11737771", "language": "Python", "matching_score": 1.5153262615203857, "max_stars_count": 0, "path": "bin/genome_coverage.py" }, { "content": "import sys\r\nimport os\r\nimport argparse\r\nfrom sam_stats import Sam_Reader\r\nimport pysam\r\nimport subprocess\r\n\r\nmy_files = Sam_Reader('../Input/BAM_Files/', check_files=False)\r\nout = pysam.Samfile('001895.1.sam', 'w', template=pysam.AlignmentFile(my_files.input_files[0]))\r\nfor read in my_files.reads(min_len=30, organism='NC_001895.1', verbose=True):\r\n out.write(read)\r\nnew_filename = '001895.1.sam'\r\n\r\nif not new_filename.endswith('.sam'):\r\n new_filename = new_filename + '.sam'\r\nbamfile = new_filename.replace('.sam', '.bam')\r\nconvert_to_bam = [\"samtools\", \"view\", \"-bS\", new_filename]\r\nsort_bamfile = [\"samtools\", \"sort\", bamfile, bamfile.replace('.bam', '')]\r\nindex_bamfile = [\"samtools\", \"index\", bamfile, 
bamfile.replace('.bam', '.bai')]\r\n\r\nsys.stdout.write('Converting {} to BAM file, sorting, and indexing...'.format(new_filename))\r\nret_code = subprocess.call(convert_to_bam, stdout=open(bamfile, 'w'))\r\nif ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(convert_to_bam)))\r\nret_code = subprocess.call(sort_bamfile)\r\nif ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(sort_bamfile)))\r\nret_code = subprocess.call(index_bamfile)\r\nif ret_code != 0:\r\n sys.stderr.write(\"Error running command \\\"{}\\\"\\n\".format(' '.join(index_bamfile)))\r\n", "id": "9805161", "language": "Python", "matching_score": 1.5264443159103394, "max_stars_count": 0, "path": "bin/remove_short_reads.py" }, { "content": "import sys\nimport subprocess\n\n\n#This python script will test if all of the required programs are installed\nsys.stdout.write(\"Testing to see if required programs are installed.\\nNote: This does not test all aspects\"\n \" of each program, only that it is installed and setup with PATH a variable.\\n\"\n \"More on PATH variables here: http://www.linfo.org/path_env_var.html\\n\")\ntry:\n import pysam\n sys.stdout.write('Pysam\\tOK\\n')\nexcept:\n sys.stdout.write('Pysam\\tFailed\\n')\n\ntry:\n import bs4\n sys.stdout.write('BS4(optional)\\tOK\\n')\nexcept:\n sys.stdout.write('BS4(optional)\\tFailed\\n')\n\nproc = subprocess.Popen('fastq-dump --help', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\nwhile proc.poll() is None:\n commandResult = proc.wait()\nif commandResult is 0:\n sys.stdout.write('SRA Toolkit\\tOK\\n')\nelse:\n sys.stdout.write('SRA Toolkit\\tFailed\\n')\n\nproc = subprocess.Popen('bowtie2 --help', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\nwhile proc.poll() is None:\n commandResult = proc.wait()\nif commandResult is 0:\n sys.stdout.write('Bowtie 2\\tOK\\n')\nelse:\n sys.stdout.write('Bowtie 2\\tFailed\\n')\n\nproc = subprocess.Popen('which samtools', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\nwhile proc.poll() is None:\n commandResult = proc.wait()\nif commandResult is 0:\n sys.stdout.write('Samtools\\tOK\\n')\nelse:\n sys.stdout.write('Samtools\\tFailed\\n')\n", "id": "10218239", "language": "Python", "matching_score": 0.5142741203308105, "max_stars_count": 0, "path": "bin/test_requirements.py" }, { "content": "from setuptools import setup\r\n\r\nsetup(name='sam_reader',\r\n version='0.1b2',\r\n python_version='3+',\r\n author='<NAME>',\r\n description='A package for reading many SAM/BAM files at once using Pysam',\r\n author_email='<EMAIL>',\r\n install_requires=['pysam'],\r\n url='https://github.com/KyleLevi/SAM_Reader ')", "id": "5762826", "language": "Python", "matching_score": 0.17972661554813385, "max_stars_count": 1, "path": "setup.py" } ]
1.184553
korda
[ { "content": "import argparse\n\nfrom gitlab_api_client import GitlabApi\nfrom user_config import get_gitlab_api_client\nfrom subprocess import check_call\n\n\ndef create_project_action(main_args, progname: str):\n gitlab_instance = main_args.gitlab_instance\n\n create_project_parser = argparse.ArgumentParser(description='Create new project',\n prog=f'{progname} gitlab_instance create')\n create_project_parser.add_argument('path',\n help='path of project to create')\n\n args = create_project_parser.parse_args(main_args.args)\n\n gitlab_api_client = get_gitlab_api_client(gitlab_instance)\n\n __create_project(gitlab_api_client, args.path)\n\n\ndef __create_project(gitlab_api_client: GitlabApi, path: str):\n #gitlab_api_client.create_project(path)\n groups = path.split(\"/\")\n path = groups.pop()\n\n group = gitlab_api_client.get_namespace(\"/\".join(groups))\n\n repo = gitlab_api_client.create_project(path, group['id'])\n\n print(f\"Created repo with url {repo['ssh_url_to_repo']}\")\n print(f\"Gitlab link: {repo['web_url']}\")\n\n check_call(args=['git', 'remote', 'add', 'origin', repo['ssh_url_to_repo']])\n\n # git remote add github <EMAIL>@github.com:Unity-Group/hipchat-download-emoji.git\n", "id": "2496956", "language": "Python", "matching_score": 3.0628058910369873, "max_stars_count": 13, "path": "create_project.py" }, { "content": "import argparse\n\nfrom open_project import open_project_action\nfrom create_project import create_project_action\nfrom user_config import config_location\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Gitlab client.')\n parser.add_argument('gitlab_instance', help='you can have multiple gitlab instances to connect to, this argument '\n 'is required to determine which one to use. if instance is not '\n 'configured you will be asked to provide configuration with prompt. 
'\n f'configurations are saved in in file {config_location}.')\n parser.add_argument('action', help='action to execute', choices=['open', 'create'])\n parser.add_argument('args', nargs=argparse.REMAINDER)\n\n main_args = parser.parse_args()\n\n if main_args.action == 'open':\n open_project_action(main_args, parser.prog)\n elif main_args.action == 'create':\n create_project_action(main_args, parser.prog)\n else:\n print(f\"Unsupported action {main_args.action}\")\n quit(1)\n", "id": "2870241", "language": "Python", "matching_score": 1.7946069240570068, "max_stars_count": 13, "path": "__main__.py" }, { "content": "import argparse\nfrom os.path import isdir\nfrom pathlib import Path\nfrom subprocess import check_call\n\nfrom curses_select import select_option\nfrom gitlab_api_client import GitlabApi\nfrom user_config import get_gitlab_api_client\nfrom user_config import get_project_dir_location\n\n\ndef open_project_action(main_args, progname: str):\n gitlab_instance = main_args.gitlab_instance\n\n open_project_parser = argparse.ArgumentParser(description='Open project action',\n prog=f'{progname} gitlab_instance open')\n open_project_parser.add_argument('--save-dir-to', dest='saveDirTo',\n help='dir path with checked out project will be stored in order to use in '\n 'bash function to this location')\n open_project_parser.add_argument('--search', dest='search', nargs='?', const='', type=str, default='', required=False,\n help='search phrase to narrow projects list')\n\n args = open_project_parser.parse_args(main_args.args)\n\n gitlab_api_client = get_gitlab_api_client(gitlab_instance)\n project_dir = get_project_dir_location()\n\n checkout_dir = __open_project(gitlab_instance, gitlab_api_client, project_dir, args.search)\n\n if args.saveDirTo:\n Path(args.saveDirTo).write_text(checkout_dir + \"\\n\")\n\n\ndef __open_project(gitlab_instance: str, gitlab_api_client: GitlabApi, project_dir: str, search: str):\n projects = [project[\"path_with_namespace\"] for project in gitlab_api_client.projects(search)]\n\n projects.sort()\n\n selected_project = select_option(projects)\n\n if not selected_project:\n print(\"No project selected!\")\n quit(1)\n else:\n print(f\"Selected {selected_project}\")\n\n checkout_dir = str(Path(project_dir) / gitlab_instance / selected_project)\n\n if not isdir(checkout_dir):\n git_repo_address = gitlab_api_client.repo_url(selected_project)\n print(f\"Checking out project from {git_repo_address}\")\n check_call(args=['git', 'clone', git_repo_address, checkout_dir])\n\n return checkout_dir\n", "id": "11449143", "language": "Python", "matching_score": 3.2904653549194336, "max_stars_count": 13, "path": "open_project.py" }, { "content": "import json\nfrom os.path import isdir\nfrom os import mkdir\nfrom pathlib import Path\n\nfrom gitlab_api_client import GitlabApi\n\nconfig_location = Path.home() / \".gitlab-client.json\"\n\n\ndef get_gitlab_api_client(gitlab_instance) -> GitlabApi:\n config = __get_gitlab_instance_config(gitlab_instance)\n return GitlabApi(config[\"url\"], config[\"token\"], config[\"checkout_url\"])\n\n\ndef get_project_dir_location() -> str:\n config = __load_config()\n project_dir_key = 'project_dir'\n\n if project_dir_key not in config:\n provided_dir = input(\"Please provide directory for project checkout: \").lstrip().rstrip().rstrip(\"/\")\n if not provided_dir.startswith(\"/\"):\n provided_dir = Path.home() / provided_dir\n else:\n provided_dir = Path(provided_dir)\n\n print(f\"Saving {provided_dir} as project checkout directory...\")\n 
config[project_dir_key] = str(provided_dir.absolute())\n __save_config(config)\n\n if not isdir(config[project_dir_key]):\n mkdir(config[project_dir_key])\n\n return config[project_dir_key]\n\n\ndef __get_gitlab_instance_config(gitlab_instance):\n config = __load_config()\n gitlab_instances_key = 'gitlab_instances'\n\n if gitlab_instances_key not in config:\n config[gitlab_instances_key] = {}\n\n if gitlab_instance not in config[gitlab_instances_key]:\n provided_url = input(\"Please provide url to gitlab: \").lstrip().rstrip().rstrip(\"/\")\n provided_token = input(\"Please provide access token to gitlab: \").lstrip().rstrip()\n config[gitlab_instances_key][gitlab_instance] = {\n \"url\": provided_url,\n \"token\": provided_token\n }\n __save_config(config)\n\n if \"checkout_url\" not in config[gitlab_instances_key][gitlab_instance]:\n default_url = config[gitlab_instances_key][gitlab_instance][\"url\"]\n default_url = default_url.replace(\"https://\", \"\").replace(\"http://\", \"\")\n default_url = f\"ssh://git@{default_url}\"\n\n checkout_url = input(f\"Please provide url base for checkout [{default_url}]: \").lstrip().rstrip().rstrip(\"/\")\n if not checkout_url:\n checkout_url = default_url\n\n config[gitlab_instances_key][gitlab_instance][\"checkout_url\"] = checkout_url\n __save_config(config)\n\n return config[gitlab_instances_key][gitlab_instance]\n\n\ndef __save_config(config):\n config_location.write_text(json.dumps(config, indent=4) + \"\\n\")\n\n\ndef __load_config():\n __ensure_config_file_exists()\n return json.loads(config_location.read_text())\n\n\ndef __ensure_config_file_exists():\n if not config_location.is_file():\n config_location.write_text('{}\\n')\n", "id": "11465826", "language": "Python", "matching_score": 2.169586420059204, "max_stars_count": 13, "path": "user_config.py" }, { "content": "import json\nimport urllib.parse\nimport urllib.request\nimport urllib.error\n\n\nclass GitlabApi:\n def __init__(self, url: str, token: str, checkout_url: str):\n self.__url = url\n self.__token = token\n self.__checkout_url = checkout_url\n self.__api_root = '/api/v4'\n\n def repo_url(self, path: str) -> str:\n return self.__checkout_url + '/' + path.lstrip('/')\n\n def get_namespace(self, namespace: str):\n return self.__call('GET', f\"namespaces/{urllib.parse.quote(namespace, safe='')}\")\n\n def create_project(self, path: str, namespace_id):\n return self.__call('POST', \"projects\", path=path, namespace_id=namespace_id, visibility=\"private\")\n\n def projects(self, search: str):\n if search:\n print(\"Searching projects containing %s... This make take few moments.\" % search)\n else:\n print(\"Getting list of all projects... This make take few moments.\")\n projects = []\n per_page = 100\n current_page = 1\n\n while True:\n current_page_projects = self.__call('GET', \"projects\",\n per_page=per_page,\n page=current_page,\n simple=True,\n archived=False,\n search=search\n )\n\n current_page += 1\n projects += current_page_projects\n\n print(f\"{len(projects)} projects retrieved so far...\")\n\n if len(current_page_projects) == 0 or len(current_page_projects) < per_page:\n break\n\n return projects\n\n def __call(self, method, api_path, **kwargs):\n query = ''\n if kwargs:\n query = '?' 
+ urllib.parse.urlencode(kwargs)\n\n request = urllib.request.Request(self.__url + self.__api_root + \"/\" + api_path.lstrip(\"/\") + query)\n request.add_header('Private-Token', self.__token)\n request.method = method\n\n return json.loads(urllib.request.urlopen(request).read())\n", "id": "242781", "language": "Python", "matching_score": 0.38965776562690735, "max_stars_count": 13, "path": "gitlab_api_client.py" }, { "content": "import time\nimport math\nimport string\nfrom curses import wrapper\nimport curses\n\n\ndef select_option(options_list):\n return wrapper(_select_option, options_list)\n\n\ndef _select_option(stdscr, options_list):\n stdscr.clear()\n\n select_size = 50\n size_of_room_above_search = 2\n selected_x = 0\n selected_y = 0\n\n old_scr_height = None\n old_scr_width = None\n\n search_string = ''\n filtered_options_list = list(_filter_list(options_list, search_string))\n old_filtered_options_list = filtered_options_list.copy()\n\n options_grid = None\n row_height = None\n last_col_row_height = None\n col_width = None\n\n while True:\n scr_height, scr_width = stdscr.getmaxyx()\n reset_necessary = False\n\n if scr_height != old_scr_height or scr_width != old_scr_width:\n old_scr_width = scr_width\n old_scr_height = scr_height\n reset_necessary = True\n\n if filtered_options_list != old_filtered_options_list:\n old_filtered_options_list = filtered_options_list.copy()\n reset_necessary = True\n\n if reset_necessary:\n options_grid = list(_split_list(filtered_options_list, max(1, math.floor(scr_width / select_size))))\n\n row_height = len(options_grid[0])\n last_col_row_height = len(options_grid[len(options_grid)-1])\n col_width = len(options_grid)\n\n selected_x = 0\n selected_y = 0\n\n y_offset = min(max((row_height+size_of_room_above_search)-scr_height, 0), selected_y)\n\n stdscr.erase()\n for x in range(0, col_width):\n for y in range(0, len(options_grid[x])): # ostatnia ma inny rozmiar więc zawsze sprawdzamy długość kolumny\n draw_y = y - y_offset + size_of_room_above_search\n\n if draw_y >= scr_height or draw_y < size_of_room_above_search:\n continue\n elif selected_x == x and selected_y == y:\n stdscr.addnstr(draw_y, x*select_size, options_grid[x][y], select_size-2, curses.A_REVERSE)\n else:\n stdscr.addnstr(draw_y, x*select_size, options_grid[x][y], select_size-2)\n\n stdscr.addnstr(0, 0, search_string, scr_width-2)\n stdscr.refresh()\n\n char = _get_char(stdscr)\n\n if char == curses.KEY_RIGHT:\n selected_x = (selected_x+1) % col_width\n if char == curses.KEY_LEFT:\n selected_x = (selected_x-1) % col_width\n if char == curses.KEY_DOWN:\n if selected_x == col_width-1:\n selected_y = (selected_y+1) % last_col_row_height\n else:\n selected_y = (selected_y+1) % row_height\n if char == curses.KEY_UP:\n if selected_x == col_width-1:\n selected_y = (selected_y-1) % last_col_row_height\n else:\n selected_y = (selected_y-1) % row_height\n if char == curses.KEY_HOME:\n selected_x = 0\n selected_y = 0\n if char == curses.KEY_END:\n selected_x = col_width-1\n selected_y = row_height-1\n\n if selected_x == col_width-1 and selected_y >= last_col_row_height:\n selected_y = last_col_row_height-1\n\n if char == ord('\\n'):\n if filtered_options_list:\n return options_grid[selected_x][selected_y]\n else:\n return None\n\n elif chr(char) in string.printable:\n search_string += chr(char)\n filtered_options_list = list(_filter_list(options_list, search_string))\n elif char == curses.KEY_BACKSPACE:\n search_string = search_string[:-1]\n filtered_options_list = list(_filter_list(options_list, 
search_string))\n\n\ndef _filter_list(l, search):\n for s in l:\n if not search or search.lower() in s.lower():\n yield s\n\n\ndef _get_char(stdscr):\n stdscr.nodelay(1)\n while True:\n char = stdscr.getch()\n\n if char != -1:\n return char\n\n time.sleep(0.01)\n\n\ndef _split_list(l, n):\n col_size = math.ceil(len(l) / n)\n return _chunks(l, col_size)\n\n\ndef _chunks(l, n):\n if len(l) > 1:\n for i in range(0, len(l), n):\n yield l[i:i + n]\n else:\n yield l\n", "id": "11157800", "language": "Python", "matching_score": 0.11279661953449249, "max_stars_count": 13, "path": "curses_select.py" } ]
1.982097
yangzilongdmgy
[ { "content": "from torch import nn as nn\nfrom mmdet.models import BACKBONES\nimport time\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom ..model_utils import ModulatedDeformConvBlock\nfrom torchplus.tools import change_default_args\nfrom torchplus.nn.modules.common import Sequential\nfrom mmcv.runner import load_checkpoint\nfrom mmcv.cnn import build_norm_layer\nfrom torchvision.models import resnet\nfrom mmcv.cnn import (build_norm_layer, build_upsample_layer, constant_init,\n is_norm, kaiming_init)\n\[email protected]_module()\nclass SECONDFPNDCN(nn.Module):\n def __init__(self,\n in_channels=128,\n layer_nums=(3, 5, 5),\n layer_strides=(2, 2, 2),\n num_filters=(128, 128, 256),\n upsample_strides=(1, 2, 4),\n out_channels=(256, 256, 256),\n use_dcn=True):\n \"\"\"upsample_strides support float: [0.25, 0.5, 1]\n if upsample_strides < 1, conv2d will be used instead of convtranspose2d.\n \"\"\"\n super(SECONDFPNDCN, self).__init__()\n self.layer_strides = layer_strides\n self.num_filters = num_filters\n self.layer_nums = layer_nums\n self.upsample_strides = upsample_strides\n self.num_upsample_filters = out_channels\n self.num_input_features = in_channels\n self.use_dcn=use_dcn\n\n\n assert len(layer_strides) == len(layer_nums)\n assert len(num_filters) == len(layer_nums)\n assert len(out_channels) == len(upsample_strides)\n self.upsample_start_idx = len(layer_nums) - len(upsample_strides)\n\n must_equal_list = []\n for i in range(len(upsample_strides)):\n must_equal_list.append(upsample_strides[i] / np.prod(\n layer_strides[:i + self.upsample_start_idx + 1]))\n for val in must_equal_list:\n assert val == must_equal_list[0]\n\n BatchNorm2d = change_default_args(eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n ConvTranspose2d = change_default_args(bias=False)(nn.ConvTranspose2d)\n self.activation_fcn=change_default_args(negative_slope=0.01,inplace=True)(nn.LeakyReLU)\n\n in_filters = [in_channels, *num_filters[:-1]]\n\n blocks = []\n deblocks = []\n\n for i, layer_num in enumerate(layer_nums):\n block, num_out_filters = self._make_layer(\n in_filters[i],\n num_filters[i],\n layer_num,\n stride=layer_strides[i])\n blocks.append(block)\n if i - self.upsample_start_idx >= 0:\n stride = upsample_strides[i - self.upsample_start_idx]\n stride = np.round(stride).astype(np.int64)\n if self.use_dcn:\n deblock=Sequential(ModulatedDeformConvBlock(num_out_filters,num_out_filters,act_fn=self.activation_fcn))\n deblock.add(ConvTranspose2d(num_out_filters,\n self.num_upsample_filters[i - self.upsample_start_idx],\n stride,\n stride=stride),)\n deblock.add(BatchNorm2d(self.num_upsample_filters[i -self.upsample_start_idx]))\n deblock.add(self.activation_fcn())\n else:\n deblock=Sequential(ConvTranspose2d(num_out_filters,\n self.num_upsample_filters[i - self.upsample_start_idx],\n stride,\n stride=stride),\n BatchNorm2d(self.num_upsample_filters[i -self.upsample_start_idx]),\n self.activation_fcn())\n\n deblocks.append(deblock)\n self.blocks = nn.ModuleList(blocks)\n self.deblocks = nn.ModuleList(deblocks)\n\n\n\n\n def _make_layer(self, inplanes, planes, num_blocks, stride=1):\n\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n Conv2d = change_default_args(bias=False)(nn.Conv2d)\n\n block = Sequential(\n Conv2d(inplanes, planes, 3, padding=1,stride=stride),\n BatchNorm2d(planes),\n self.activation_fcn())\n\n for j in range(num_blocks):\n block.add(Conv2d(planes, planes, 3, padding=1,dilation=1))\n block.add(BatchNorm2d(planes))\n block.add(self.activation_fcn())\n\n 
return block, planes\n\n\n\n @property\n def downsample_factor(self):\n factor = np.prod(self.layer_strides)\n if len(self.upsample_strides) > 0:\n factor /= self.upsample_strides[-1]\n return factor\n\n def forward(self, x):\n ups = []\n stage_outputs = []\n out=[]\n for i in range(len(self.blocks)):\n x = self.blocks[i](x)\n stage_outputs.append(x)\n if i - self.upsample_start_idx >= 0:\n ups.append(self.deblocks[i - self.upsample_start_idx](x))\n out.append(ups[-1])\n else:\n out.append(stage_outputs[-1])\n\n\n x = [torch.cat(out, dim=1)]\n\n\n return x\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize weights of the 2D backbone.\"\"\"\n # Do not initialize the conv layers\n # to follow the original implementation\n # for m in self.modules():\n # if isinstance(m, nn.Conv2d):\n # kaiming_init(m)\n # elif isinstance(m, nn.BatchNorm2d):\n # constant_init(m, 1)\n\n if isinstance(pretrained, str):\n from mmdet3d.utils import get_root_logger\n logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False, logger=logger)\n\n", "id": "4887146", "language": "Python", "matching_score": 2.680328130722046, "max_stars_count": 91, "path": "mmdet3d/models/backbones/second_fpn_dcn.py" }, { "content": "#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nimport torch\nimport torch.nn as nn\n\nfrom mmcv.ops import ModulatedDeformConv2d as ModulatedDeformConv2d\nfrom mmcv.ops import DeformConv2d as DeformConv2d\nfrom mmcv.cnn import build_norm_layer\n\nclass DeformConvBlock(nn.Module):\n def __init__(self, chi, cho,activation=\"relu\"):\n super(DeformConvBlock, self).__init__()\n\n self.actf = nn.Sequential(\n nn.BatchNorm2d(cho,eps=1e-3, momentum=0.01),\n nn.ReLU(inplace=True)\n )\n if activation==\"lrelu\":\n self.actf = nn.Sequential(\n nn.BatchNorm2d(cho, eps=1e-3, momentum=0.01),\n nn.LeakyReLU(0.1,inplace=True))\n self.conv = DeformConvWithOff(chi, cho,\n kernel_size=3, deformable_groups=1,)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.actf(x)\n return x\n\nclass ModulatedDeformConvBlock(nn.Module):\n def __init__(self, chi, cho,act_fn):\n super(ModulatedDeformConvBlock, self).__init__()\n self.norm_cfg= dict(type='BN2d', eps=1e-3, momentum=0.01)\n self.actf = nn.Sequential(\n build_norm_layer(self.norm_cfg,cho)[1],\n act_fn()\n )\n self.conv = ModulatedDeformConvWithOff(chi, cho,\n kernel_size=3, deformable_groups=1,)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.actf(x)\n return x\n\n\n\nclass DeformConvWithOff(nn.Module):\n\n def __init__(self, in_channels, out_channels,\n kernel_size=3, stride=1, padding=1,\n dilation=1, deformable_groups=1):\n super(DeformConvWithOff, self).__init__()\n self.offset_conv = nn.Conv2d(\n in_channels,\n deformable_groups * 2 * kernel_size * kernel_size,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n )\n self.dcn = DeformConv2d(\n in_channels, out_channels, kernel_size=kernel_size,\n stride=stride, padding=padding, dilation=dilation,\n deformable_groups=deformable_groups,\n )\n\n def forward(self, input):\n offset = self.offset_conv(input)\n output = self.dcn(input, offset)\n return output\n\n\nclass ModulatedDeformConvWithOff(nn.Module):\n\n def __init__(self, in_channels, out_channels,\n kernel_size=3, stride=1, padding=1,\n dilation=1, deformable_groups=1):\n super(ModulatedDeformConvWithOff, self).__init__()\n self.offset_mask_conv = nn.Conv2d(\n in_channels,\n deformable_groups * 3 * kernel_size * kernel_size,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n )\n self.dcnv2 = 
ModulatedDeformConv2d(\n in_channels, out_channels, kernel_size=kernel_size,\n stride=stride, padding=padding, dilation=dilation,\n deformable_groups=deformable_groups,\n )\n\n self.init_offset()\n\n def init_offset(self):\n self.offset_mask_conv.weight.data.zero_()\n self.offset_mask_conv.bias.data.zero_()\n\n def forward(self, input):\n x = self.offset_mask_conv(input)\n o1, o2, mask = torch.chunk(x, 3, dim=1)\n offset = torch.cat((o1, o2), dim=1)\n mask = torch.sigmoid(mask)\n output = self.dcnv2(input, offset, mask)\n return output\n", "id": "352606", "language": "Python", "matching_score": 2.015192985534668, "max_stars_count": 91, "path": "mmdet3d/models/model_utils/deform_conv_layers.py" }, { "content": "from mmdet3d.ops import spconv\nfrom torch import nn\nimport torch\nfrom ..builder import MIDDLE_ENCODERS\nfrom mmdet3d.ops import three_interpolate, three_nn\nfrom torchplus.tools import change_default_args\nfrom ..losses.center_loss import weighted_sigmoid_focal_loss,weighted_smoothl1\n\nBatchNorm1d = change_default_args(eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\nSpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\nSubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\n\n\n@MIDDLE_ENCODERS.register_module()\nclass SparseEncoder_AUX(nn.Module):\n def __init__(self,in_channels=4,sparse_shape=[40, 1600, 1408],out_channels=64):\n\n super(SparseEncoder_AUX, self).__init__()\n\n\n self.sparse_shape = sparse_shape\n print(self.sparse_shape)\n self.backbone = VxNet(in_channels,out_channels)\n self.num_input_features=in_channels\n\n def forward(self, voxel_features, coors, batch_size):\n\n points_mean=voxel_features.new_zeros((voxel_features.shape[0],4))\n points_mean[:, 0] = coors[:, 0]\n points_mean[:, 1:] = voxel_features[:, :3]\n\n\n coors = coors.int()\n x = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape, batch_size)\n x, point_misc = self.backbone(x, points_mean)\n\n x = x.dense()\n N, C, D, H, W = x.shape\n x = x.view(N, C * D, H, W)\n\n return x, point_misc\n\n\n def build_aux_target(self, points, gt_boxes3d):\n center_offsets = []\n pts_labels = []\n for i in range(len(gt_boxes3d)):\n boxes3d = gt_boxes3d[i]\n boxes3d = boxes3d.to(points.device)\n idx = torch.nonzero(points[:, 0] == i)[:,0]\n xyz = points[idx, 1:]\n # print(\"xyz shape\",xyz.shape)\n cls_label = xyz.new_zeros(xyz.shape[0])\n reg_label = xyz.new_zeros((xyz.shape[0], 3))\n points_box_id = boxes3d.points_in_boxes(xyz).long()\n\n for j in range(boxes3d.tensor.shape[0]):\n fg_pts_rect = xyz[points_box_id == j]\n cls_label[points_box_id == j] = 1\n center3d = boxes3d.gravity_center[j:j+1] # (x, y, z)\n # idx_box=torch.nonzero(points_box_id==j)\n reg_label[points_box_id == j] = center3d - fg_pts_rect\n\n # import mayavi.mlab as mlab\n # from mmdet.datasets.kitti_utils import draw_lidar, draw_gt_boxes3d\n # f = draw_lidar((new_xyz).numpy(), show=False)\n # pts = new_xyz[pts_label].numpy()\n # mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], color=(1, 1, 1), scale_factor=0.25, figure=f)\n # f = draw_gt_boxes3d(center_to_corner_box3d(boxes3d.numpy()), f, draw_text=False, show=True)\n\n pts_labels.append(cls_label)\n center_offsets.append(reg_label)\n\n center_offsets = torch.cat(center_offsets)\n pts_labels = torch.cat(pts_labels)\n\n return pts_labels, center_offsets\n\n def aux_loss(self, points, point_cls, point_reg, gt_bboxes):\n\n N = len(gt_bboxes)\n\n pts_labels, center_targets = self.build_aux_target(points, gt_bboxes)\n\n rpn_cls_target = pts_labels.float()\n pos = (pts_labels > 
0).float()\n neg = (pts_labels == 0).float()\n\n pos_normalizer = pos.sum()\n pos_normalizer = torch.clamp(pos_normalizer, min=1.0)\n\n cls_weights = pos + neg\n cls_weights = cls_weights / pos_normalizer\n\n reg_weights = pos\n reg_weights = reg_weights / pos_normalizer\n\n aux_loss_cls = weighted_sigmoid_focal_loss(point_cls.view(-1), rpn_cls_target, weight=cls_weights, avg_factor=1.)\n aux_loss_cls /= N\n\n aux_loss_reg = weighted_smoothl1(point_reg, center_targets, beta=1 / 9., weight=reg_weights[..., None], avg_factor=1.)\n aux_loss_reg /= N\n\n return dict(\n aux_loss_cls = aux_loss_cls,\n aux_loss_reg = aux_loss_reg,\n )\n\nclass VxNet(nn.Module):\n\n def __init__(self, num_input_features,num_out_features=64):\n super(VxNet, self).__init__()\n #[40,1600,1408]\n\n\n self.activation_fcn=change_default_args(negative_slope=0.01,inplace=True)(nn.LeakyReLU)\n\n self.extra_conv = spconv.SparseSequential(\n SpConv3d(64, 64, (3,1,1), (2,1,1), indice_key=\"down3\"),\n BatchNorm1d(64),\n self.activation_fcn())\n\n\n self.conv0 = double_conv(num_input_features, 16, 'subm0',activation=self.activation_fcn)\n self.down0 = stride_conv(16, 32, 'down0',activation=self.activation_fcn)\n\n self.conv1 = double_conv(32, 32, 'subm1',activation=self.activation_fcn) #[20,800,704]\n self.down1 = stride_conv(32, 64, 'down1',activation=self.activation_fcn)\n\n self.conv2 = triple_conv(64, 64, 'subm2',activation=self.activation_fcn) #[10,400,352]\n self.down2 = stride_conv(64, 64, 'down2',activation=self.activation_fcn)\n\n self.conv3 = triple_conv(64, 64, 'subm3',activation=self.activation_fcn) # #[5,200,176]\n\n\n self.point_fc = nn.Linear(160, 64, bias=False)\n self.point_cls = nn.Linear(64, 1, bias=False)\n self.point_reg = nn.Linear(64, 3, bias=False)\n\n\n def forward(self, x, points_mean):\n\n x = self.conv0(x)\n x = self.down0(x) # sp\n x = self.conv1(x) # 2x sub\n\n if self.training:\n # 根据体素的gridmap坐标计算体素在点云空间中对应的点坐标\n vx_feat, vx_nxyz = tensor2points(x, voxel_size=(.1, .1, .2))\n # 根据降采样后每个体素中心的xyz坐标计算得到全部体素xyz均值处的特征\n p1 = nearest_neighbor_interpolate(points_mean, vx_nxyz, vx_feat)\n\n\n x = self.down1(x)\n x = self.conv2(x)\n\n if self.training:\n vx_feat, vx_nxyz = tensor2points(x, voxel_size=(.2, .2, .4))\n p2 = nearest_neighbor_interpolate(points_mean, vx_nxyz, vx_feat)\n\n x = self.down2(x)\n x = self.conv3(x)\n\n if self.training:\n vx_feat, vx_nxyz = tensor2points(x, voxel_size=(.4, .4, .8))\n p3 = nearest_neighbor_interpolate(points_mean, vx_nxyz, vx_feat)\n\n out = self.extra_conv(x)\n\n if not self.training:\n return out, None\n\n pointwise = self.point_fc(torch.cat([p1, p2, p3], dim=-1))\n point_cls = self.point_cls(pointwise)\n point_reg = self.point_reg(pointwise)\n return out, (points_mean, point_cls, point_reg)\n\n\n\n\ndef single_conv(in_channels, out_channels, indice_key=None,activation=None):\n return spconv.SparseSequential(\n SubMConv3d(in_channels, out_channels, 1, indice_key=indice_key),\n BatchNorm1d(out_channels),\n activation())\n\n\ndef double_conv(in_channels, out_channels, indice_key=None,activation=None):\n return spconv.SparseSequential(\n SubMConv3d(in_channels, out_channels,3,indice_key=indice_key),\n BatchNorm1d(out_channels),\n activation(),\n SubMConv3d(out_channels, out_channels, 3,indice_key=indice_key),\n BatchNorm1d(out_channels),\n activation())\n\n\n\ndef triple_conv(in_channels, out_channels, indice_key=None,activation=None):\n return spconv.SparseSequential(\n SubMConv3d(in_channels, out_channels, 3, indice_key=indice_key),\n BatchNorm1d(out_channels),\n 
activation(),\n SubMConv3d(out_channels, out_channels, 3, indice_key=indice_key),\n BatchNorm1d(out_channels),\n activation(),\n SubMConv3d(out_channels, out_channels, 3, indice_key=indice_key),\n BatchNorm1d(out_channels,),\n activation(),\n )\n\ndef stride_conv(in_channels, out_channels, indice_key=None,activation=None):\n\n return spconv.SparseSequential(\n SpConv3d(in_channels, out_channels, 3, 2, padding=1, indice_key=indice_key),\n BatchNorm1d(out_channels),\n activation())\n\ndef nearest_neighbor_interpolate(unknown, known, known_feats):\n \"\"\"\n :param unknown: (n, 4) 每个体素的xyz均值\n :param known: (m, 4) 体素中心对应的xyz坐标\n :param known_feats: (m, C) 体素的特征\n :return:\n new_features: (n, C) tensor of the features of the unknown features\n \"\"\"\n unknown=unknown.unsqueeze(0)\n known=known.unsqueeze(0)\n known_feats=known_feats.unsqueeze(0).permute(0,2,1).contiguous()\n dist, idx = three_nn(unknown, known)\n dist_recip = 1.0 / (dist + 1e-8)\n norm = torch.sum(dist_recip, dim=1, keepdim=True)\n weight = dist_recip / norm\n interpolated_feats = three_interpolate(known_feats, idx, weight)\n interpolated_feats=interpolated_feats.permute(0,2,1).contiguous().squeeze(0)\n return interpolated_feats\n\n\ndef tensor2points(tensor, offset=(0., -40., -3.), voxel_size=(.05, .05, .1)):\n #根据体素的坐标计算体素在点云空间中对应的点坐标\n indices = tensor.indices.float() #coordinate\n offset = torch.Tensor(offset).to(indices.device)\n voxel_size = torch.Tensor(voxel_size).to(indices.device)\n indices[:, 1:] = indices[:, [3, 2, 1]] * voxel_size + offset + .5 * voxel_size\n return tensor.features, indices", "id": "4966609", "language": "Python", "matching_score": 6.312091827392578, "max_stars_count": 0, "path": "mmdet3d/models/middle_encoders/sparse_encoder_aux.py" }, { "content": "import time\n\nimport numpy as np\n# import spconv\nfrom mmdet3d.ops import spconv\nimport torch\nfrom torch import nn\nfrom torchplus.tools import change_default_args\n# from mmdet3d.models.registry import MIDDLE_ENCODERS\nfrom ..builder import MIDDLE_ENCODERS\nfrom .sparse_encoder_aux import single_conv, double_conv, stride_conv, triple_conv\nfrom torchplus.nn.modules.common import Sequential\n\nBatchNorm1d = change_default_args(eps=1e-3, momentum=0.01)(nn.BatchNorm1d)\nSpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)\nSubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)\nBatchNorm2d = change_default_args(eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\nConv2d = change_default_args(bias=False)(nn.Conv2d)\n\n\n@MIDDLE_ENCODERS.register_module()\nclass SparseEncoderV2(nn.Module):\n\n def __init__(self,\n sparse_shape,\n in_channels=128,\n out_channels=128,\n name='SparseEncoderV2'):\n super(SparseEncoderV2, self).__init__()\n self.name = name\n\n print(\"input sparse shape is \", sparse_shape)\n self.sparse_shape = sparse_shape\n self.voxel_output_shape = sparse_shape\n\n self.activation_fcn = change_default_args(\n negative_slope=0.01, inplace=True)(\n nn.LeakyReLU)\n # input: # [1600, 1200, 40]\n self.conv0 = double_conv(\n in_channels, 16, 'subm0', activation=self.activation_fcn)\n self.down0 = stride_conv(\n 16, 32, 'down0', activation=self.activation_fcn)\n\n self.conv1 = double_conv(\n 32, 32, 'subm1', activation=self.activation_fcn) # [20,800,704]\n self.down1 = stride_conv(\n 32, 64, 'down1', activation=self.activation_fcn)\n\n self.conv2 = triple_conv(\n 64, 64, 'subm2', activation=self.activation_fcn) # [10,400,352]\n self.down2 = stride_conv(\n 64, 64, 'down2', activation=self.activation_fcn)\n\n self.conv3 = 
triple_conv(\n 64, 64, 'subm3', activation=self.activation_fcn) # [5,200,176]\n\n self.down3 = spconv.SparseSequential(\n SpConv3d(64, 64, (3, 1, 1), (2, 1, 1), indice_key=\"down3\"),\n BatchNorm1d(64), self.activation_fcn()) # [5,200,176]\n\n # self.down2extra = spconv.SparseSequential(\n # SpConv3d(64, 64, (3, 1, 1), (2, 1, 1), indice_key=\"down2extra\"),\n # BatchNorm1d(64),\n # self.activation_fcn())\n #\n # self.conv2d= Sequential(\n # Conv2d(256, 128, 3, padding=1,stride=1),\n # BatchNorm2d(128),\n # self.activation_fcn())\n\n def forward(self, voxel_features, coors, batch_size):\n # coors[:, 1] += 1\n coors = coors.int()\n x = spconv.SparseConvTensor(voxel_features, coors, self.sparse_shape,\n batch_size)\n # t = time.time()\n x0 = self.conv0(x)\n x0 = self.down0(x0)\n\n x1 = self.conv1(x0)\n x1 = self.down1(x1)\n x2 = self.conv2(x1)\n # xconv2=spconv.SparseConvTensor(x2.features.clone(),x2.indices,x2.spatial_shape,x2.batch_size)\n # xconv2.indice_dict=x2.indice_dict\n # xconv2.grid=x2.grid\n # xconv2=self.down2extra(xconv2)\n # xconv2=xconv2.dense()\n # N1,C1,D1,H1,W1=xconv2.shape\n # xconv2=xconv2.view(N1,C1*D1,H1,W1)\n # xconv2=self.conv2d(xconv2)\n\n x2 = self.down2(x2)\n x3 = self.conv3(x2)\n x3 = self.down3(x3)\n\n ret = x3.dense()\n\n N, C, D, H, W = ret.shape\n ret = ret.view(N, C * D, H, W)\n\n # print(\"ret shape\",ret.shape)\n # print(\"xconv2\",xconv2.shape)\n return ret\n", "id": "12808400", "language": "Python", "matching_score": 0.6245912313461304, "max_stars_count": 0, "path": "mmdet3d/models/middle_encoders/sparse_encoderv2.py" }, { "content": "import torch\n\n\ndef distance2bbox(points, distance, max_shape=None):\n \"\"\"Decode distance prediction to bounding box.\n\n Args:\n points (Tensor): Shape (n, 2), [x, y].\n distance (Tensor): Distance from the given point to 4\n boundaries (left, top, right, bottom).\n max_shape (tuple): Shape of the image.\n\n Returns:\n Tensor: Decoded bboxes.\n \"\"\"\n x1 = points[:, 0] - distance[:, 0]\n y1 = points[:, 1] - distance[:, 1]\n x2 = points[:, 0] + distance[:, 2]\n y2 = points[:, 1] + distance[:, 3]\n if max_shape is not None:\n x1 = x1.clamp(min=0, max=max_shape[1])\n y1 = y1.clamp(min=0, max=max_shape[0])\n x2 = x2.clamp(min=0, max=max_shape[1])\n y2 = y2.clamp(min=0, max=max_shape[0])\n return torch.stack([x1, y1, x2, y2], -1)\n\n\ndef bbox2distance(points, bbox, max_dis=None, eps=0.1):\n \"\"\"Decode bounding box based on distances.\n\n Args:\n points (Tensor): Shape (n, 2), [x, y].\n bbox (Tensor): Shape (n, 4), \"xyxy\" format\n max_dis (float): Upper bound of the distance.\n eps (float): a small value to ensure target < max_dis, instead <=\n\n Returns:\n Tensor: Decoded distances.\n \"\"\"\n left = points[:, 0] - bbox[:, 0]\n top = points[:, 1] - bbox[:, 1]\n right = bbox[:, 2] - points[:, 0]\n bottom = bbox[:, 3] - points[:, 1]\n if max_dis is not None:\n left = left.clamp(min=0, max=max_dis - eps)\n top = top.clamp(min=0, max=max_dis - eps)\n right = right.clamp(min=0, max=max_dis - eps)\n bottom = bottom.clamp(min=0, max=max_dis - eps)\n return torch.stack([left, top, right, bottom], -1)\n", "id": "6881853", "language": "Python", "matching_score": 1.0986453294754028, "max_stars_count": 9, "path": "mmdet3d/models/utils/box_transform.py" }, { "content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nfrom .clip_sigmoid import clip_sigmoid\nfrom .mlp import MLP\nfrom .box_transform import bbox2distance, distance2bbox\nfrom .misc import images_to_levels, multi_apply\nfrom .visualization import overlay_bbox_cv\n\n__all__ = [\n 'clip_sigmoid', 'MLP', 'bbox2distance', 'distance2bbox',\n 'images_to_levels', 'multi_apply', 'overlay_bbox_cv'\n]\n", "id": "7094562", "language": "Python", "matching_score": 0.2978377640247345, "max_stars_count": 0, "path": "mmdet3d/models/utils/__init__.py" }, { "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.utils import Registry, build_from_cfg, print_log\n\nfrom .collect_env import collect_env\nfrom .logger import get_root_logger\nfrom .draw_tools import draw_gt_boxes3d, draw_lidar, draw_projected_boxes3d\n\n__all__ = [\n 'Registry', 'build_from_cfg', 'get_root_logger', 'collect_env',\n 'print_log', 'draw_gt_boxes3d', 'draw_lidar', 'draw_projected_boxes3d'\n]\n", "id": "8946631", "language": "Python", "matching_score": 0.5410939455032349, "max_stars_count": 0, "path": "mmdet3d/utils/__init__.py" }, { "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nfrom collections import OrderedDict\nfrom os import path as osp\n\nfrom mmdet3d.core import show_multi_modality_result, show_result\nfrom mmdet3d.core.bbox import DepthInstance3DBoxes\nfrom mmdet.core import eval_map\nfrom mmdet.datasets import DATASETS\n\nfrom mmdet3d.datasets.sunrgbd_dataset import SUNRGBDDataset\nfrom .custom_3d import Custom3DDataset\nfrom .pipelines import Compose\n\n\[email protected]_module()\nclass MyDataset(SUNRGBDDataset):\n\n def __init__(self,\n data_root,\n ann_file,\n pipeline=None,\n classes=None,\n modality=...,\n box_type_3d='Depth',\n filter_empty_gt=True,\n test_mode=False):\n super().__init__(\n data_root,\n ann_file,\n pipeline=pipeline,\n classes=classes,\n modality=modality,\n box_type_3d=box_type_3d,\n filter_empty_gt=filter_empty_gt,\n test_mode=test_mode)\n", "id": "3288489", "language": "Python", "matching_score": 0.8302521705627441, "max_stars_count": 0, "path": "mmdet3d/datasets/sunrgbd_pcd_only_dataset.py" }, { "content": "# Copyright 2021 RangiLyu.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport cv2\nimport numpy as np\nimport torch\nfrom pycocotools.coco import COCO\n\nfrom .base import BaseDataset\nfrom PIL import Image\n\n\nclass CocoDataset(BaseDataset):\n\n def get_data_info(self, ann_path):\n self.coco_api = COCO(ann_path)\n self.cat_ids = sorted(self.coco_api.getCatIds())\n self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n self.cats = self.coco_api.loadCats(self.cat_ids)\n self.img_ids = sorted(self.coco_api.imgs.keys())\n img_info = self.coco_api.loadImgs(self.img_ids)\n return img_info\n\n def get_per_img_info(self, idx):\n img_info = self.data_info[idx]\n file_name = img_info[\"file_name\"]\n height = img_info[\"height\"]\n width = img_info[\"width\"]\n id = img_info[\"id\"]\n if not isinstance(id, int):\n raise TypeError(\"Image id must 
be int.\")\n info = {\n \"file_name\": file_name,\n \"height\": height,\n \"width\": width,\n \"id\": id\n }\n return info\n\n def get_img_annotation(self, idx):\n \"\"\"\n load per image annotation\n :param idx: index in dataloader\n :return: annotation dict\n \"\"\"\n img_id = self.img_ids[idx]\n ann_ids = self.coco_api.getAnnIds([img_id])\n anns = self.coco_api.loadAnns(ann_ids)\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n if self.use_instance_mask:\n gt_masks = []\n if self.use_keypoint:\n gt_keypoints = []\n for ann in anns:\n if ann.get(\"ignore\", False):\n continue\n x1, y1, w, h = ann[\"bbox\"]\n if ann[\"area\"] <= 0 or w < 1 or h < 1:\n continue\n if ann[\"category_id\"] not in self.cat_ids:\n continue\n bbox = [x1, y1, x1 + w, y1 + h]\n if ann.get(\"iscrowd\", False):\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann[\"category_id\"]])\n if self.use_instance_mask:\n gt_masks.append(self.coco_api.annToMask(ann))\n if self.use_keypoint:\n gt_keypoints.append(ann[\"keypoints\"])\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n annotation = dict(\n bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)\n if self.use_instance_mask:\n annotation[\"masks\"] = gt_masks\n if self.use_keypoint:\n if gt_keypoints:\n annotation[\"keypoints\"] = np.array(\n gt_keypoints, dtype=np.float32)\n else:\n annotation[\"keypoints\"] = np.zeros((0, 51), dtype=np.float32)\n return annotation\n\n def get_train_data(self, idx):\n #### load original nanodet for box\n ann = self.get_img_annotation(idx)\n img_info = self.get_per_img_info(idx)\n file_name = img_info[\"file_name\"]\n image_path = os.path.join(self.img_path, file_name)\n img = cv2.imread(image_path)\n if img is None:\n print(\"image {} read failed.\".format(image_path))\n raise FileNotFoundError(\n \"Cant load image! Please check image path!\")\n meta = dict(\n img=img,\n img_info=img_info,\n gt_bboxes=ann[\"bboxes\"],\n gt_labels=ann[\"labels\"])\n\n ### load semantic stuff\n image_path = os.path.join(self.sem_img_path, file_name[:-3] + 'png')\n img = np.array(Image.open(image_path))\n if img is None:\n print(\"semantic image {} read failed.\".format(image_path))\n raise FileNotFoundError(\n \"Cant load semantic image! 
Please check semantic image path!\")\n\n img[img == 0] = 255\n img = img + 79\n img[img == 78] = 255\n\n ### load semantic thing individual\n n = len(ann['masks'])\n for i in range(n):\n img[ann['masks'][i] == 1] = ann[\"labels\"][i]\n\n meta[\"img_semantic_stuff\"] = img\n\n ### process images\n meta = self.pipeline(meta, self.input_size)\n meta[\"img\"] = torch.from_numpy(meta[\"img\"].transpose(\n 2, 0, 1)) #h,w,c to c,h,w\n meta[\"img_semantic_stuff\"] = torch.from_numpy(\n meta[\"img_semantic_stuff\"]).unsqueeze(0).to(\n dtype=torch.float32) #h,w\n\n # ##### check image\n # unique_items = np.unique(meta[\"img_semantic_stuff\"])\n # print('Stuff unique', unique_items)\n\n # fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(18, 10))\n # for line in axs:\n # for a in line:\n # a.axis('off')\n # i = 0\n # ax = axs[i // 2, i % 2]\n # pic1 = ax.imshow(meta[\"img_semantic_stuff\"].numpy())\n # plt.colorbar(pic1)\n\n # i = 1\n # ax = axs[i // 2, i % 2]\n # ax.imshow(meta[\"img\"].numpy()[0,:,:])\n\n # # i = 2\n # # ax = axs[i // 2, i % 2]\n # # pic2 = ax.imshow(meta[\"img_semantic_thing\"].numpy())\n # # plt.colorbar(pic2)\n\n # fig.tight_layout()\n # fig.savefig('check_stuff.png')\n # ######################\n\n return meta\n\n def get_val_data(self, idx):\n \"\"\"\n Currently no difference from get_train_data.\n Not support TTA(testing time augmentation) yet.\n :param idx:\n :return:\n \"\"\"\n # TODO: support TTA\n return self.get_train_data(idx)\n\n\n# from .CoCo2017_CatLabelNameColor_StuffThing_Separated import info_separated_dict\n# from .file_io import PathManager\n# import json\n\n# def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta):\n# \"\"\"\n# Args:\n# image_dir (str): path to the raw dataset. e.g., \"~/coco/train2017\".\n# gt_dir (str): path to the raw annotations. e.g., \"~/coco/panoptic_train2017\".\n# json_file (str): path to the json file. e.g., \"~/coco/annotations/panoptic_train2017.json\".\n\n# Returns:\n# list[dict]: a list of dicts in Detectron2 standard format. (See\n# `Using Custom Datasets </tutorials/datasets.html>`_ )\n# \"\"\"\n\n# def _convert_category_id(segment_info, meta):\n# if segment_info[\"category_id\"] in meta[\"thing_dataset_id_to_contiguous_id\"]:\n# segment_info[\"category_id\"] = meta[\"thing_dataset_id_to_contiguous_id\"][\n# segment_info[\"category_id\"]\n# ]\n# segment_info[\"isthing\"] = True\n# else:\n# segment_info[\"category_id\"] = meta[\"stuff_dataset_id_to_contiguous_id\"][\n# segment_info[\"category_id\"]\n# ]\n# segment_info[\"isthing\"] = False\n# return segment_info\n\n# with PathManager.open(json_file) as f:\n# json_info = json.load(f)\n\n# ret = []\n# for ann in json_info[\"annotations\"]:\n# image_id = int(ann[\"image_id\"])\n# # TODO: currently we assume image and label has the same filename but\n# # different extension, and images have extension \".jpg\" for COCO. 
Need\n# # to make image extension a user-provided argument if we extend this\n# # function to support other COCO-like datasets.\n# image_file = os.path.join(image_dir, os.path.splitext(ann[\"file_name\"])[0] + \".jpg\")\n# label_file = os.path.join(gt_dir, ann[\"file_name\"])\n# segments_info = [_convert_category_id(x, meta) for x in ann[\"segments_info\"]]\n# ret.append(\n# {\n# \"file_name\": image_file,\n# \"image_id\": image_id,\n# \"pan_seg_file_name\": label_file,\n# \"segments_info\": segments_info,\n# }\n# )\n# assert len(ret), f\"No images found in {image_dir}!\"\n# assert PathManager.isfile(ret[0][\"file_name\"]), ret[0][\"file_name\"]\n# assert PathManager.isfile(ret[0][\"pan_seg_file_name\"]), ret[0][\"pan_seg_file_name\"]\n# return ret\n# semantic_stuff_info = self.semantic_stuff[idx]\n# print(semantic_stuff_info['pan_seg_file_name'] )\n\n# self.semantic_stuff = load_coco_panoptic_json(\n# sem_ann_path, img_path, sem_img_path, info_separated_dict\n# ) #from detectron2\n", "id": "5431563", "language": "Python", "matching_score": 3.4017648696899414, "max_stars_count": 0, "path": "mmdet3d/datasets/coco.py" }, { "content": "dataset_type = 'CocoDataset'\nimg_path = 'data/dataset_coco/train2017'\nann_path = 'data/dataset_coco/annotations/instances_train2017.json'\nsem_img_path = 'data/dataset_coco/panoptic_stuff_train2017'\ninput_size = [512, 512]\nuse_instance_mask = True\nkeep_ratio = True\npipeline = dict(\n perspective=0.0,\n scale=[0.5, 1.5],\n stretch=[[1, 1], [1, 1]],\n rotation=0,\n shear=0,\n translate=0.2,\n flip=0.5,\n brightness=0.2,\n contrast=[0.6, 1.4],\n saturation=[0.5, 1.2],\n normalize=[[127.0, 127.0, 127.0], [128.0, 128.0, 128.0]])\n", "id": "11272347", "language": "Python", "matching_score": 0.6605738401412964, "max_stars_count": 0, "path": "configs/_base_/datasets/coco_dataset.py" }, { "content": "import time\nfrom mmdet.apis.train import set_random_seed\nimport tqdm\nfrom mmcv.cnn.builder import build_model_from_cfg\nimport tqdm\nimport torch\nfrom typing import Tuple\nfrom mmcv import Config\nfrom mmdet.datasets.builder import build_dataloader\nfrom mmdet3d.apis.test import single_gpu_test\nfrom mmdet3d.core.evaluation.seg_eval import fast_hist\n\nfrom mmdet3d.datasets.builder import build_dataset\nfrom mmdet3d.models.builder import build_model\nfrom mmcv.parallel import MMDataParallel\nimport pickle\n\ncfg = Config.fromfile('./configs/mergenet/merge_net.py')\n\ndatasets = build_dataset(cfg.data.test)\n\n# For testing\ndataloader = build_dataloader(datasets, 6, 1, dist=False, shuffle=False)\nmodel = build_model(cfg.model)\nset_random_seed(0)\nmodel = MMDataParallel(model, device_ids=[0])\n# show bin file addresss\n\ndataset = dataloader.dataset\n\nwith open('./cuda_error_batch', 'rb') as f:\n data = pickle.load(f)\n\n# Test Evalation and Test pipeline\nmodel.eval()\nfor i, data in tqdm.tqdm(enumerate(dataloader)):\n img = data['img'][0]\n img_metas = data['img_metas'][0]\n points = data['points'][0]\n inputs = dict(img=img, img_metas=img_metas, points=points)\n result = model(return_loss=False, **inputs)\n", "id": "11425045", "language": "Python", "matching_score": 5.421545028686523, "max_stars_count": 0, "path": "test_cuda_error.py" }, { "content": "import time\nimport tqdm\nfrom mmcv.cnn.builder import build_model_from_cfg\nimport tqdm\nimport torch\nfrom typing import Tuple\nfrom mmcv import Config\nfrom mmdet.datasets.builder import build_dataloader\nfrom mmdet3d.apis.test import single_gpu_test\n\nfrom mmdet3d.datasets.builder import build_dataset\nfrom 
mmdet3d.models.builder import build_model\nfrom mmcv.parallel import MMDataParallel\n\ncfg = Config.fromfile('./configs/mergenet/merge_net.py')\n\ndatasets = build_dataset(cfg.data.test)\ndataloader = build_dataloader(datasets, 1, 1, dist=False, shuffle=False)\nmodel = build_model(cfg.model)\n\nmodel = MMDataParallel(model, device_ids=[0])\n# show bin file addresss\nmodel.eval()\ndataset = dataloader.dataset\nwith torch.no_grad():\n for i, data in enumerate(dataloader):\n result = model(return_loss=False, rescale=True, **data)", "id": "5043740", "language": "Python", "matching_score": 0.219183549284935, "max_stars_count": 0, "path": "simple_run.py" }, { "content": "from mmdet.models.builder import HEADS\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport cv2\n\n# import json\n# import copy\n# from ..loss.segment_loss import DiceLoss, FocalLoss\n\n\ndef make_one_hot(labels, classes, clsoffset):\n one_hot = torch.FloatTensor(labels.size()[0],\n classes).zero_().to(labels.device)\n target = one_hot.scatter_(1, labels.data - clsoffset, 1)\n return target\n\n\nclass DiceLoss(nn.Module):\n\n def __init__(self, smooth=1., ignore_index=255, clsoffset=0):\n super(DiceLoss, self).__init__()\n self.ignore_index = ignore_index\n self.smooth = smooth\n self.clsoffset = clsoffset\n\n def forward(self, output, target):\n if self.ignore_index not in range(target.min(), target.max()):\n if (target == self.ignore_index).sum() > 0:\n target[target == self.ignore_index] = target.min()\n target = make_one_hot(\n target.unsqueeze(dim=1),\n classes=output.size()[1],\n clsoffset=self.clsoffset)\n output = F.softmax(output, dim=1)\n output_flat = output.contiguous().view(-1)\n target_flat = target.contiguous().view(-1)\n intersection = (output_flat * target_flat).sum()\n loss = 1 - ((2. 
* intersection + self.smooth) /\n (output_flat.sum() + target_flat.sum() + self.smooth))\n return loss\n\n\nclass FocalLoss(nn.Module):\n\n def __init__(self,\n gamma=2,\n alpha=None,\n ignore_index=255,\n size_average=True,\n clsoffset=0):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.size_average = size_average\n self.CE_loss = nn.CrossEntropyLoss(\n reduce=False, ignore_index=ignore_index, weight=alpha)\n self.clsoffset = clsoffset\n\n def forward(self, output, target):\n logpt = self.CE_loss(output, target - self.clsoffset)\n pt = torch.exp(-logpt)\n loss = ((1 - pt)**self.gamma) * logpt\n if self.size_average:\n return loss.mean()\n return loss.sum()\n\n\nclass FocalLoss_BCE(nn.Module):\n\n def __init__(self,\n gamma=2,\n alpha=None,\n ignore_index=255,\n size_average=True):\n super(FocalLoss_BCE, self).__init__()\n self.gamma = gamma\n self.size_average = size_average\n self.CE_loss = nn.BCEWithLogitsLoss(reduce=False, weight=alpha)\n\n def forward(self, output, target):\n logpt = self.CE_loss(output, target)\n pt = torch.exp(-logpt)\n loss = ((1 - pt)**self.gamma) * logpt\n if self.size_average:\n return loss.mean()\n return loss.sum()\n\n\nclass DepthwiseConv(nn.Module):\n\n def __init__(self, in_ch, out_ch):\n super(DepthwiseConv, self).__init__()\n self.depthwise = nn.Sequential(\n nn.Conv2d(\n in_ch,\n out_ch,\n kernel_size=3,\n stride=1,\n padding=1,\n groups=in_ch,\n bias=False),\n nn.BatchNorm2d(out_ch),\n nn.LeakyReLU(0.1, inplace=False),\n )\n\n def forward(self, x):\n x = self.depthwise(x)\n return x\n\n\nclass PointwiseConv(nn.Module):\n\n def __init__(self, in_ch, out_ch):\n super(PointwiseConv, self).__init__()\n self.pointwise = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, bias=False),\n nn.BatchNorm2d(out_ch),\n nn.LeakyReLU(0.1, inplace=False),\n )\n\n def forward(self, x):\n x = self.pointwise(x)\n return x\n\n\[email protected]_module()\nclass SemanticHeadStuff(nn.Module):\n\n def __init__(self, in_ch32, in_ch64, in_ch128, hidden_ch, class_ts,\n droprate):\n super(SemanticHeadStuff, self).__init__()\n self.layer_32 = nn.Sequential(\n DepthwiseConv(in_ch32, in_ch32),\n nn.Dropout(p=droprate),\n PointwiseConv(in_ch32, hidden_ch),\n )\n\n self.upsample32_64 = nn.Upsample(scale_factor=2, mode='bilinear')\n\n self.layer_64 = nn.Sequential(\n DepthwiseConv(in_ch64, in_ch64),\n nn.Dropout(p=droprate),\n PointwiseConv(in_ch64, hidden_ch),\n )\n\n self.upsample64_128 = nn.Upsample(scale_factor=2, mode='bilinear')\n\n self.layer_128 = nn.Sequential(\n DepthwiseConv(in_ch128, in_ch128),\n nn.Dropout(p=droprate),\n PointwiseConv(in_ch128, hidden_ch),\n )\n\n self.predictor_thing_stuff = nn.Conv2d(\n hidden_ch, class_ts, kernel_size=1, stride=1, padding=0)\n self.predictor_thing_mask = nn.Conv2d(\n hidden_ch, 1, kernel_size=1, stride=1, padding=0)\n\n self.dice_loss_stuff = DiceLoss(\n smooth=1., ignore_index=255, clsoffset=80)\n self.dice_loss_thing = DiceLoss(smooth=1., ignore_index=255)\n\n # weight_thing_stuff = torch.ones(class_ts).float()\n # weight_thing_stuff[80:] = 1/100\n\n self.focal_loss_stuff = FocalLoss(\n gamma=2,\n alpha=None,\n ignore_index=255,\n size_average=True,\n clsoffset=80)\n self.focal_loss_thing = FocalLoss(\n gamma=2, alpha=None, ignore_index=255, size_average=True)\n\n self.dice_loss_thing_mask = DiceLoss(smooth=1., ignore_index=255)\n self.focal_loss_thing_mask = FocalLoss_BCE(\n gamma=2, alpha=None, ignore_index=255, size_average=True)\n\n def forward(self, x_32, x_64, x_128):\n x_32 = self.layer_32(x_32)\n 
x_32 = self.upsample32_64(x_32) # in64\n\n x_64 = self.layer_64(x_64)\n x_64 += x_32\n x_64 = self.upsample64_128(x_64) # in128\n\n x_128 = self.layer_128(x_128)\n x_128 += x_64\n\n x_32 = self.predictor_thing_stuff(x_128)\n x_32_thing_mask = self.predictor_thing_mask(x_128)\n return x_32, x_32_thing_mask\n\n def loss(self, preds, preds_thing_mask, gt):\n b, c, h, w = preds.size()\n\n gt = F.interpolate(gt, size=[h, w], mode=\"nearest\") # 128\n gt = gt.to(dtype=torch.int64) # convert from float32 to int64\n gt = gt.permute(0, 2, 3, 1) # size = [b, h, w, 1]\n gt = torch.flatten(gt, 0, -1) # size = [bhw1]\n # print(gt.size())\n\n gt_mask_stuff = gt > 79 # isstuff=True\n gt_mask_thing = gt <= 79 # isthing=True\n # print(gt_mask_stuff.size())\n # print(gt_mask_thing.size())\n\n gt_stuff = gt[gt_mask_stuff]\n gt_thing = gt[gt_mask_thing]\n # print('gt_stuff', gt_stuff.size())\n # print('gt_thing', gt_thing.size())\n\n preds = preds.permute(0, 2, 3, 1) # size = [b, h, w, c]\n preds = torch.flatten(preds, 0, -2) # size = [bhw, c]\n # print(preds.size())\n\n preds_stuff = preds[gt_mask_stuff]\n preds_stuff = preds_stuff[:, 80:]\n # print('preds_stuff', preds_stuff.size())\n\n preds_thing = preds[gt_mask_thing]\n preds_thing = preds_thing[:, :80]\n # print('preds_thing', preds_thing.size())\n\n dloss_stuff = self.dice_loss_stuff(preds_stuff, gt_stuff)\n floss_stuff = self.focal_loss_stuff(preds_stuff, gt_stuff)\n loss_stuff = dloss_stuff + floss_stuff\n\n dloss_thing = self.dice_loss_thing(preds_thing, gt_thing)\n floss_thing = self.focal_loss_thing(preds_thing, gt_thing)\n loss_thing = dloss_thing + floss_thing\n\n # get loss for thing mask\n gt_mask_thing = gt_mask_thing.bool().int().to(dtype=torch.float32)\n\n preds_thing_mask = preds_thing_mask.permute(0, 2, 3,\n 1) # size = [b, h, w, c]\n # preds_thing_mask = torch.flatten(preds_thing_mask, 0, -2) # size = [bhw, c]\n preds_thing_mask = torch.flatten(preds_thing_mask, 0,\n -1) # size = [bhw, c]\n\n # dloss_thing_mask = self.dice_loss_thing_mask(preds_thing_mask, gt_mask_thing)\n floss_thing_mask = self.focal_loss_thing_mask(preds_thing_mask,\n gt_mask_thing)\n # print(floss_thing_mask)\n # loss_thing_mask = dloss_thing_mask + floss_thing_mask\n\n loss = loss_stuff + loss_thing + floss_thing_mask\n\n loss_states = dict(\n loss=loss,\n Dice_Loss_stuff=dloss_stuff,\n Focal_Loss_stuff=floss_stuff,\n Dice_Loss_thing=dloss_thing,\n Focal_Loss_thing=floss_thing,\n # Dice_Loss_thing_mask=dloss_thing_mask,\n Focal_Loss_thing_mask=floss_thing_mask,\n )\n # print(loss_states)\n return loss, loss_states\n\n def post_process(self, preds, preds_thing_mask, meta):\n b, c, h, w = preds.size()\n\n # get inverse warp matrix\n warp_matrix = meta[\"warp_matrix\"]\n warp_matrix = np.linalg.inv(warp_matrix)\n width, height = meta['img_info'][\"height\"], meta['img_info'][\"width\"]\n\n preds_thing_mask = torch.sigmoid(preds_thing_mask)\n preds_stuff_mask = 1 - preds_thing_mask\n\n preds_thing_mask = preds_thing_mask.expand([-1, 80, -1, -1])\n preds_stuff_mask = preds_stuff_mask.expand([-1, 53, -1, -1])\n\n preds_mask = torch.cat((preds_thing_mask, preds_stuff_mask), 1)\n # print(preds_mask.size(), preds_mask)\n\n preds = preds * preds_mask\n\n # 128 to 512\n preds = F.interpolate(\n preds, scale_factor=4, mode=\"bilinear\") # mode=\"nearest\"\n # print(preds.size())\n\n # preds[:, 80:, :, :] = F.softmax(preds[:, 80:, :, :], dim=1)\n # preds[:, :80, :, :] = F.softmax(preds[:, :80, :, :], dim=1)\n\n preds = F.softmax(preds, dim=1)\n\n preds = preds.squeeze(0)\n preds 
= preds.argmax(dim=0)\n\n preds = preds.cpu().numpy()\n\n print('Semantic Thing+Sutff Labels', np.unique(preds))\n preds = cv2.warpPerspective(\n preds,\n warp_matrix,\n dsize=tuple([height, width]),\n flags=0,\n borderMode=cv2.BORDER_CONSTANT,\n borderValue=(255, 255, 255))\n\n return preds\n\n # ###### process stuff\n # preds_stuff = preds[:, 80:, :, :]\n # preds_stuff = F.softmax(preds_stuff, dim=1)\n # preds_stuff = preds_stuff.squeeze(0)\n # # print(preds_stuff.size())\n\n # preds_stuff = preds_stuff.argmax(dim=0)\n # # print(preds_stuff.size())\n\n # preds_stuff = preds_stuff + 80\n # preds_stuff = preds_stuff.cpu().numpy()\n\n # print('Semantic Stuff Labels', np.unique(preds_stuff))\n # preds_stuff = cv2.warpPerspective(preds_stuff, warp_matrix, dsize=tuple([height, width]), flags=0, borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))\n\n # ###### process thing\n # preds_thing = preds[:, :80, :, :]\n # preds_thing = F.softmax(preds_thing, dim=1)\n # preds_thing = preds_thing.squeeze(0)\n # preds_thing = preds_thing.argmax(dim=0)\n # preds_thing = preds_thing.cpu().numpy()\n\n # print('Semantic Thing Labels', np.unique(preds_thing))\n # preds_thing = cv2.warpPerspective(preds_thing, warp_matrix, dsize=tuple([height, width]), flags=0, borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))\n\n # return preds_stuff, preds_thing\n\n # exit()\n\n # print(preds_stuff_mask.size(), preds_stuff_mask)\n\n # preds_stuff = F.softmax(preds[:, 80:, :, :], dim=1)\n # print(preds_stuff.size(), preds_stuff)\n\n # preds_thing = F.softmax(preds[:, :80, :, :], dim=1)\n # # print(preds_thing.size(), preds_thing)\n\n # b, c, h, w = preds_thing.size()\n # for i in range(c):\n # preds_thing[:, i, :, :] = preds_thing[:, i, :, :]*(preds_thing_mask.squeeze(0))\n\n # print(i)\n # print(preds_thing[:, i, :, :].size())\n # print((preds_thing_mask.squeeze(0)).size())\n\n # exit()\n\n # preds_stuff_mask = 1 - preds_thing_mask\n # print(preds_stuff_mask.size(), preds_stuff_mask)\n\n # preds_stuff = F.softmax(preds[:, 80:, :, :], dim=1)\n # print(preds_stuff.size(), preds_stuff)\n\n # aaa = torch.ones(1,1,128,128)\n # print(aaa)\n # print(aaa-1)\n\n # exit()\n\n # 128 to 512\n # preds = F.interpolate(preds, scale_factor=4, mode=\"bilinear\") # mode=\"nearest\"\n # print(preds.size())\n\n # preds[:, 80:, :, :] = F.softmax(preds[:, 80:, :, :], dim=1)\n # preds[:, :80, :, :] = F.softmax(preds[:, :80, :, :], dim=1)\n\n # # # preds[:, :80, :, :] = F.softmax(preds[:, :80, :, :], dim=1)\n # # for i in range(0, 80):\n # # preds[:, i, :, :] = preds[:, i, :, :]*(preds_thing_mask.squeeze(0))\n\n # # # preds[:, 80:, :, :] = F.softmax(preds[:, 80:, :, :], dim=1)\n # # for i in range(80, c):\n # # preds[:, i, :, :] = preds[:, i, :, :]*(preds_stuff_mask.squeeze(0))\n\n # # print(preds.size())\n\n # preds = F.softmax(preds, dim=1)\n\n # preds = F.interpolate(preds, scale_factor=4, mode=\"bilinear\") # mode=\"nearest\"\n\n # preds = preds.argmax(dim=1)\n\n # # print(preds.size())\n\n # preds = preds.squeeze(0)\n\n # preds = preds.cpu().numpy()\n\n # print('Semantic Thing+Sutff Labels', np.unique(preds))\n # preds = cv2.warpPerspective(preds, warp_matrix, dsize=tuple([height, width]), flags=0, borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))\n\n # preds_thing_mask = preds_thing_mask.expand([-1, 133, -1, -1])\n\n # preds_thing_mask[:, 80:, :, :] = 1 - preds_thing_mask[:, 80:, :, :]", "id": "5069328", "language": "Python", "matching_score": 3.2102773189544678, "max_stars_count": 0, "path": 
"mmdet3d/models/dense_heads/semantic_head_stuff.py" }, { "content": "# Copyright 2021 RangiLyu.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\n\nimport torch\n\nfrom mmdet.models import DETECTORS, build_backbone, build_head, build_neck\nfrom mmcv.runner import BaseModule\nfrom mmdet.models.detectors import SingleStageDetector, BaseDetector\n\n\[email protected]_module()\nclass OneStageDetector(BaseDetector):\n\n def __init__(self,\n backbone,\n neck=None,\n head=None,\n head_semantic_stuff=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None):\n super(OneStageDetector, self).__init__()\n self.backbone = build_backbone(backbone)\n self.neck = build_neck(neck)\n self.head = build_head(head)\n self.head_semantic_stuff = build_head(head_semantic_stuff)\n\n def extract_feat(self, img):\n return super().extract_feat(img)\n\n def simple_test(self, x):\n x = self.backbone(x)\n feature128 = x[0]\n feature64 = x[1]\n x = self.neck([x[1], x[2], x[3]]) # feature32\n x_box = self.head(x)\n x_semantic_stuff, x_semantic_thing_mask = self.head_semantic_stuff(\n x[0], feature64, feature128)\n return x_box, x_semantic_stuff, x_semantic_thing_mask\n\n def inference(self, meta, class_names):\n with torch.no_grad():\n torch.cuda.synchronize()\n time1 = time.time()\n preds_box, preds_semantic_stuff, preds_semantic_thing_mask = self(\n meta[\"img\"])\n torch.cuda.synchronize()\n time2 = time.time()\n # print(\"forward time: {:.3f}s\".format((time2 - time1)), end=\" | \")\n\n # process box result\n preds_box = self.head.post_process(preds_box, meta)\n preds_box = preds_box[0]\n\n # process semantic result\n preds_semantic_stuff = self.head_semantic_stuff.post_process(\n preds_semantic_stuff, preds_semantic_thing_mask, meta)\n\n torch.cuda.synchronize()\n # print(\"decode time: {:.3f}s\".format((time.time() - time2)), end=\" | \")\n return (preds_box, preds_semantic_stuff)\n\n def forward(self, img, img_metas, **kwargs):\n preds_box, preds_semantic_stuff, preds_semantic_thing_mask = self(img)\n loss_box, loss_states_box = self.head.loss(preds_box, img_metas)\n loss_semantic_stuff, loss_states_semantic_stuff = self.head_semantic_stuff.loss(\n preds_semantic_stuff, preds_semantic_thing_mask,\n img_metas[\"img_semantic_stuff\"])\n\n loss = loss_box + loss_semantic_stuff\n\n loss_states = dict(\n loss=loss,\n Box_QFL=loss_states_box['loss_qfl'],\n Box_Bbox=loss_states_box['loss_bbox'],\n Box_DFL=loss_states_box['loss_dfl'],\n Stuff_Dice=loss_states_semantic_stuff['Dice_Loss_stuff'],\n Stuff_Focal=loss_states_semantic_stuff['Focal_Loss_stuff'],\n Thing_Dice=loss_states_semantic_stuff['Dice_Loss_thing'],\n Thing_Focal=loss_states_semantic_stuff['Focal_Loss_thing'],\n\n # ThingMask_Dice=loss_states_semantic_stuff['Dice_Loss_thing_mask'],\n Thing_Mask=loss_states_semantic_stuff['Focal_Loss_thing_mask'])\n\n return (preds_box, preds_semantic_stuff,\n preds_semantic_thing_mask), loss, loss_states\n\n def aug_test(self, imgs, img_metas, **kwargs):\n return super().aug_test(imgs, img_metas, **kwargs)", 
"id": "4615642", "language": "Python", "matching_score": 2.428687810897827, "max_stars_count": 0, "path": "mmdet3d/models/detectors/one_stage_detector.py" }, { "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport warnings\n\nfrom mmdet3d.core import bbox3d2result, merge_aug_bboxes_3d\nfrom mmdet3d.models.utils import MLP\nfrom mmdet.models import DETECTORS\nfrom .. import builder\nfrom .base import Base3DDetector\n\n\ndef sample_valid_seeds(mask, num_sampled_seed=1024):\n r\"\"\"Randomly sample seeds from all imvotes.\n\n Modified from `<https://github.com/facebookresearch/imvotenet/blob/a8856345146bacf29a57266a2f0b874406fd8823/models/imvotenet.py#L26>`_\n\n Args:\n mask (torch.Tensor): Bool tensor in shape (\n seed_num*max_imvote_per_pixel), indicates\n whether this imvote corresponds to a 2D bbox.\n num_sampled_seed (int): How many to sample from all imvotes.\n\n Returns:\n torch.Tensor: Indices with shape (num_sampled_seed).\n \"\"\" # noqa: E501\n device = mask.device\n batch_size = mask.shape[0]\n sample_inds = mask.new_zeros((batch_size, num_sampled_seed),\n dtype=torch.int64)\n for bidx in range(batch_size):\n # return index of non zero elements\n valid_inds = torch.nonzero(mask[bidx, :]).squeeze(-1)\n if len(valid_inds) < num_sampled_seed:\n # compute set t1 - t2\n t1 = torch.arange(num_sampled_seed, device=device)\n t2 = valid_inds % num_sampled_seed\n combined = torch.cat((t1, t2))\n uniques, counts = combined.unique(return_counts=True)\n difference = uniques[counts == 1]\n\n rand_inds = torch.randperm(\n len(difference),\n device=device)[:num_sampled_seed - len(valid_inds)]\n cur_sample_inds = difference[rand_inds]\n cur_sample_inds = torch.cat((valid_inds, cur_sample_inds))\n else:\n rand_inds = torch.randperm(\n len(valid_inds), device=device)[:num_sampled_seed]\n cur_sample_inds = valid_inds[rand_inds]\n sample_inds[bidx, :] = cur_sample_inds\n return sample_inds\n\n\[email protected]_module()\nclass ImVoteNet(Base3DDetector):\n r\"\"\"`ImVoteNet <https://arxiv.org/abs/2001.10692>`_ for 3D detection.\"\"\"\n\n def __init__(self,\n pts_backbone=None,\n pts_bbox_heads=None,\n pts_neck=None,\n img_backbone=None,\n img_neck=None,\n img_roi_head=None,\n img_rpn_head=None,\n img_bbox_head=None,\n img_mlp=None,\n freeze_img_branch=False,\n fusion_layer=None,\n num_sampled_seed=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n\n super(ImVoteNet, self).__init__(init_cfg=init_cfg)\n\n # point branch\n if pts_backbone is not None:\n self.pts_backbone = builder.build_backbone(pts_backbone)\n if pts_neck is not None:\n self.pts_neck = builder.build_neck(pts_neck)\n if pts_bbox_heads is not None:\n pts_bbox_head_common = pts_bbox_heads.common\n pts_bbox_head_common.update(\n train_cfg=train_cfg.pts if train_cfg is not None else None)\n pts_bbox_head_common.update(test_cfg=test_cfg.pts)\n pts_bbox_head_joint = pts_bbox_head_common.copy()\n pts_bbox_head_joint.update(pts_bbox_heads.joint)\n pts_bbox_head_pts = pts_bbox_head_common.copy()\n pts_bbox_head_pts.update(pts_bbox_heads.pts)\n pts_bbox_head_img = pts_bbox_head_common.copy()\n pts_bbox_head_img.update(pts_bbox_heads.img)\n\n self.pts_bbox_head_joint = builder.build_head(pts_bbox_head_joint)\n self.pts_bbox_head_pts = builder.build_head(pts_bbox_head_pts)\n self.pts_bbox_head_img = builder.build_head(pts_bbox_head_img)\n self.pts_bbox_heads = [\n self.pts_bbox_head_joint, self.pts_bbox_head_pts,\n self.pts_bbox_head_img\n ]\n self.loss_weights = 
pts_bbox_heads.loss_weights\n\n # image branch\n if img_backbone:\n self.img_backbone = builder.build_backbone(img_backbone)\n if img_neck is not None:\n self.img_neck = builder.build_neck(img_neck)\n if img_rpn_head is not None:\n rpn_train_cfg = train_cfg.img_rpn if train_cfg \\\n is not None else None\n img_rpn_head_ = img_rpn_head.copy()\n img_rpn_head_.update(\n train_cfg=rpn_train_cfg, test_cfg=test_cfg.img_rpn)\n self.img_rpn_head = builder.build_head(img_rpn_head_)\n if img_roi_head is not None:\n rcnn_train_cfg = train_cfg.img_rcnn if train_cfg \\\n is not None else None\n img_roi_head.update(\n train_cfg=rcnn_train_cfg, test_cfg=test_cfg.img_rcnn)\n self.img_roi_head = builder.build_head(img_roi_head)\n if img_bbox_head is not None:\n self.img_bbox_head = builder.build_head(img_bbox_head)\n # fusion\n if fusion_layer is not None:\n self.fusion_layer = builder.build_fusion_layer(fusion_layer)\n self.max_imvote_per_pixel = fusion_layer.max_imvote_per_pixel\n\n self.freeze_img_branch = freeze_img_branch\n if freeze_img_branch:\n\n self.freeze_img_branch_params()\n\n if img_mlp is not None:\n self.img_mlp = MLP(**img_mlp)\n\n self.num_sampled_seed = num_sampled_seed\n\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n if pretrained is None:\n img_pretrained = None\n pts_pretrained = None\n elif isinstance(pretrained, dict):\n img_pretrained = pretrained.get('img', None)\n pts_pretrained = pretrained.get('pts', None)\n else:\n raise ValueError(\n f'pretrained should be a dict, got {type(pretrained)}')\n\n if self.with_img_backbone:\n if img_pretrained is not None:\n warnings.warn('DeprecationWarning: pretrained is a deprecated \\\n key, please consider using init_cfg')\n self.img_backbone.init_cfg = dict(\n type='Pretrained', checkpoint=img_pretrained)\n if self.with_img_roi_head:\n if img_pretrained is not None:\n warnings.warn('DeprecationWarning: pretrained is a deprecated \\\n key, please consider using init_cfg')\n self.img_roi_head.init_cfg = dict(\n type='Pretrained', checkpoint=img_pretrained)\n\n if self.with_pts_backbone:\n if img_pretrained is not None:\n warnings.warn('DeprecationWarning: pretrained is a deprecated \\\n key, please consider using init_cfg')\n self.pts_backbone.init_cfg = dict(\n type='Pretrained', checkpoint=pts_pretrained)\n\n def freeze_img_branch_params(self):\n \"\"\"Freeze all image branch parameters.\"\"\"\n if self.with_img_bbox_head:\n for param in self.img_bbox_head.parameters():\n param.requires_grad = False\n if self.with_img_backbone:\n for param in self.img_backbone.parameters():\n param.requires_grad = False\n if self.with_img_neck:\n for param in self.img_neck.parameters():\n param.requires_grad = False\n if self.with_img_rpn:\n for param in self.img_rpn_head.parameters():\n param.requires_grad = False\n if self.with_img_roi_head:\n for param in self.img_roi_head.parameters():\n param.requires_grad = False\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n \"\"\"Overload in order to load img network ckpts into img branch.\"\"\"\n module_names = ['backbone', 'neck', 'roi_head', 'rpn_head']\n for key in list(state_dict):\n for module_name in module_names:\n if key.startswith(module_name) and ('img_' +\n key) not in state_dict:\n state_dict['img_' + key] = state_dict.pop(key)\n\n super()._load_from_state_dict(state_dict, prefix, local_metadata,\n strict, missing_keys, unexpected_keys,\n error_msgs)\n\n def train(self, mode=True):\n \"\"\"Overload in order to 
keep image branch modules in eval mode.\"\"\"\n super(ImVoteNet, self).train(mode)\n if self.freeze_img_branch:\n if self.with_img_bbox_head:\n self.img_bbox_head.eval()\n if self.with_img_backbone:\n self.img_backbone.eval()\n if self.with_img_neck:\n self.img_neck.eval()\n if self.with_img_rpn:\n self.img_rpn_head.eval()\n if self.with_img_roi_head:\n self.img_roi_head.eval()\n\n @property\n def with_img_bbox(self):\n \"\"\"bool: Whether the detector has a 2D image box head.\"\"\"\n return ((hasattr(self, 'img_roi_head') and self.img_roi_head.with_bbox)\n or (hasattr(self, 'img_bbox_head')\n and self.img_bbox_head is not None))\n\n @property\n def with_img_bbox_head(self):\n \"\"\"bool: Whether the detector has a 2D image box head (not roi).\"\"\"\n return hasattr(self,\n 'img_bbox_head') and self.img_bbox_head is not None\n\n @property\n def with_img_backbone(self):\n \"\"\"bool: Whether the detector has a 2D image backbone.\"\"\"\n return hasattr(self, 'img_backbone') and self.img_backbone is not None\n\n @property\n def with_img_neck(self):\n \"\"\"bool: Whether the detector has a neck in image branch.\"\"\"\n return hasattr(self, 'img_neck') and self.img_neck is not None\n\n @property\n def with_img_rpn(self):\n \"\"\"bool: Whether the detector has a 2D RPN in image detector branch.\"\"\"\n return hasattr(self, 'img_rpn_head') and self.img_rpn_head is not None\n\n @property\n def with_img_roi_head(self):\n \"\"\"bool: Whether the detector has a RoI Head in image branch.\"\"\"\n return hasattr(self, 'img_roi_head') and self.img_roi_head is not None\n\n @property\n def with_pts_bbox(self):\n \"\"\"bool: Whether the detector has a 3D box head.\"\"\"\n return hasattr(self,\n 'pts_bbox_head') and self.pts_bbox_head is not None\n\n @property\n def with_pts_backbone(self):\n \"\"\"bool: Whether the detector has a 3D backbone.\"\"\"\n return hasattr(self, 'pts_backbone') and self.pts_backbone is not None\n\n @property\n def with_pts_neck(self):\n \"\"\"bool: Whether the detector has a neck in 3D detector branch.\"\"\"\n return hasattr(self, 'pts_neck') and self.pts_neck is not None\n\n def extract_feat(self, imgs):\n \"\"\"Just to inherit from abstract method.\"\"\"\n pass\n\n def extract_img_feat(self, img):\n \"\"\"Directly extract features from the img backbone+neck.\"\"\"\n\n if len(img.shape) == 3:\n img = img.unsqueeze(0)\n x = self.img_backbone(img)\n if self.with_img_neck:\n x = self.img_neck(x)\n return x\n\n def extract_img_feats(self, imgs):\n \"\"\"Extract features from multiple images.\n\n Args:\n imgs (list[torch.Tensor]): A list of images. 
The images are\n augmented from the same image but in different ways.\n\n Returns:\n list[torch.Tensor]: Features of different images\n \"\"\"\n\n assert isinstance(imgs, list)\n return [self.extract_img_feat(img) for img in imgs]\n\n def extract_pts_feat(self, pts):\n \"\"\"Extract features of points.\"\"\"\n x = self.pts_backbone(pts)\n if self.with_pts_neck:\n x = self.pts_neck(x)\n\n seed_points = x['fp_xyz'][-1]\n seed_features = x['fp_features'][-1]\n seed_indices = x['fp_indices'][-1]\n\n return (seed_points, seed_features, seed_indices)\n\n def extract_pts_feats(self, pts):\n \"\"\"Extract features of points from multiple samples.\"\"\"\n assert isinstance(pts, list)\n return [self.extract_pts_feat(pt) for pt in pts]\n\n @torch.no_grad()\n def extract_bboxes_2d(self,\n img,\n img_metas,\n train=True,\n bboxes_2d=None,\n **kwargs):\n \"\"\"Extract bounding boxes from 2d detector.\n\n Args:\n img (torch.Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n img_metas (list[dict]): Image meta info.\n train (bool): train-time or not.\n bboxes_2d (list[torch.Tensor]): provided 2d bboxes,\n not supported yet.\n\n Return:\n list[torch.Tensor]: a list of processed 2d bounding boxes.\n \"\"\"\n if bboxes_2d is None:\n if self.with_img_bbox_head:\n rets = []\n x = self.extract_img_feat(img)\n # TODO the value return from yolo is not correct.\n pred_maps = self.img_bbox_head(x)[0]\n bboxes = self.img_bbox_head.get_bboxes(\n pred_maps, img_metas, cfg=self.test_cfg)\n for box in bboxes:\n res = torch.cat((box[0], box[1].view(-1, 1)), 1)\n rets.append(res)\n return rets\n x = self.extract_img_feat(img)\n proposal_list = self.img_rpn_head.simple_test_rpn(x, img_metas)\n rets = self.img_roi_head.simple_test(\n x, proposal_list, img_metas, rescale=False)\n\n rets_processed = []\n for ret in rets:\n tmp = np.concatenate(ret, axis=0)\n sem_class = img.new_zeros((len(tmp)))\n start = 0\n for i, bboxes in enumerate(ret):\n sem_class[start:start + len(bboxes)] = i\n start += len(bboxes)\n ret = img.new_tensor(tmp)\n\n # append class index\n ret = torch.cat([ret, sem_class[:, None]], dim=-1)\n inds = torch.argsort(ret[:, 4], descending=True)\n ret = ret.index_select(0, inds)\n\n # drop half bboxes during training for better generalization\n if train:\n rand_drop = torch.randperm(len(ret))[:(len(ret) + 1) // 2]\n rand_drop = torch.sort(rand_drop)[0]\n ret = ret[rand_drop]\n\n rets_processed.append(ret.float())\n return rets_processed\n else:\n rets_processed = []\n for ret in bboxes_2d:\n if len(ret) > 0 and train:\n rand_drop = torch.randperm(len(ret))[:(len(ret) + 1) // 2]\n rand_drop = torch.sort(rand_drop)[0]\n ret = ret[rand_drop]\n rets_processed.append(ret.float())\n return rets_processed\n\n def forward_train(self,\n points=None,\n img=None,\n img_metas=None,\n gt_bboxes=None,\n gt_labels=None,\n gt_bboxes_ignore=None,\n gt_masks=None,\n proposals=None,\n bboxes_2d=None,\n gt_bboxes_3d=None,\n gt_labels_3d=None,\n pts_semantic_mask=None,\n pts_instance_mask=None,\n **kwargs):\n \"\"\"Forwarding of train for image branch pretrain or stage 2 train.\n\n Args:\n points (list[torch.Tensor]): Points of each batch.\n img (torch.Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n img_metas (list[dict]): list of image and point cloud meta info\n dict. For example, keys include 'ori_shape', 'img_norm_cfg',\n and 'transformation_3d_flow'. 
For details on the values of\n the keys see `mmdet/datasets/pipelines/formatting.py:Collect`.\n gt_bboxes (list[torch.Tensor]): Ground truth bboxes for each image\n with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[torch.Tensor]): class indices for each\n 2d bounding box.\n gt_bboxes_ignore (None | list[torch.Tensor]): specify which\n 2d bounding boxes can be ignored when computing the loss.\n gt_masks (None | torch.Tensor): true segmentation masks for each\n 2d bbox, used if the architecture supports a segmentation task.\n proposals: override rpn proposals (2d) with custom proposals.\n Use when `with_rpn` is False.\n bboxes_2d (list[torch.Tensor]): provided 2d bboxes,\n not supported yet.\n gt_bboxes_3d (:obj:`BaseInstance3DBoxes`): 3d gt bboxes.\n gt_labels_3d (list[torch.Tensor]): gt class labels for 3d bboxes.\n pts_semantic_mask (None | list[torch.Tensor]): point-wise semantic\n label of each batch.\n pts_instance_mask (None | list[torch.Tensor]): point-wise instance\n label of each batch.\n\n Returns:\n dict[str, torch.Tensor]: a dictionary of loss components.\n \"\"\"\n if points is None:\n x = self.extract_img_feat(img)\n losses = dict()\n\n # RPN forward and loss\n if self.with_img_rpn:\n proposal_cfg = self.train_cfg.get('img_rpn_proposal',\n self.test_cfg.img_rpn)\n rpn_losses, proposal_list = self.img_rpn_head.forward_train(\n x,\n img_metas,\n gt_bboxes,\n gt_labels=None,\n gt_bboxes_ignore=gt_bboxes_ignore,\n proposal_cfg=proposal_cfg)\n losses.update(rpn_losses)\n else:\n proposal_list = proposals\n\n roi_losses = self.img_roi_head.forward_train(\n x, img_metas, proposal_list, gt_bboxes, gt_labels,\n gt_bboxes_ignore, gt_masks, **kwargs)\n losses.update(roi_losses)\n return losses\n else:\n bboxes_2d = self.extract_bboxes_2d(\n img, img_metas, bboxes_2d=bboxes_2d, **kwargs)\n\n points = torch.stack(points)\n seeds_3d, seed_3d_features, seed_indices = \\\n self.extract_pts_feat(points)\n\n img_features, masks = self.fusion_layer(img, bboxes_2d, seeds_3d,\n img_metas)\n\n inds = sample_valid_seeds(masks, self.num_sampled_seed)\n batch_size, img_feat_size = img_features.shape[:2]\n pts_feat_size = seed_3d_features.shape[1]\n inds_img = inds.view(batch_size, 1,\n -1).expand(-1, img_feat_size, -1)\n img_features = img_features.gather(-1, inds_img)\n inds = inds % inds.shape[1]\n inds_seed_xyz = inds.view(batch_size, -1, 1).expand(-1, -1, 3)\n seeds_3d = seeds_3d.gather(1, inds_seed_xyz)\n inds_seed_feats = inds.view(batch_size, 1,\n -1).expand(-1, pts_feat_size, -1)\n seed_3d_features = seed_3d_features.gather(-1, inds_seed_feats)\n seed_indices = seed_indices.gather(1, inds)\n\n img_features = self.img_mlp(img_features)\n fused_features = torch.cat([seed_3d_features, img_features], dim=1)\n\n feat_dict_joint = dict(\n seed_points=seeds_3d,\n seed_features=fused_features,\n seed_indices=seed_indices)\n feat_dict_pts = dict(\n seed_points=seeds_3d,\n seed_features=seed_3d_features,\n seed_indices=seed_indices)\n feat_dict_img = dict(\n seed_points=seeds_3d,\n seed_features=img_features,\n seed_indices=seed_indices)\n\n loss_inputs = (points, gt_bboxes_3d, gt_labels_3d,\n pts_semantic_mask, pts_instance_mask, img_metas)\n bbox_preds_joints = self.pts_bbox_head_joint(\n feat_dict_joint, self.train_cfg.pts.sample_mod)\n bbox_preds_pts = self.pts_bbox_head_pts(\n feat_dict_pts, self.train_cfg.pts.sample_mod)\n bbox_preds_img = self.pts_bbox_head_img(\n feat_dict_img, self.train_cfg.pts.sample_mod)\n losses_towers = []\n losses_joint = 
self.pts_bbox_head_joint.loss(\n bbox_preds_joints,\n *loss_inputs,\n gt_bboxes_ignore=gt_bboxes_ignore)\n losses_pts = self.pts_bbox_head_pts.loss(\n bbox_preds_pts,\n *loss_inputs,\n gt_bboxes_ignore=gt_bboxes_ignore)\n losses_img = self.pts_bbox_head_img.loss(\n bbox_preds_img,\n *loss_inputs,\n gt_bboxes_ignore=gt_bboxes_ignore)\n losses_towers.append(losses_joint)\n losses_towers.append(losses_pts)\n losses_towers.append(losses_img)\n combined_losses = dict()\n for loss_term in losses_joint:\n if 'loss' in loss_term:\n combined_losses[loss_term] = 0\n for i in range(len(losses_towers)):\n combined_losses[loss_term] += \\\n losses_towers[i][loss_term] * \\\n self.loss_weights[i]\n else:\n # only save the metric of the joint head\n # if it is not a loss\n combined_losses[loss_term] = \\\n losses_towers[0][loss_term]\n\n return combined_losses\n\n def forward_dummy(self, points, img, img_metas=None):\n \n return_dict = self.forward_test(points=points, img=img, img_metas=img_metas)[0]\n boxes = return_dict['boxes_3d'].corners\n scores = return_dict['scores_3d']\n labels = return_dict['labels_3d']\n return boxes, scores, labels\n\n def forward_test(self, points=None, img_metas=None, img=None, bboxes_2d=None, **kwargs):\n \"\"\"Forwarding of test for image branch pretrain or stage 2 train.\n\n Args:\n points (list[list[torch.Tensor]], optional): the outer\n list indicates test-time augmentations and the inner\n list contains all points in the batch, where each Tensor\n should have a shape NxC. Defaults to None.\n img_metas (list[list[dict]], optional): the outer list\n indicates test-time augs (multiscale, flip, etc.)\n and the inner list indicates images in a batch.\n Defaults to None.\n img (list[list[torch.Tensor]], optional): the outer\n list indicates test-time augmentations and inner Tensor\n should have a shape NxCxHxW, which contains all images\n in the batch. Defaults to None. Defaults to None.\n bboxes_2d (list[list[torch.Tensor]], optional):\n Provided 2d bboxes, not supported yet. Defaults to None.\n\n Returns:\n list[list[torch.Tensor]]|list[dict]: Predicted 2d or 3d boxes.\n \"\"\"\n if points is None:\n for var, name in [(img, 'img'), (img_metas, 'img_metas')]:\n if not isinstance(var, list):\n raise TypeError(\n f'{name} must be a list, but got {type(var)}')\n\n num_augs = len(img)\n if num_augs != len(img_metas):\n raise ValueError(f'num of augmentations ({len(img)}) '\n f'!= num of image meta ({len(img_metas)})')\n\n if num_augs == 1:\n # proposals (List[List[Tensor]]): the outer list indicates\n # test-time augs (multiscale, flip, etc.) 
and the inner list\n # indicates images in a batch.\n # The Tensor should have a shape Px4, where P is the number of\n # proposals.\n if 'proposals' in kwargs:\n kwargs['proposals'] = kwargs['proposals'][0]\n return self.simple_test_img_only(\n img=img[0], img_metas=img_metas[0], **kwargs)\n else:\n assert img[0].size(0) == 1, 'aug test does not support ' \\\n 'inference with batch size ' \\\n f'{img[0].size(0)}'\n # TODO: support test augmentation for predefined proposals\n assert 'proposals' not in kwargs\n return self.aug_test_img_only(\n img=img, img_metas=img_metas, **kwargs)\n\n else:\n for var, name in [(points, 'points'), (img_metas, 'img_metas')]:\n if not isinstance(var, list):\n raise TypeError('{} must be a list, but got {}'.format(\n name, type(var)))\n\n num_augs = len(points)\n if num_augs != len(img_metas):\n print(img_metas)\n raise ValueError(\n 'num of augmentations ({}) != num of image meta ({})'.\n format(len(points), len(img_metas)))\n\n if num_augs == 1:\n return self.simple_test(\n points[0],\n img_metas[0],\n img[0],\n bboxes_2d=bboxes_2d[0] if bboxes_2d is not None else None,\n **kwargs)\n else:\n return self.aug_test(points, img_metas, img, bboxes_2d,\n **kwargs)\n\n def simple_test_img_only(self,\n img,\n img_metas,\n proposals=None,\n rescale=False):\n r\"\"\"Test without augmentation, image network pretrain. May refer to\n `<https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py>`_.\n\n Args:\n img (torch.Tensor): Should have a shape NxCxHxW, which contains\n all images in the batch.\n img_metas (list[dict]):\n proposals (list[Tensor], optional): override rpn proposals\n with custom proposals. Defaults to None.\n rescale (bool, optional): Whether or not rescale bboxes to the\n original shape of input image. Defaults to False.\n\n Returns:\n list[list[torch.Tensor]]: Predicted 2d boxes.\n \"\"\" # noqa: E501\n assert self.with_img_bbox, 'Img bbox head must be implemented.'\n assert self.with_img_backbone, 'Img backbone must be implemented.'\n assert self.with_img_rpn, 'Img rpn must be implemented.'\n assert self.with_img_roi_head, 'Img roi head must be implemented.'\n\n x = self.extract_img_feat(img)\n\n if proposals is None:\n proposal_list = self.img_rpn_head.simple_test_rpn(x, img_metas)\n else:\n proposal_list = proposals\n\n ret = self.img_roi_head.simple_test(\n x, proposal_list, img_metas, rescale=rescale)\n\n return ret\n\n def simple_test(self,\n points=None,\n img_metas=None,\n img=None,\n bboxes_2d=None,\n rescale=False,\n **kwargs):\n \"\"\"Test without augmentation, stage 2.\n\n Args:\n points (list[torch.Tensor], optional): Elements in the list\n should have a shape NxC, the list indicates all point-clouds\n in the batch. Defaults to None.\n img_metas (list[dict], optional): List indicates\n images in a batch. Defaults to None.\n img (torch.Tensor, optional): Should have a shape NxCxHxW,\n which contains all images in the batch. Defaults to None.\n bboxes_2d (list[torch.Tensor], optional):\n Provided 2d bboxes, not supported yet. 
Defaults to None.\n rescale (bool, optional): Whether or not rescale bboxes.\n Defaults to False.\n\n Returns:\n list[dict]: Predicted 3d boxes.\n \"\"\"\n bboxes_2d = self.extract_bboxes_2d(\n img, img_metas, train=False, bboxes_2d=bboxes_2d, **kwargs)\n\n points = torch.stack(points)\n seeds_3d, seed_3d_features, seed_indices = \\\n self.extract_pts_feat(points)\n\n img_features, masks = self.fusion_layer(img, bboxes_2d, seeds_3d,\n img_metas)\n\n inds = sample_valid_seeds(masks, self.num_sampled_seed)\n batch_size, img_feat_size = img_features.shape[:2]\n pts_feat_size = seed_3d_features.shape[1]\n inds_img = inds.view(batch_size, 1, -1).expand(-1, img_feat_size, -1)\n img_features = img_features.gather(-1, inds_img)\n inds = inds % inds.shape[1]\n inds_seed_xyz = inds.view(batch_size, -1, 1).expand(-1, -1, 3)\n seeds_3d = seeds_3d.gather(1, inds_seed_xyz)\n inds_seed_feats = inds.view(batch_size, 1,\n -1).expand(-1, pts_feat_size, -1)\n seed_3d_features = seed_3d_features.gather(-1, inds_seed_feats)\n seed_indices = seed_indices.gather(1, inds)\n\n img_features = self.img_mlp(img_features)\n\n fused_features = torch.cat([seed_3d_features, img_features], dim=1)\n\n feat_dict = dict(\n seed_points=seeds_3d,\n seed_features=fused_features,\n seed_indices=seed_indices)\n bbox_preds = self.pts_bbox_head_joint(feat_dict,\n self.test_cfg.pts.sample_mod)\n bbox_list = self.pts_bbox_head_joint.get_bboxes(\n points, bbox_preds, img_metas, rescale=rescale)\n bbox_results = [\n bbox3d2result(bboxes, scores, labels)\n for bboxes, scores, labels in bbox_list\n ]\n return bbox_results\n\n def aug_test_img_only(self, img, img_metas, rescale=False):\n r\"\"\"Test function with augmentation, image network pretrain. May refer\n to `<https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/two_stage.py>`_.\n\n Args:\n img (list[list[torch.Tensor]], optional): the outer\n list indicates test-time augmentations and inner Tensor\n should have a shape NxCxHxW, which contains all images\n in the batch. Defaults to None. Defaults to None.\n img_metas (list[list[dict]], optional): the outer list\n indicates test-time augs (multiscale, flip, etc.)\n and the inner list indicates images in a batch.\n Defaults to None.\n rescale (bool, optional): Whether or not rescale bboxes to the\n original shape of input image. If rescale is False, then\n returned bboxes and masks will fit the scale of imgs[0].\n Defaults to None.\n\n Returns:\n list[list[torch.Tensor]]: Predicted 2d boxes.\n \"\"\" # noqa: E501\n assert self.with_img_bbox, 'Img bbox head must be implemented.'\n assert self.with_img_backbone, 'Img backbone must be implemented.'\n assert self.with_img_rpn, 'Img rpn must be implemented.'\n assert self.with_img_roi_head, 'Img roi head must be implemented.'\n\n x = self.extract_img_feats(img)\n proposal_list = self.img_rpn_head.aug_test_rpn(x, img_metas)\n\n return self.img_roi_head.aug_test(\n x, proposal_list, img_metas, rescale=rescale)\n\n def aug_test(self,\n points=None,\n img_metas=None,\n imgs=None,\n bboxes_2d=None,\n rescale=False,\n **kwargs):\n \"\"\"Test function with augmentation, stage 2.\n\n Args:\n points (list[list[torch.Tensor]], optional): the outer\n list indicates test-time augmentations and the inner\n list contains all points in the batch, where each Tensor\n should have a shape NxC. 
Defaults to None.\n img_metas (list[list[dict]], optional): the outer list\n indicates test-time augs (multiscale, flip, etc.)\n and the inner list indicates images in a batch.\n Defaults to None.\n imgs (list[list[torch.Tensor]], optional): the outer\n list indicates test-time augmentations and inner Tensor\n should have a shape NxCxHxW, which contains all images\n in the batch. Defaults to None. Defaults to None.\n bboxes_2d (list[list[torch.Tensor]], optional):\n Provided 2d bboxes, not supported yet. Defaults to None.\n rescale (bool, optional): Whether or not rescale bboxes.\n Defaults to False.\n\n Returns:\n list[dict]: Predicted 3d boxes.\n \"\"\"\n points_cat = [torch.stack(pts) for pts in points]\n feats = self.extract_pts_feats(points_cat, img_metas)\n\n # only support aug_test for one sample\n aug_bboxes = []\n for x, pts_cat, img_meta, bbox_2d, img in zip(feats, points_cat,\n img_metas, bboxes_2d,\n imgs):\n\n bbox_2d = self.extract_bboxes_2d(\n img, img_metas, train=False, bboxes_2d=bbox_2d, **kwargs)\n\n seeds_3d, seed_3d_features, seed_indices = x\n\n img_features, masks = self.fusion_layer(img, bbox_2d, seeds_3d,\n img_metas)\n\n inds = sample_valid_seeds(masks, self.num_sampled_seed)\n batch_size, img_feat_size = img_features.shape[:2]\n pts_feat_size = seed_3d_features.shape[1]\n inds_img = inds.view(batch_size, 1,\n -1).expand(-1, img_feat_size, -1)\n img_features = img_features.gather(-1, inds_img)\n inds = inds % inds.shape[1]\n inds_seed_xyz = inds.view(batch_size, -1, 1).expand(-1, -1, 3)\n seeds_3d = seeds_3d.gather(1, inds_seed_xyz)\n inds_seed_feats = inds.view(batch_size, 1,\n -1).expand(-1, pts_feat_size, -1)\n seed_3d_features = seed_3d_features.gather(-1, inds_seed_feats)\n seed_indices = seed_indices.gather(1, inds)\n\n img_features = self.img_mlp(img_features)\n\n fused_features = torch.cat([seed_3d_features, img_features], dim=1)\n\n feat_dict = dict(\n seed_points=seeds_3d,\n seed_features=fused_features,\n seed_indices=seed_indices)\n bbox_preds = self.pts_bbox_head_joint(feat_dict,\n self.test_cfg.pts.sample_mod)\n bbox_list = self.pts_bbox_head_joint.get_bboxes(\n pts_cat, bbox_preds, img_metas, rescale=rescale)\n\n bbox_list = [\n dict(boxes_3d=bboxes, scores_3d=scores, labels_3d=labels)\n for bboxes, scores, labels in bbox_list\n ]\n aug_bboxes.append(bbox_list[0])\n\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes = merge_aug_bboxes_3d(aug_bboxes, img_metas,\n self.bbox_head.test_cfg)\n\n return [merged_bboxes]\n", "id": "2576098", "language": "Python", "matching_score": 6.032758712768555, "max_stars_count": 0, "path": "mmdet3d/models/detectors/imvotenet.py" }, { "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport warnings\nimport torch.nn.functional as F\nfrom mmdet3d.ops.voxel.voxelize import Voxelization\nfrom .single_stage import SingleStage3DDetector\nfrom mmdet3d.core import bbox3d2result, merge_aug_bboxes_3d\nfrom mmdet3d.models.utils import MLP\nfrom mmdet.models import DETECTORS\nfrom .. 
import builder\nfrom .base import Base3DDetector\n\n\[email protected]_module()\nclass MergeNet(Base3DDetector):\n\n def __init__(self,\n voxel_layer=None,\n voxel_encoder=None,\n pts_backbone=None,\n img_backbone=None,\n img_neck=None,\n img_bbox_head=None,\n backbone=None,\n middle_encoder=None,\n bbox_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n super(MergeNet, self).__init__(init_cfg=init_cfg)\n # point branch\n if pts_backbone is not None:\n self.pts_backbone = builder.build_backbone(pts_backbone)\n\n # image branch\n if self.with_img_backbone:\n self.img_backbone = builder.build_backbone(img_backbone)\n if self.with_img_neck:\n self.img_neck = builder.build_neck(img_neck)\n if self.with_img_bbox_head:\n self.img_bbox_head = builder.build_head(img_bbox_head)\n self.freeze_img_branch_params()\n\n # Merge Branch(Centernet3d's head)\n bbox_head.update(train_cfg=train_cfg)\n bbox_head.update(test_cfg=test_cfg)\n\n self.backbone = builder.build_backbone(backbone)\n self.voxel_layer = Voxelization(**voxel_layer)\n self.voxel_encoder = builder.build_voxel_encoder(voxel_encoder)\n self.centernet3d_head = builder.build_head(bbox_head)\n\n self.middle_encoder = builder.build_middle_encoder(middle_encoder)\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n def extract_feat(self, imgs):\n \"mmdetection3d needs such abstract method.\"\n pass\n\n def freeze_img_branch_params(self):\n \"\"\"Freeze all image branch parameters.\"\"\"\n if self.with_img_bbox_head:\n for param in self.img_bbox_head.parameters():\n param.requires_grad = False\n if self.with_img_backbone:\n for param in self.img_backbone.parameters():\n param.requires_grad = False\n if self.with_img_neck:\n for param in self.img_neck.parameters():\n param.requires_grad = False\n if self.with_img_rpn:\n for param in self.img_rpn_head.parameters():\n param.requires_grad = False\n if self.with_img_roi_head:\n for param in self.img_roi_head.parameters():\n param.requires_grad = False\n\n @property\n def with_img_bbox(self):\n \"\"\"bool: Whether the detector has a 2D image box head.\"\"\"\n return ((hasattr(self, 'img_roi_head') and self.img_roi_head.with_bbox)\n or (hasattr(self, 'img_bbox_head')\n and self.img_bbox_head is not None))\n\n @property\n def with_img_bbox_head(self):\n \"\"\"bool: Whether the detector has a 2D image box head (not roi).\"\"\"\n return hasattr(self,\n 'img_bbox_head') and self.img_bbox_head is not None\n\n @property\n def with_img_backbone(self):\n \"\"\"bool: Whether the detector has a 2D image backbone.\"\"\"\n return hasattr(self, 'img_backbone') and self.img_backbone is not None\n\n @property\n def with_img_neck(self):\n \"\"\"bool: Whether the detector has a neck in image branch.\"\"\"\n return hasattr(self, 'img_neck') and self.img_neck is not None\n\n @property\n def with_img_rpn(self):\n \"\"\"bool: Whether the detector has a 2D RPN in image detector branch.\"\"\"\n return hasattr(self, 'img_rpn_head') and self.img_rpn_head is not None\n\n @property\n def with_img_roi_head(self):\n \"\"\"bool: Whether the detector has a RoI Head in image branch.\"\"\"\n return hasattr(self, 'img_roi_head') and self.img_roi_head is not None\n\n @property\n def with_pts_bbox(self):\n \"\"\"bool: Whether the detector has a 3D box head.\"\"\"\n return hasattr(self,\n 'pts_bbox_head') and self.pts_bbox_head is not None\n\n @property\n def with_pts_backbone(self):\n \"\"\"bool: Whether the detector has a 3D backbone.\"\"\"\n return hasattr(self, 'pts_backbone') and 
self.pts_backbone is not None\n\n @property\n def with_pts_neck(self):\n \"\"\"bool: Whether the detector has a neck in 3D detector branch.\"\"\"\n return hasattr(self, 'pts_neck') and self.pts_neck is not None\n\n @torch.no_grad()\n def extrac_img_feat(self, img, img_metas=None):\n x = self.img_backbone(img)\n img_features = self.img_neck(x)\n img_bbox = self.img_bbox_head(img_features)\n return img_features, img_bbox\n\n def extract_pts_feat(self, points):\n x = self.pts_backbone(points)\n seed_points = x['fp_xyz'][-1]\n seed_features = x['fp_features'][-1]\n seed_indices = x['fp_indices'][-1]\n\n return (seed_points, seed_features, seed_indices)\n\n @torch.no_grad()\n def voxelize(self, points):\n \"\"\"Apply hard voxelization to points.\"\"\"\n voxels, coors, num_points = [], [], []\n for res in points:\n res_voxels, res_coors, res_num_points = self.voxel_layer(res)\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n voxels = torch.cat(voxels, dim=0)\n num_points = torch.cat(num_points, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n return voxels, num_points, coors_batch\n\n def extract_voxel_feat(self, points):\n \"\"\"Extract features from points.\"\"\"\n voxels, num_points, coors = self.voxelize(points)\n voxel_features = self.voxel_encoder(voxels, num_points, coors)\n batch_size = coors[-1, 0].item() + 1\n point_misc = None\n x = self.middle_encoder(voxel_features, coors, batch_size)\n x = self.backbone(x)\n # print(\"x shape\",x[0].shape)\n # if xconv2 is not None:\n # x=[x[0]+xconv2]\n return x, point_misc\n\n def forward_train(self,\n img,\n points=None,\n img_metas=None,\n gt_bboxes=None,\n gt_labels=None,\n gt_bboxes_3d=None,\n gt_labels_3d=None,\n gt_bboxes_ignore=None):\n # img feature\n # img_features, img_bbox = self.extrac_img_feat(img)\n\n # points feature\n # points = torch.stack(points)\n\n # seeds_3d, seed_3d_features, seed_indices = self.extract_pts_feat(\n # points)\n\n # x, _ = self.extract_voxel_feat(seeds_3d)\n\n # For points only.\n # points = torch.stack(points)\n # TODO Debug dim change\n # points = [torch.tensor(np.load('./bug_points.npy')).to('cuda')]\n x, _ = self.extract_voxel_feat(points)\n # merge\n pred_dict = self.centernet3d_head(x)\n losses = dict()\n head_loss = self.centernet3d_head.loss(pred_dict, gt_labels_3d,\n gt_bboxes_3d)\n losses.update(head_loss)\n return losses\n\n def simple_test(self, points, img_metas, imgs, rescale=False):\n \"\"\"Testing for one img and one point cloud.\n \"\"\"\n # img feature\n # img_features, img_bbox = self.extrac_img_feat(imgs)\n\n # points feature\n # points = torch.stack(points)\n # x, _ = self.extract_voxel_feat(points)\n # merge\n # pred_dict = self.centernet3d_head(x)\n # seeds_3d, seed_3d_features, seed_indices = self.extrac_pts_feat(points)\n\n # merge\n x, _ = self.extract_voxel_feat(points=points)\n pred_dict = self.centernet3d_head(x)\n bbox_list = self.centernet3d_head.get_bboxes(pred_dict, img_metas)\n bbox_results = [\n bbox3d2result(bboxes, scores, labels, img_meta)\n for bboxes, scores, labels, img_meta in bbox_list\n ]\n return bbox_results\n\n def aug_test(self, points, img_metas, imgs, rescale=False):\n feats, _ = self.extract_voxel_feat(points)\n aug_bboxes = []\n for x, img_meta in zip(feats, img_metas):\n # points feature\n outs = self.centernet3d_head([x])\n bbox_list = 
self.centernet3d_head.get_bboxes(outs, img_meta)\n bbox_list = [\n dict(boxes_3d=bboxes, scores_3d=scores, labels_3d=labels)\n for bboxes, scores, labels, img_meta in bbox_list\n ]\n aug_bboxes.append(bbox_list[0])\n merged_bboxes = merge_aug_bboxes_3d(aug_bboxes, [[img_meta]],\n self.centernet3d_head.test_cfg)\n return merged_bboxes\n\n def forward_dummy(self, points, img_metas, imgs):\n pass", "id": "5730253", "language": "Python", "matching_score": 2.340853452682495, "max_stars_count": 0, "path": "mmdet3d/models/detectors/mergenet.py" }, { "content": "import torch\nfrom torch.nn import functional as F\nfrom torch import nn\nfrom mmdet.models import HEADS\nfrom collections import defaultdict\nfrom mmcv.cnn import bias_init_with_prob, normal_init\nimport torch\nfrom torch import nn as nn\n\nfrom mmdet.core import (build_anchor_generator, build_assigner,\n build_bbox_coder, build_sampler, multi_apply)\nfrom mmdet.models import HEADS\nfrom ..builder import build_loss\nfrom mmdet.models.losses import MSELoss\nfrom mmdet3d.models.losses import ModifiedFocalLoss\n\n\[email protected]_module()\nclass Center3DHead(nn.Module):\n\n def __init__(\n self,\n num_classes,\n in_channels,\n feat_channels,\n train_cfg,\n test_cfg,\n bbox_coder=dict(\n type=\"XYZWLHRBoxCoder\",\n voxel_size=[0.05, 0.05, 0.1],\n pc_range=[0, -40, -3, 70.4, 40, 1],\n num_dir_bins=12,\n downsample_ratio=4.0,\n min_overlap=0.01),\n loss_cls=dict(type='ModifiedFocalLoss', loss_weight=0.5),\n loss_xy=dict(type='GatherBalancedL1Loss', loss_weight=1.0),\n loss_z=dict(type='GatherBalancedL1Loss', loss_weight=1.0),\n loss_dim=dict(type='GatherBalancedL1Loss', loss_weight=1.0),\n loss_dir=dict(type='GatherBinResLoss', loss_weight=1.0),\n bias_cls=None,\n loss_corner=None,\n loss_decode=None,\n ):\n \"\"\"upsample_strides support float: [0.25, 0.5, 1]\n if upsample_strides < 1, conv2d will be used instead of convtranspose2d.\n \"\"\"\n super(Center3DHead, self).__init__()\n self.num_class = num_classes\n self.in_channels = in_channels\n self.tran_cfg = train_cfg\n self.test_cfg = test_cfg\n self.corner_attention = False\n if loss_corner is not None:\n self.corner_attention = True\n\n self.activaton_fun = nn.LeakyReLU(0.01, inplace=True)\n\n #build box coder\n self.box_coder = build_bbox_coder(bbox_coder)\n\n self.loss_cls = build_loss(loss_cls)\n self.loss_xy = build_loss(loss_xy)\n self.loss_z = build_loss(loss_z)\n self.loss_dim = build_loss(loss_dim)\n if loss_dir['type'] == \"GatherBinResLoss\":\n loss_dir['num_dir_bins'] = bbox_coder['num_dir_bins']\n self.loss_dir = build_loss(loss_dir)\n\n self.loss_decode = self.loss_corner = None\n if loss_corner is not None:\n self.loss_corner = build_loss(loss_corner)\n print(\"use corner attention module!\")\n if loss_decode is not None:\n loss_decode[\"box_coder\"] = bbox_coder\n self.loss_decode = build_loss(loss_decode)\n print(\"use decode loss!\")\n\n if bias_cls is None:\n bias_cls = bias_init_with_prob(0.01)\n\n self.heads = {\n \"center_pred\": self.num_class,\n \"xy_pred\": 2,\n \"z_pred\": 1,\n \"dim_pred\": 3, # sin cos\n \"dir_pred\": 2\n }\n if bbox_coder[\"num_dir_bins\"] > 0:\n assert loss_dir[\n \"type\"] == \"GatherBinResLoss\", \"num_dir_bins greater than 0, GatherBinResLoss is required\"\n self.heads[\"dir_pred\"] = bbox_coder[\"num_dir_bins\"] * 2\n\n if self.corner_attention:\n self.heads['corner_pred'] = self.num_class\n\n for head in self.heads:\n classes = self.heads[head]\n # if head in [\"dim_pred\"]:\n # fc = nn.Sequential(\n # nn.Conv2d(in_channels, 
feat_channels,\n # kernel_size=3, padding=1, bias=True),\n # self.activaton_fun,\n # nn.Conv2d(feat_channels, classes,\n # kernel_size=3, stride=1,padding=1))\n # else:\n fc = nn.Sequential(\n nn.Conv2d(\n in_channels,\n feat_channels,\n kernel_size=3,\n padding=1,\n bias=True), self.activaton_fun,\n nn.Conv2d(\n feat_channels,\n classes,\n kernel_size=1,\n stride=1,\n ))\n if head in [\"center_pred\", \"corner_pred\"]:\n fc[-1].bias.data.fill_(bias_cls)\n self.__setattr__(head, fc)\n\n def forward(self, x):\n z = {}\n x = x[0]\n # if not self.training and self.corner_attention:\n # self.heads.pop('corner_pred')\n\n for head in self.heads:\n z[head] = self.__getattr__(head)(x)\n if head in [\"center_pred\", \"corner_pred\", \"xy_pred\"]:\n z[head] = torch.sigmoid(z[head])\n if head in [\"dir_pred\"] and self.heads[\"dir_pred\"] == 2:\n z[head] = torch.tanh(z[head])\n return z\n\n def init_weights(self):\n \"\"\"Initialize the weights of head.\"\"\"\n # bias_cls = bias_init_with_prob(0.01)\n # normal_init(self.conv_cls, std=0.01, bias=bias_cls)\n # normal_init(self.conv_reg, std=0.01)\n pass\n\n def get_bboxes(self, pred_dicts, input_metas):\n return self.box_coder.decode_center(\n pred_dicts,\n input_metas,\n score_threshold=self.test_cfg['score_thr'])\n\n def loss(self, pred_dict, gt_labels, gt_bboxes, img_metas=None):\n gt_dict = self.box_coder.generate_target(gt_labels, gt_bboxes)\n mask = gt_dict[\"reg_mask\"]\n index = gt_dict[\"gt_index\"]\n\n if isinstance(self.loss_cls, MSELoss):\n avg_fac = gt_dict[\"score_map\"].sum()\n cls_loss = self.loss_cls(\n pred_dict[\"center_pred\"],\n gt_dict[\"score_map\"],\n avg_factor=avg_fac)\n elif isinstance(self.loss_cls, ModifiedFocalLoss):\n cls_loss = self.loss_cls(pred_dict[\"center_pred\"],\n gt_dict[\"score_map\"])\n else:\n raise NotImplementedError\n\n xy_loss = self.loss_xy(pred_dict[\"xy_pred\"], mask, index,\n gt_dict[\"gt_xyz\"][..., :2])\n z_loss = self.loss_z(pred_dict[\"z_pred\"], mask, index,\n gt_dict[\"gt_xyz\"][..., 2:])\n dim_loss = self.loss_dim(pred_dict[\"dim_pred\"], mask, index,\n gt_dict[\"gt_dim\"])\n dir_loss = self.loss_dir(pred_dict[\"dir_pred\"], mask, index,\n gt_dict[\"gt_dir\"])\n # total_loss = cls_loss + xy_loss + z_loss + dim_loss + dir_loss\n loss_dict = {\n \"cls_loss\": cls_loss,\n \"xy_loss\": xy_loss,\n \"z_loss\": z_loss,\n \"dim_loss\": dim_loss,\n \"dir_loss\": dir_loss,\n }\n if self.loss_corner is not None:\n if isinstance(self.loss_corner, MSELoss):\n avg_fac = gt_dict[\"corner_map\"].sum()\n corner_loss = self.loss_corner(\n pred_dict[\"corner_pred\"],\n gt_dict[\"corner_map\"],\n avg_factor=avg_fac)\n elif isinstance(self.loss_corner, ModifiedFocalLoss):\n corner_loss = self.loss_corner(pred_dict[\"corner_pred\"],\n gt_dict[\"corner_map\"])\n else:\n raise NotImplementedError\n # corner_loss=self.loss_corner(pred_dict[\"corner_pred\"],gt_dict[\"corner_map\"])\n loss_dict[\"corner_loss\"] = corner_loss\n\n if self.loss_decode is not None:\n decode_loss = self.loss_decode(pred_dict, mask, index,\n gt_dict[\"gt_boxes3d\"],\n gt_dict[\"gt_dir\"])\n loss_dict[\"decode_loss\"] = decode_loss\n\n return loss_dict", "id": "4435191", "language": "Python", "matching_score": 4.07733154296875, "max_stars_count": 0, "path": "mmdet3d/models/dense_heads/center3d_head.py" }, { "content": "#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nimport torch.nn.functional as F\nimport torch\nimport numpy as np\nfrom torch import nn\nfrom mmdet3d.core.bbox.structures import LiDARInstance3DBoxes\nfrom mmdet3d.core.bbox.coders 
import gather_feature\nfrom mmdet.models.builder import LOSSES\nfrom mmdet.core import build_bbox_coder\n\[email protected]_module()\nclass ModifiedFocalLoss(nn.Module):\n def __init__(self,loss_weight,reduction=\"mean\"):\n super(ModifiedFocalLoss,self).__init__()\n self.weight=loss_weight\n self.reduction=reduction\n\n def forward(self,pred,target):\n loss=modified_focal_loss(pred,target,reduction=self.reduction)\n loss=loss*self.weight\n return loss\n\[email protected]_module()\nclass GatherBalancedL1Loss(nn.Module):\n def __init__(self,loss_weight,beta=1.0, alpha=0.5, gamma=1.5,reduction=\"none\"):\n super(GatherBalancedL1Loss,self).__init__()\n self.beta=beta\n self.alpha=alpha\n self.gamma=gamma\n self.weight=loss_weight\n self.reduction=reduction\n assert reduction==\"none\",\"only none reduction is support!\"\n\n def forward(self,output,mask,index,target):\n pred = gather_feature(output, index, use_transform=True) # (-1,C)\n mask = mask.unsqueeze(dim=2).expand_as(pred).float()\n pred=pred*mask\n target=target*mask\n\n assert pred.size() == target.size() and target.numel() > 0\n loss=balanced_l1_loss(pred,target,beta=self.beta,alpha=self.alpha,gamma=self.gamma,reduction=self.reduction)\n loss = loss.sum() / (mask.sum() + 1e-4)*self.weight\n return loss\n\[email protected]_module()\nclass BalancedL1LossV2(nn.Module):\n def __init__(self,loss_weight,beta=1.0, alpha=0.5, gamma=1.5,reduction=\"none\"):\n super(BalancedL1LossV2,self).__init__()\n self.beta=beta\n self.alpha=alpha\n self.gamma=gamma\n self.weight=loss_weight\n self.reduction=reduction\n assert reduction==\"none\",\"only none reduction is support!\"\n\n def forward(self,output,target,mask):\n\n assert output.size() == target.size()\n mask = mask.unsqueeze(dim=1).expand_as(output).float()\n loss=balanced_l1_loss(output,target,beta=self.beta,alpha=self.alpha,gamma=self.gamma,reduction=self.reduction)\n loss=loss*mask\n loss = loss.sum() / (mask.sum() + 1e-4)*self.weight\n return loss\n\[email protected]_module()\nclass GatherL1Loss(nn.Module):\n def __init__(self,loss_weight,reduction=\"none\"):\n super(GatherL1Loss,self).__init__()\n self.weight=loss_weight\n self.reduction=reduction\n assert reduction==\"none\",\"only none reduction is support!\"\n def forward(self,output,mask,index,target):\n pred = gather_feature(output, index, use_transform=True) # (-1,C)\n mask = mask.unsqueeze(dim=2).expand_as(pred).float()\n pred=pred*mask\n target=target*mask\n assert pred.size() == target.size() and target.numel() > 0\n loss = F.l1_loss(pred * mask, target * mask, reduction=self.reduction)\n loss = loss / (mask.sum() + 1e-4)*self.weight\n return loss\n\[email protected]_module()\nclass GatherBinResLoss(nn.Module):\n def __init__(self,loss_weight,num_dir_bins=12,reduction=\"none\"):\n super(GatherBinResLoss,self).__init__()\n self.weight=loss_weight\n self.reduction=reduction\n self.num_dir_bins=num_dir_bins\n\n def dir_bin_res_loss(self,dir_preds,mask,index,gt_dir):\n preds = gather_feature(dir_preds, index, use_transform=True) # (B,-1,C)\n\n pred_bin=preds[...,:self.num_dir_bins]\n pred_reg=preds[...,self.num_dir_bins:]\n\n gt_bin=gt_dir[...,0]\n gt_bin=gt_bin.long()\n gt_reg=gt_dir[...,1]\n mask = mask.float()\n ry_bin_onehot = gt_bin.new_zeros(gt_bin.size(0),gt_bin.size(1),self.num_dir_bins)\n ry_bin_onehot.scatter_(2, gt_bin.unsqueeze(-1), 1)\n loss_ry_bin = F.cross_entropy(pred_bin.view(-1,pred_bin.size(-1)),\n gt_bin.view(-1),reduction='none')\n loss_ry_res = F.smooth_l1_loss((pred_reg *ry_bin_onehot).sum(dim=-1),\n 
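# the one-hot over heading bins keeps only the residual predicted for the ground-truth bin\n                                       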
gt_reg,reduction='none')\n loss_ry_res = (loss_ry_res * mask).sum() / (mask.sum() + 1e-4)\n loss_ry_bin = (loss_ry_bin * mask.reshape(-1)).sum() / (mask.reshape(-1).sum() + 1e-4)\n return loss_ry_bin+loss_ry_res\n\n\n def forward(self,dir_preds,mask,index,gt_dir):\n loss=self.dir_bin_res_loss(dir_preds,mask,index,gt_dir)\n return loss*self.weight\n\n\[email protected]_module()\nclass BinResLoss(nn.Module):\n def __init__(self,loss_weight,num_rad_bin=12,reduction=\"none\"):\n super(BinResLoss,self).__init__()\n self.weight=loss_weight\n self.reduction=reduction\n self.num_rad_bin=num_rad_bin\n\n def dir_bin_res_loss(self,dir_preds,gt_dir,mask):\n\n\n pred_bin=dir_preds[:,:self.num_rad_bin,:,:]\n pred_reg=dir_preds[:,self.num_rad_bin:,:,:]\n\n gt_bin=gt_dir[:,0:1,:,:]\n gt_bin=gt_bin.long()\n gt_reg=gt_dir[:,1:2,:,:]\n # mask = mask.unsqueeze(1).expand_as(pred_bin).float()\n ry_bin_onehot = torch.cuda.FloatTensor(pred_bin.size(0),pred_bin.size(1),pred_bin.size(2),pred_bin.size(3)).zero_()\n ry_bin_onehot.scatter_(1, gt_bin, 1)\n loss_ry_bin = F.cross_entropy(pred_bin,\n gt_bin.squeeze(1),reduction='none')\n loss_ry_res = F.smooth_l1_loss((pred_reg * ry_bin_onehot).sum(dim=1),\n gt_reg.squeeze(1),reduction='none')\n loss_ry_res = (loss_ry_res*mask).sum() / (mask.sum() + 1e-4)\n loss_ry_bin = (loss_ry_bin*mask).sum() / (mask.sum() + 1e-4)\n return loss_ry_bin+loss_ry_res\n\n\n def forward(self,dir_preds,gt_dir,mask):\n loss=self.dir_bin_res_loss(dir_preds,gt_dir,mask)\n return loss*self.weight\n\[email protected]_module()\nclass Boxes3dDecodeLoss(nn.Module):\n def __init__(self,loss_weight,box_coder=None,beta=1.0, alpha=0.5, gamma=1.5):\n super(Boxes3dDecodeLoss,self).__init__()\n self.beta = beta\n self.alpha = alpha\n self.gamma = gamma\n self.weight = loss_weight\n self.box_coder = build_bbox_coder(box_coder)\n\n def forward(self,pred_dict, mask, index, target,gt_dir=None):\n #\n\n # fmap=example['score_map']\n # print(\"fmap shape is \",fmap.shape, fmap.dtype,fmap.device)\n voxel_size = self.box_coder.voxel_size\n pc_range = self.box_coder.pc_range\n fmap = pred_dict['center_pred']\n batch,channels,height,width=fmap.shape\n dim_pred = pred_dict['dim_pred']\n dim_pred = gather_feature(dim_pred, index, use_transform=True)\n xy_pred = pred_dict['xy_pred']\n xy_pred = gather_feature(xy_pred, index, use_transform=True)\n z_pred = pred_dict['z_pred']\n z_pred = gather_feature(z_pred, index, use_transform=True)\n dir_pred = pred_dict['dir_pred']\n dir_pred = gather_feature(dir_pred, index, use_transform=True)\n\n if self.box_coder.num_dir_bins<=0:\n dir_pred=torch.atan2(dir_pred[:, :, 0:1], dir_pred[:, :, 1:])\n else:\n dir_bin =gt_dir[...,0:1].long()\n dir_res= torch.gather(dir_pred[:, :, self.box_coder.num_dir_bins:], dim=-1,\n index=dir_bin)\n dir_pred=self.box_coder.class2angle(dir_bin,dir_res)\n\n ys = (index / width).int().float().unsqueeze(-1)\n xs = (index % width).int().float().unsqueeze(-1)\n xs = xs + xy_pred[:, :, 0:1]\n ys = ys + xy_pred[:, :, 1:2]\n xs = xs * self.box_coder.downsample_ratio * voxel_size[0] + pc_range[0]\n ys = ys * self.box_coder.downsample_ratio * voxel_size[1] + pc_range[1]\n\n boxes_pred=torch.cat([xs,ys,z_pred,dim_pred,dir_pred],dim=-1).reshape(-1,7)\n boxes_pred_instances=LiDARInstance3DBoxes(boxes_pred,origin=(0.5,0.5,0))\n corners_pred=boxes_pred_instances.corners.reshape(batch,-1,8,3)\n boxes_gt=target.reshape(-1,7)\n boxes_gt_instances=LiDARInstance3DBoxes(boxes_gt,origin=(0.5,0.5,0))\n corners_gt = boxes_gt_instances.corners.reshape(batch,-1,8,3)\n if 
self.box_coder.num_dir_bins<=0:\n boxes_gt_flip=boxes_gt.clone()\n boxes_gt_flip[:,6]+=np.pi\n boxes_gt_flip_instances=LiDARInstance3DBoxes(boxes_gt_flip,origin=(0.5,0.5,0))\n corners_gt_flip=boxes_gt_flip_instances.corners.reshape(batch,-1,8,3)\n diff= torch.min(torch.abs(corners_pred - corners_gt),torch.abs(corners_pred-corners_gt_flip))\n b = np.e ** (self.gamma / self.alpha) - 1\n loss = torch.where(\n diff < self.beta,\n self.alpha / b * (b * diff + 1) * torch.log(b * diff / self.beta + 1) - self.alpha * diff,\n self.gamma * diff + self.gamma / b - self.alpha * self.beta)\n else:\n loss = balanced_l1_loss(corners_pred, corners_gt, beta=self.beta, alpha=self.alpha, gamma=self.gamma)\n mask = mask.unsqueeze(-1).unsqueeze(-1).expand_as(corners_gt).float()\n loss=loss*mask\n loss = loss.sum() / (mask.sum() + 1e-4)*self.weight\n return loss\n\ndef modified_focal_loss(pred, gt,reduction=\"sum\"):\n \"\"\"\n focal loss copied from CenterNet, modified version focal loss\n change log: numeric stable version implementation\n \"\"\"\n pos_inds = gt.eq(1).float()\n neg_inds = gt.lt(1).float()\n\n neg_weights = torch.pow(1 - gt, 4)\n # clamp min value is set to 1e-12 to maintain the numerical stability\n pred = torch.clamp(pred, 1e-12)\n\n pos_loss = -torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds\n neg_loss = -torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds\n\n if reduction==\"none\":\n loss=pos_loss+neg_loss\n elif reduction==\"sum\":\n loss=pos_loss.sum()+neg_loss.sum()\n\n elif reduction==\"mean\":\n num_pos = pos_inds.float().sum()\n if num_pos == 0:\n loss = neg_loss.sum()\n else:\n loss = (pos_loss.sum() + neg_loss.sum()) / num_pos\n else:\n raise NotImplementedError\n return loss\n\n\ndef balanced_l1_loss(pred,target, beta=1.0, alpha=0.5, gamma=1.5,reduction=\"none\"):\n\n assert beta > 0\n assert pred.size() == target.size() and target.numel() > 0\n\n diff = torch.abs(pred - target)\n b = np.e ** (gamma / alpha) - 1\n loss = torch.where(\n diff < beta,\n alpha / b * (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,\n gamma * diff + gamma / b - alpha * beta,)\n if reduction==\"none\":\n loss=loss\n elif reduction==\"sum\":\n loss=loss.sum()\n elif reduction==\"mean\":\n loss=loss.mean()\n else:\n raise NotImplementedError\n return loss\n\ndef smooth_l1_loss(pred, target, beta=1.0, reduction='mean'):\n assert beta > 0\n assert pred.size() == target.size() and target.numel() > 0\n diff = torch.abs(pred - target)\n loss = torch.where(diff < beta, 0.5 * diff * diff / beta,\n diff - 0.5 * beta)\n reduction_enum = F._Reduction.get_enum(reduction)\n # none: 0, mean:1, sum: 2\n if reduction_enum == 0:\n return loss\n elif reduction_enum == 1:\n return loss.sum() / pred.numel()\n elif reduction_enum == 2:\n return loss.sum()\n\n\ndef weighted_smoothl1(pred, target, weight, beta=1.0, avg_factor=None):\n if avg_factor is None:\n avg_factor = torch.sum(weight > 0).float().item() / 4 + 1e-6\n loss = smooth_l1_loss(pred, target, beta, reduction='none')\n return torch.sum(loss * weight)[None] / avg_factor\n\n\ndef sigmoid_focal_loss(pred,\n target,\n weight,\n gamma=2.0,\n alpha=0.25,\n reduction='mean'):\n pred_sigmoid = pred.sigmoid()\n target = target.type_as(pred)\n pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)\n weight = (alpha * target + (1 - alpha) * (1 - target)) * weight\n weight = weight * pt.pow(gamma)\n loss = F.binary_cross_entropy_with_logits(\n pred, target, reduction='none') * weight\n reduction_enum = 
F._Reduction.get_enum(reduction)\n    # none: 0, mean:1, sum: 2\n    if reduction_enum == 0:\n        return loss\n    elif reduction_enum == 1:\n        return loss.mean()\n    elif reduction_enum == 2:\n        return loss.sum()\n\n\ndef weighted_sigmoid_focal_loss(pred,\n                                target,\n                                weight,\n                                gamma=2.0,\n                                alpha=0.25,\n                                avg_factor=None,\n                                num_classes=80):\n    if avg_factor is None:\n        avg_factor = torch.sum(weight > 0).float().item() / num_classes + 1e-6\n    return sigmoid_focal_loss(\n        pred, target, weight, gamma=gamma, alpha=alpha,\n        reduction='sum')[None] / avg_factor\n\n\n", "id": "1482212", "language": "Python", "matching_score": 2.372098445892334, "max_stars_count": 91, "path": "mmdet3d/models/losses/center_loss.py" }, { "content": "# dataset settings\n_base_ = ['./_base_/schedules/cyclic_40e.py', './_base_/default_runtime.py']\ndataset_type = 'KittiDataset'\ndata_root = '/mmdetection3d/data/kitti/'\nclass_names = ['Car']\npoint_cloud_range = [0, -40, -3, 70.4, 40, 1]\nvoxel_size = [0.05, 0.05, 0.1]\nnum_class = 1\ncheckpoint_config = dict(interval=2)\nevaluation = dict(interval=5)\nlr = 0.000225\noptimizer = dict(type='AdamW', lr=lr, betas=(0.95, 0.99), weight_decay=0.01)\nlr_config = dict(\n    policy='cyclic',\n    target_ratio=(10, 1e-4),\n    cyclic_times=1,\n    step_ratio_up=0.4,\n)\ntotal_epochs = 40\ninput_modality = dict(use_lidar=True, use_camera=False)\n\ndb_sampler = dict(\n    data_root=data_root,\n    info_path=data_root + 'kitti_dbinfos_train.pkl',\n    rate=1.0,\n    prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),\n    classes=class_names,\n    sample_groups=dict(Car=15))\n\nfile_client_args = dict(backend='disk')\n\ntrain_pipeline = [\n    dict(\n        type='LoadPointsFromFile',\n        coord_type='LIDAR',\n        load_dim=4,\n        use_dim=4,\n        file_client_args=file_client_args),\n    dict(\n        type='LoadAnnotations3D',\n        with_bbox_3d=True,\n        with_label_3d=True,\n        file_client_args=file_client_args),\n    dict(type='ObjectSample', db_sampler=db_sampler),\n    dict(\n        type='ObjectNoise',\n        num_try=100,\n        translation_std=[1.0, 1.0, 0.5],\n        global_rot_range=[0.0, 0.0],\n        rot_range=[-0.78539816, 0.78539816]),\n    dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n    dict(\n        type='GlobalRotScaleTrans',\n        rot_range=[-0.78539816, 0.78539816],\n        scale_ratio_range=[0.95, 1.05]),\n    dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),\n    dict(type='PointShuffle'),\n    dict(type='DefaultFormatBundle3D', class_names=class_names),\n    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])\n]\ntest_pipeline = [\n    dict(\n        type='LoadPointsFromFile',\n        coord_type='LIDAR',\n        load_dim=4,\n        use_dim=4,\n        file_client_args=file_client_args),\n    dict(\n        type='MultiScaleFlipAug3D',\n        img_scale=(1333, 800),\n        pts_scale_ratio=1,\n        flip=False,\n        transforms=[\n            dict(\n                type='GlobalRotScaleTrans',\n                rot_range=[0, 0],\n                scale_ratio_range=[1., 1.],\n                translation_std=[0, 0, 0]),\n            dict(type='RandomFlip3D'),\n            dict(\n                type='PointsRangeFilter', point_cloud_range=point_cloud_range),\n            dict(\n                type='DefaultFormatBundle3D',\n                class_names=class_names,\n                with_label=False),\n            dict(type='Collect3D', keys=['points'])\n        ])\n]\n\ndata = dict(\n    samples_per_gpu=6,\n    workers_per_gpu=4,\n    train=dict(\n        type='RepeatDataset',\n        times=1,\n        dataset=dict(\n            type=dataset_type,\n            data_root=data_root,\n            ann_file=data_root + 'kitti_infos_train.pkl',\n            split='training',\n            pts_prefix='velodyne_reduced',\n            pipeline=train_pipeline,\n            modality=input_modality,\n            classes=class_names,\n            
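# test_mode=False so annotations are loaded from the info file and training targets can be built\n            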
test_mode=False,\n box_type_3d='LiDAR')),\n val=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_val.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=test_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=True,\n box_type_3d='LiDAR'),\n test=dict(\n type=dataset_type,\n data_root=data_root,\n ann_file=data_root + 'kitti_infos_test.pkl',\n split='testing',\n pts_prefix='velodyne_reduced',\n pipeline=test_pipeline,\n modality=input_modality,\n classes=class_names,\n test_mode=True,\n box_type_3d='LiDAR'))\n\nmodel = dict(\n type='CenterNet3D',\n voxel_layer=dict(\n max_num_points=5,\n point_cloud_range=point_cloud_range,\n voxel_size=voxel_size,\n max_voxels=(16000, 40000)),\n voxel_encoder=dict(type='HardSimpleVFE'),\n middle_encoder=dict(\n type='SparseEncoderV2',\n in_channels=4,\n sparse_shape=[40, 1600, 1408],\n out_channels=320),\n backbone=dict(\n type='SECONDFPNDCN',\n in_channels=128,\n layer_nums=[3],\n layer_strides=[1],\n num_filters=[128],\n upsample_strides=[2],\n out_channels=[128],\n ),\n bbox_head=dict(\n type='Center3DHead',\n num_classes=1,\n in_channels=128,\n feat_channels=128,\n bbox_coder=dict(\n type='Center3DBoxCoder',\n num_class=num_class,\n voxel_size=voxel_size,\n pc_range=point_cloud_range,\n num_dir_bins=0,\n downsample_ratio=4.0,\n min_overlap=0.01,\n keypoint_sensitive=True,\n ),\n loss_cls=dict(type='MSELoss', loss_weight=1.0),\n loss_xy=dict(type='GatherBalancedL1Loss', loss_weight=1.0),\n loss_z=dict(type='GatherBalancedL1Loss', loss_weight=1.0),\n loss_dim=dict(type='GatherBalancedL1Loss', loss_weight=2.0),\n loss_dir=dict(type='GatherBalancedL1Loss', loss_weight=0.5),\n # loss_decode=dict(type='Boxes3dDecodeLoss', loss_weight=0.5),\n bias_cls=-7.94,\n loss_corner=dict(type='MSELoss', loss_weight=1.0),\n ))\n# model training and testing settings\ntrain_cfg = dict()\ntest_cfg = dict(score_thr=0.10, )\nfind_unused_parameters = True\n", "id": "7089060", "language": "Python", "matching_score": 12.106255531311035, "max_stars_count": 0, "path": "configs/centernet3d/centernet3d.py" }, { "content": "lr = 0.000225\noptimizer = dict(\n type='AdamW', lr=0.000225, betas=(0.95, 0.99), weight_decay=0.01)\noptimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))\nlr_config = dict(\n policy='cyclic',\n target_ratio=(10, 0.0001),\n cyclic_times=1,\n step_ratio_up=0.4)\nmomentum_config = dict(\n policy='cyclic',\n target_ratio=(0.8947368421052632, 1),\n cyclic_times=1,\n step_ratio_up=0.4)\nrunner = dict(type='EpochBasedRunner', max_epochs=1)\ncheckpoint_config = dict(interval=2)\nlog_config = dict(\n interval=50,\n hooks=[dict(type='TextLoggerHook'),\n dict(type='TensorboardLoggerHook')])\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nwork_dir = './work_dirs/centernet3d'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\ndataset_type = 'KittiDataset'\ndata_root = '/mmdetection3d/data/kitti/'\nclass_names = ['Car']\npoint_cloud_range = [0, -40, -3, 70.4, 40, 1]\nvoxel_size = [0.05, 0.05, 0.1]\nnum_class = 1\nevaluation = dict(interval=1)\ntotal_epochs = 1\ninput_modality = dict(use_lidar=True, use_camera=False)\ndb_sampler = dict(\n data_root='/mmdetection3d/data/kitti/',\n info_path='/mmdetection3d/data/kitti/kitti_dbinfos_train.pkl',\n rate=1.0,\n prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),\n classes=['Car'],\n sample_groups=dict(Car=15))\nfile_client_args = dict(backend='disk')\ntrain_pipeline = [\n dict(\n 
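# KITTI velodyne scans are stored as (x, y, z, reflectance) float32, hence load_dim=4 / use_dim=4\n        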
type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n file_client_args=dict(backend='disk')),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=True,\n with_label_3d=True,\n file_client_args=dict(backend='disk')),\n dict(\n type='ObjectSample',\n db_sampler=dict(\n data_root='/mmdetection3d/data/kitti/',\n info_path='/mmdetection3d/data/kitti/kitti_dbinfos_train.pkl',\n rate=1.0,\n prepare=dict(\n filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),\n classes=['Car'],\n sample_groups=dict(Car=15))),\n dict(\n type='ObjectNoise',\n num_try=100,\n translation_std=[1.0, 1.0, 0.5],\n global_rot_range=[0.0, 0.0],\n rot_range=[-0.78539816, 0.78539816]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[-0.78539816, 0.78539816],\n scale_ratio_range=[0.95, 1.05]),\n dict(\n type='PointsRangeFilter', point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(\n type='ObjectRangeFilter', point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(type='PointShuffle'),\n dict(type='DefaultFormatBundle3D', class_names=['Car']),\n dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])\n]\ntest_pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n file_client_args=dict(backend='disk')),\n dict(\n type='MultiScaleFlipAug3D',\n img_scale=(1333, 800),\n pts_scale_ratio=1,\n flip=False,\n transforms=[\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[0, 0],\n scale_ratio_range=[1.0, 1.0],\n translation_std=[0, 0, 0]),\n dict(type='RandomFlip3D'),\n dict(\n type='PointsRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(\n type='DefaultFormatBundle3D',\n class_names=['Car'],\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ])\n]\ndata = dict(\n samples_per_gpu=6,\n workers_per_gpu=4,\n train=dict(\n type='RepeatDataset',\n times=1,\n dataset=dict(\n type='KittiDataset',\n data_root='/mmdetection3d/data/kitti/',\n ann_file='/mmdetection3d/data/kitti/kitti_infos_train.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=[\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n file_client_args=dict(backend='disk')),\n dict(\n type='LoadAnnotations3D',\n with_bbox_3d=True,\n with_label_3d=True,\n file_client_args=dict(backend='disk')),\n dict(\n type='ObjectSample',\n db_sampler=dict(\n data_root='/mmdetection3d/data/kitti/',\n info_path=\n '/mmdetection3d/data/kitti/kitti_dbinfos_train.pkl',\n rate=1.0,\n prepare=dict(\n filter_by_difficulty=[-1],\n filter_by_min_points=dict(Car=5)),\n classes=['Car'],\n sample_groups=dict(Car=15))),\n dict(\n type='ObjectNoise',\n num_try=100,\n translation_std=[1.0, 1.0, 0.5],\n global_rot_range=[0.0, 0.0],\n rot_range=[-0.78539816, 0.78539816]),\n dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[-0.78539816, 0.78539816],\n scale_ratio_range=[0.95, 1.05]),\n dict(\n type='PointsRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(\n type='ObjectRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(type='PointShuffle'),\n dict(type='DefaultFormatBundle3D', class_names=['Car']),\n dict(\n type='Collect3D',\n keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])\n ],\n modality=dict(use_lidar=True, use_camera=False),\n classes=['Car'],\n test_mode=False,\n box_type_3d='LiDAR')),\n val=dict(\n type='KittiDataset',\n data_root='/mmdetection3d/data/kitti/',\n 
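# the val set reuses images from the 'training' split together with kitti_infos_val.pkl\n        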
ann_file='/mmdetection3d/data/kitti/kitti_infos_val.pkl',\n split='training',\n pts_prefix='velodyne_reduced',\n pipeline=[\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n file_client_args=dict(backend='disk')),\n dict(\n type='MultiScaleFlipAug3D',\n img_scale=(1333, 800),\n pts_scale_ratio=1,\n flip=False,\n transforms=[\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[0, 0],\n scale_ratio_range=[1.0, 1.0],\n translation_std=[0, 0, 0]),\n dict(type='RandomFlip3D'),\n dict(\n type='PointsRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(\n type='DefaultFormatBundle3D',\n class_names=['Car'],\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ])\n ],\n modality=dict(use_lidar=True, use_camera=False),\n classes=['Car'],\n test_mode=True,\n box_type_3d='LiDAR'),\n test=dict(\n type='KittiDataset',\n data_root='/mmdetection3d/data/kitti/',\n ann_file='/mmdetection3d/data/kitti/kitti_infos_test.pkl',\n split='testing',\n pts_prefix='velodyne_reduced',\n pipeline=[\n dict(\n type='LoadPointsFromFile',\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n file_client_args=dict(backend='disk')),\n dict(\n type='MultiScaleFlipAug3D',\n img_scale=(1333, 800),\n pts_scale_ratio=1,\n flip=False,\n transforms=[\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[0, 0],\n scale_ratio_range=[1.0, 1.0],\n translation_std=[0, 0, 0]),\n dict(type='RandomFlip3D'),\n dict(\n type='PointsRangeFilter',\n point_cloud_range=[0, -40, -3, 70.4, 40, 1]),\n dict(\n type='DefaultFormatBundle3D',\n class_names=['Car'],\n with_label=False),\n dict(type='Collect3D', keys=['points'])\n ])\n ],\n modality=dict(use_lidar=True, use_camera=False),\n classes=['Car'],\n test_mode=True,\n box_type_3d='LiDAR'))\nmodel = dict(\n type='CenterNet3D',\n voxel_layer=dict(\n max_num_points=5,\n point_cloud_range=[0, -40, -3, 70.4, 40, 1],\n voxel_size=[0.05, 0.05, 0.1],\n max_voxels=(16000, 40000)),\n voxel_encoder=dict(type='HardSimpleVFE'),\n middle_encoder=dict(\n type='SparseEncoderV2',\n in_channels=4,\n sparse_shape=[40, 1600, 1408],\n out_channels=320),\n backbone=dict(\n type='SECONDFPNDCN',\n in_channels=128,\n layer_nums=[3],\n layer_strides=[1],\n num_filters=[128],\n upsample_strides=[2],\n out_channels=[128]),\n bbox_head=dict(\n type='Center3DHead',\n num_classes=1,\n in_channels=128,\n feat_channels=128,\n bbox_coder=dict(\n type='Center3DBoxCoder',\n num_class=1,\n voxel_size=[0.05, 0.05, 0.1],\n pc_range=[0, -40, -3, 70.4, 40, 1],\n num_dir_bins=0,\n downsample_ratio=4.0,\n min_overlap=0.01,\n keypoint_sensitive=True),\n loss_cls=dict(type='MSELoss', loss_weight=1.0),\n loss_xy=dict(type='GatherBalancedL1Loss', loss_weight=1.0),\n loss_z=dict(type='GatherBalancedL1Loss', loss_weight=1.0),\n loss_dim=dict(type='GatherBalancedL1Loss', loss_weight=2.0),\n loss_dir=dict(type='GatherBalancedL1Loss', loss_weight=0.5),\n bias_cls=-7.94,\n loss_corner=dict(type='MSELoss', loss_weight=1.0)),\n train_cfg=dict(),\n test_cfg=dict(score_thr=0.1))\n# train_cfg = dict()\n# test_cfg = dict(score_thr=0.1)\nfind_unused_parameters = True\ngpu_ids = range(0, 1)\n", "id": "2164148", "language": "Python", "matching_score": 8.943546295166016, "max_stars_count": 0, "path": "configs/centernet3d/centernet3d_debug.py" }, { "content": "_base_ = [\n '../_base_/datasets/sunrgbd-3d-10class.py',\n '../_base_/schedules/schedule_3x.py', '../_base_/default_runtime.py',\n '../_base_/models/imvotenet_image_base_yolo.py'\n]\n\nclass_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 
'desk', 'dresser',\n 'night_stand', 'bookshelf', 'bathtub')\n\nnum_class = len(class_names)\npoint_cloud_range = [-7, -8, -2, 7.08, 8, 2] # xyzxyz to voxilize\nvoxel_size = [0.01, 0.01, 0.1] # For Loss and Gt calculation\n\n# use caffe img_norm\nimg_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\n\nmodel = dict(\n type='MergeNet',\n voxel_layer=dict(\n max_num_points=5,\n point_cloud_range=point_cloud_range,\n voxel_size=voxel_size,\n max_voxels=(\n 16000, # if training, max_voxels[0],\n 20000)), # else max_voxels[1]\n voxel_encoder=dict(type='HardSimpleVFE'),\n backbone=dict(\n type='SECONDFPNDCN',\n in_channels=128,\n layer_nums=[3],\n layer_strides=[1],\n num_filters=[128],\n upsample_strides=[2],\n out_channels=[128]),\n pts_backbone=dict(\n type='PointNet2SASSG',\n in_channels=4,\n num_points=(2048, 1024, 512, 256), # points for SAMPLER.\n radius=(0.2, 0.4, 0.8, 1.2),\n num_samples=(64, 32, 16, 16),\n sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256),\n (128, 128, 256)),\n fp_channels=((256, 256), (256, 256)),\n norm_cfg=dict(type='BN2d'),\n sa_cfg=dict(\n type='PointSAModule',\n pool_mod='max',\n use_xyz=True,\n normalize_xyz=True)),\n middle_encoder=dict(\n type='SparseEncoderV2',\n in_channels=4,\n sparse_shape=[40, 1600, 1408],\n out_channels=320),\n bbox_head=dict(\n type='Center3DHead',\n num_classes=num_class,\n in_channels=128,\n feat_channels=128,\n bbox_coder=dict(\n type='Center3DBoxCoder',\n num_class=num_class,\n voxel_size=voxel_size,\n pc_range=point_cloud_range,\n num_dir_bins=0,\n downsample_ratio=4.0,\n min_overlap=0.01,\n keypoint_sensitive=True,\n ),\n loss_cls=dict(type='MSELoss', loss_weight=1.0),\n loss_xy=dict(type='GatherBalancedL1Loss', loss_weight=1.0),\n loss_z=dict(type='GatherBalancedL1Loss', loss_weight=1.0),\n loss_dim=dict(type='GatherBalancedL1Loss', loss_weight=2.0),\n loss_dir=dict(type='GatherBalancedL1Loss', loss_weight=0.5),\n # loss_decode=dict(type='Boxes3dDecodeLoss', loss_weight=0.5),\n bias_cls=-7.94,\n loss_corner=dict(type='MSELoss', loss_weight=1.0),\n ),\n train_cfg=dict(\n pts=dict(\n pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mod='vote')),\n test_cfg=dict(\n img_rcnn=dict(score_thr=0.1),\n pts=dict(\n sample_mod='seed',\n nms_thr=0.25,\n score_thr=0.05,\n per_class_proposal=True)))\n\ntrain_pipeline = [\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=True,\n load_dim=6,\n use_dim=[0, 1, 2]),\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations3D'),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(type='Resize', img_scale=(1333, 600), keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.0),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(\n type='RandomFlip3D',\n sync_2d=False,\n flip_ratio_bev_horizontal=0.5,\n ),\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[-0.523599, 0.523599],\n scale_ratio_range=[0.85, 1.15],\n shift_height=True),\n dict(type='PointSample', num_points=20000),\n dict(type='DefaultFormatBundle3D', class_names=class_names),\n dict(\n type='Collect3D',\n keys=[\n 'img', 'gt_bboxes', 'gt_labels', 'points', 'gt_bboxes_3d',\n 'gt_labels_3d'\n ])\n]\n\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=True,\n load_dim=6,\n use_dim=[0, 1, 2]),\n dict(\n type='MultiScaleFlipAug3D',\n img_scale=(1333, 600),\n pts_scale_ratio=1,\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n 
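# 2D flip is disabled here (flip_ratio=0.0); BEV flipping is handled by RandomFlip3D further down\n            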
dict(type='RandomFlip', flip_ratio=0.0),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(\n type='GlobalRotScaleTrans',\n rot_range=[0, 0],\n scale_ratio_range=[1., 1.],\n translation_std=[0, 0, 0]),\n dict(\n type='RandomFlip3D',\n sync_2d=False,\n flip_ratio_bev_horizontal=0.5,\n ),\n dict(type='PointSample', num_points=20000),\n dict(\n type='DefaultFormatBundle3D',\n class_names=class_names,\n with_label=False),\n dict(type='Collect3D', keys=['img', 'points'])\n ]),\n]\n\neval_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='LoadPointsFromFile',\n coord_type='DEPTH',\n shift_height=False,\n load_dim=6,\n use_dim=[0, 1, 2]),\n dict(\n type='DefaultFormatBundle3D',\n class_names=class_names,\n with_label=False),\n dict(type='Collect3D', keys=['img', 'points'])\n]\n\ndata = dict(\n samples_per_gpu=1,\n workers_per_gpu=4,\n train=dict(dataset=dict(pipeline=train_pipeline, filter_empty_gt=True)),\n val=dict(pipeline=test_pipeline),\n test=dict(pipeline=test_pipeline))\nevaluation = dict(pipeline=eval_pipeline)\nfind_unused_parameters = True\ngpu_ids = range(0, 2)\n", "id": "10325651", "language": "Python", "matching_score": 6.16673469543457, "max_stars_count": 0, "path": "configs/mergenet/mergenet_voxel_voxelencoder_middleencoder_1x1_36epochs_sunrgbd.py" }, { "content": "_base_ = [\n '../_base_/datasets/sunrgbd-3d-10class.py',\n '../_base_/default_runtime.py',\n '../_base_/models/base_yolo.py'\n]\n# use caffe img_norm\nimg_norm_cfg = dict(\n mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)\n\ntrain_pipeline = [\n dict(type='LoadImageFromFile', to_float32=True),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Expand',\n mean=img_norm_cfg['mean'],\n to_rgb=img_norm_cfg['to_rgb'],\n ratio_range=(1, 2)),\n dict(\n type='MinIoURandomCrop',\n min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),\n min_crop_size=0.3),\n dict(type='Resize', img_scale=(320, 320), keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n]\n\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(320, 320),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img'])\n ])\n]\n\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(times=1, dataset=dict(pipeline=train_pipeline)),\n val=dict(pipeline=test_pipeline),\n test=dict(pipeline=test_pipeline))\n\n# optimizer\noptimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005)\noptimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))\n# learning policy\nlr_config = dict(\n policy='step',\n warmup='linear',\n warmup_iters=4000,\n warmup_ratio=0.0001,\n step=[24, 28])\n\nrunner = dict(type='EpochBasedRunner', max_epochs=30)\nevaluation = dict(interval=1, metric=['bbox'])\nfind_unused_parameters = True\n", "id": "12281401", "language": "Python", "matching_score": 1.6282938718795776, "max_stars_count": 0, "path": "configs/imvotenet/yolo_imvote.py" }, { "content": "_base_ = [\n \"../_base_/datasets/coco_instance.py\", \"../_base_/schedules/cyclic_20e.py\",\n \"../_base_/default_runtime.py\", 
\"../_base_/models/nanodet.py\"\n]\n\n", "id": "10982928", "language": "Python", "matching_score": 0.07621639966964722, "max_stars_count": 0, "path": "configs/nanodet/nanodet_semantic_efficientnet.py" }, { "content": "# Copyright 2021 RangiLyu.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cv2\nimport matplotlib as mpl\nimport matplotlib.figure as mplfigure\nimport numpy as np\nimport pycocotools.mask as mask_util\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\n\n_SMALL_OBJECT_AREA_THRESH = 1000\n\n\ndef overlay_bbox_cv(img, dets, class_names, score_thresh):\n all_box = []\n for label in dets:\n for bbox in dets[label]:\n score = bbox[-1]\n if score > score_thresh:\n x0, y0, x1, y1 = [int(i) for i in bbox[:4]]\n all_box.append([label, x0, y0, x1, y1, score])\n all_box.sort(key=lambda v: v[5])\n for box in all_box:\n label, x0, y0, x1, y1, score = box\n # color = self.cmap(i)[:3]\n color = (_COLORS[label] * 255).astype(np.uint8).tolist()\n text = \"{}:{:.1f}%\".format(class_names[label], score * 100)\n txt_color = (0, 0, 0) if np.mean(_COLORS[label]) > 0.5 else (255, 255, 255)\n font = cv2.FONT_HERSHEY_SIMPLEX\n txt_size = cv2.getTextSize(text, font, 0.5, 2)[0]\n cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)\n\n cv2.rectangle(\n img,\n (x0, y0 - txt_size[1] - 1),\n (x0 + txt_size[0] + txt_size[1], y0 - 1),\n color,\n -1,\n )\n cv2.putText(img, text, (x0, y0 - 1), font, 0.5, txt_color, thickness=1)\n return img, all_box\n\n\ndef rand_cmap(\n nlabels,\n type=\"bright\",\n first_color_black=False,\n last_color_black=False,\n verbose=False,\n):\n \"\"\"\n Creates a random colormap to be used together with matplotlib.\n Useful for segmentation tasks\n :param nlabels: Number of labels (size of colormap)\n :param type: 'bright' for strong colors, 'soft' for pastel colors\n :param first_color_black: Option to use first color as black, True or False\n :param last_color_black: Option to use last color as black, True or False\n :param verbose: Prints the number of labels and shows the colormap. 
True or False\n :return: colormap for matplotlib\n \"\"\"\n import colorsys\n\n import numpy as np\n from matplotlib.colors import LinearSegmentedColormap\n\n if type not in (\"bright\", \"soft\"):\n print('Please choose \"bright\" or \"soft\" for type')\n return\n\n if verbose:\n print(\"Number of labels: \" + str(nlabels))\n\n # Generate color map for bright colors, based on hsv\n if type == \"bright\":\n randHSVcolors = [\n (\n np.random.uniform(low=0.0, high=1),\n np.random.uniform(low=0.2, high=1),\n np.random.uniform(low=0.9, high=1),\n )\n for i in range(nlabels)\n ]\n\n # Convert HSV list to RGB\n randRGBcolors = []\n for HSVcolor in randHSVcolors:\n randRGBcolors.append(\n colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2])\n )\n\n if first_color_black:\n randRGBcolors[0] = [0, 0, 0]\n\n if last_color_black:\n randRGBcolors[-1] = [0, 0, 0]\n\n random_colormap = LinearSegmentedColormap.from_list(\n \"new_map\", randRGBcolors, N=nlabels\n )\n\n # Generate soft pastel colors, by limiting the RGB spectrum\n if type == \"soft\":\n low = 0.6\n high = 0.95\n randRGBcolors = [\n (\n np.random.uniform(low=low, high=high),\n np.random.uniform(low=low, high=high),\n np.random.uniform(low=low, high=high),\n )\n for i in range(nlabels)\n ]\n\n if first_color_black:\n randRGBcolors[0] = [0, 0, 0]\n\n if last_color_black:\n randRGBcolors[-1] = [0, 0, 0]\n random_colormap = LinearSegmentedColormap.from_list(\n \"new_map\", randRGBcolors, N=nlabels\n )\n\n return random_colormap\n\n\nclass VisImage:\n \"\"\"\n Visualize detection results.\n\n Modified from Detectron2\n https://github.com/facebookresearch/detectron2\n \"\"\"\n\n def __init__(self, img, scale=1.0):\n self.img = img\n self.scale = scale\n self.width, self.height = img.shape[1], img.shape[0]\n self._setup_figure(img)\n\n def _setup_figure(self, img):\n \"\"\"\n Args:\n Same as in :meth:`__init__()`.\n\n Returns:\n fig (matplotlib.pyplot.figure): top level container for all the\n image plot elements.\n ax (matplotlib.pyplot.Axes): contains figure elements and sets\n the coordinate system.\n \"\"\"\n fig = mplfigure.Figure(frameon=False)\n self.dpi = fig.get_dpi()\n # add a small 1e-2 to avoid precision lost due to matplotlib's truncation\n # (https://github.com/matplotlib/matplotlib/issues/15363)\n fig.set_size_inches(\n (self.width * self.scale + 1e-2) / self.dpi,\n (self.height * self.scale + 1e-2) / self.dpi,\n )\n self.canvas = FigureCanvasAgg(fig)\n # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)\n ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])\n ax.axis(\"off\")\n ax.set_xlim(0.0, self.width)\n ax.set_ylim(self.height)\n\n self.fig = fig\n self.ax = ax\n\n def save(self, filepath):\n \"\"\"\n Args:\n filepath (str): a string that contains the absolute path, including\n the file name, where the visualized image will be saved.\n \"\"\"\n if filepath.lower().endswith(\".jpg\") or filepath.lower().endswith(\".png\"):\n # faster than matplotlib's imshow\n cv2.imwrite(filepath, self.get_image()[:, :, ::-1])\n else:\n # support general formats (e.g. 
pdf)\n self.ax.imshow(self.img, interpolation=\"nearest\")\n self.fig.savefig(filepath)\n\n def get_image(self):\n \"\"\"\n Returns:\n ndarray:\n the visualized image of shape (H, W, 3) (RGB) in uint8 type.\n The shape is scaled w.r.t the input image using the given\n `scale` argument.\n \"\"\"\n canvas = self.canvas\n s, (width, height) = canvas.print_to_buffer()\n if (self.width, self.height) != (width, height):\n img = cv2.resize(self.img, (width, height))\n else:\n img = self.img\n\n # buf = io.BytesIO() # works for cairo backend\n # canvas.print_rgba(buf)\n # width, height = self.width, self.height\n # s = buf.getvalue()\n\n buffer = np.frombuffer(s, dtype=\"uint8\")\n\n # imshow is slow. blend manually (still quite slow)\n img_rgba = buffer.reshape(height, width, 4)\n rgb, alpha = np.split(img_rgba, [3], axis=2)\n\n try:\n import numexpr as ne # fuse them with numexpr\n\n visualized_image = ne.evaluate(\n \"img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)\"\n )\n except ImportError:\n alpha = alpha.astype(\"float32\") / 255.0\n visualized_image = img * (1 - alpha) + rgb * alpha\n\n visualized_image = visualized_image.astype(\"uint8\")\n\n return visualized_image\n\n\nclass Visualizer:\n def __init__(self, img, dets, class_names, socre_thresh):\n self.img = img\n self.dets = dets\n self.class_names = class_names\n self.num_classes = len(self.class_names)\n self.score_thresh = socre_thresh\n self.viz = VisImage(img=self.img)\n self._default_font_size = max(\n np.sqrt(self.viz.height * self.viz.width) // 100, 10\n )\n\n def mask_to_polygon(self, mask, need_binary=True):\n res = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n hierarchy = res[-1]\n if hierarchy is None: # empty mask\n return None, None, None\n has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0\n res = res[-2]\n res = [x.flatten() for x in res]\n res = [x for x in res if len(x) >= 6]\n\n p = mask_util.frPyObjects(res, self.viz.height, self.viz.width)\n p = mask_util.merge(p)\n bbox = mask_util.toBbox(p)\n bbox[2] += bbox[0]\n bbox[3] += bbox[1]\n\n return res, bbox, has_holes\n\n def draw_box(self, box_coord, alpha=0.5, edge_color=\"g\", line_style=\"-\"):\n x0, y0, x1, y1 = box_coord\n width = x1 - x0\n height = y1 - y0\n linewidth = max(self._default_font_size / 6, 1)\n self.viz.ax.add_patch(\n mpl.patches.Rectangle(\n (x0, y0),\n width,\n height,\n fill=False,\n edgecolor=edge_color,\n linewidth=linewidth * self.viz.scale,\n alpha=alpha,\n linestyle=line_style,\n )\n )\n return self.viz\n\n def draw_polycon(self, mask, color, edge_color, alpha=0.5):\n if edge_color is None:\n edge_color = color\n edge_color = mpl.colors.to_rgb(edge_color) + (1,)\n\n polygon = mpl.patches.Polygon(\n mask,\n fill=False,\n # facecolor=mpl.colors.to_rgb(color) + (alpha,),\n edgecolor=edge_color,\n linewidth=max(self._default_font_size // 15 * self.viz.scale, 1),\n )\n self.viz.ax.add_patch(polygon)\n return self.viz\n\n def draw_mask(self, mask, polys, color, edge_color, alpha=0.5):\n if edge_color is None:\n edge_color = color\n edge_color = mpl.colors.to_rgb(edge_color) + (1,)\n color_mask = np.ones((mask.shape[0], mask.shape[1], 3))\n for i in range(3):\n color_mask[:, :, i] = color[i]\n self.viz.ax.imshow(np.dstack((color_mask, mask * alpha)))\n for ploy in polys:\n self.draw_polycon(ploy.reshape(-1, 2), color, edge_color=None, alpha=alpha)\n\n def _jitter(self, color):\n \"\"\"\n Randomly modifies given color to produce a slightly different color than\n the color given.\n\n Args:\n color (tuple[double]): a 
tuple of 3 elements, containing the RGB\n values of the color picked. The values in the list are in the\n [0.0, 1.0] range.\n\n Returns:\n jittered_color (tuple[double]): a tuple of 3 elements, containing\n the RGB values of the color after being jittered. The values\n in the list are in the [0.0, 1.0] range.\n \"\"\"\n color = mpl.colors.to_rgb(color)\n vec = np.random.rand(3)\n # better to do it in another color space\n vec = vec / np.linalg.norm(vec) * 0.5\n res = np.clip(vec + color, 0, 1)\n return tuple(res)\n\n def overlay_bbox(self, alpha=1.0):\n for label in self.dets:\n for bbox in self.dets[label]:\n x0, y0, x1, y1, score = bbox\n if score >= self.score_thresh:\n # color = self.cmap(i)[:3]\n color = _COLORS[label]\n text = \"{}:{:.1f}%\".format(self.class_names[label], score * 100)\n self.draw_box(bbox[:4], alpha=1.0, edge_color=color, line_style=\"-\")\n text_pos = (x0, y0)\n instance_area = (y1 - y0) * (x1 - x0)\n if (\n instance_area < _SMALL_OBJECT_AREA_THRESH * self.viz.scale\n or y1 - y0 < 40 * self.viz.scale\n ):\n if y1 >= self.viz.height - 5:\n text_pos = (x1, y0)\n else:\n text_pos = (x0, y1)\n\n height_ratio = (y1 - y0) / np.sqrt(self.viz.height * self.viz.width)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)\n * 0.5\n * self._default_font_size\n )\n\n self.draw_text(\n text,\n text_pos,\n color=\"black\",\n horizontal_alignment=\"left\",\n font_size=font_size,\n )\n out = self.viz.get_image()\n return out\n\n def overlay_masks(self, alpha=0.5):\n ov = self.img.copy()\n im = self.img # .astype(np.float32)\n total_ma = np.zeros([im.shape[0], im.shape[1]])\n total_contours = []\n for i, det in enumerate(self.dets[::-1]):\n score = det[\"score\"]\n if score >= self.score_thresh:\n ma = det[\"mask\"]\n _, ma = cv2.threshold(\n ma, thresh=127, maxval=255, type=cv2.THRESH_BINARY\n )\n fg = (\n im * alpha\n + np.ones(im.shape) * (1 - alpha) * self.cmap(i)[:3] * 255\n )\n ov[ma == 255] = fg[ma == 255]\n total_ma += ma\n contours = cv2.findContours(\n ma.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE\n )[-2:]\n total_contours.append(contours)\n for cnt in total_contours:\n cv2.drawContours(ov, cnt[0], -1, (0.0, 0.0, 0.0), 1)\n ov[total_ma == 0] = im[total_ma == 0]\n return ov\n\n def overlay_instance(self, alpha=0.4):\n for i, det in enumerate(self.dets[::-1]):\n score = det[\"score\"]\n if score >= self.score_thresh:\n label = det[\"label\"]\n binary_mask = det[\"mask\"]\n # color = self.cmap(i)[:3]\n color = _COLORS[label]\n color = self._jitter(color)\n contours, bbox, has_holes = self.mask_to_polygon(binary_mask.copy())\n if not contours:\n continue\n self.draw_mask(\n binary_mask, contours, color, edge_color=None, alpha=alpha\n )\n\n x0, y0, x1, y1 = bbox\n text = \"{}:{:.1f}%\".format(self.class_names[label], score * 100)\n text_pos = np.median(binary_mask.nonzero(), axis=1)[::-1]\n instance_area = (y1 - y0) * (x1 - x0)\n if (\n instance_area < _SMALL_OBJECT_AREA_THRESH * self.viz.scale\n or y1 - y0 < 40 * self.viz.scale\n ):\n if y1 >= self.viz.height - 5:\n text_pos = (x1, y0)\n else:\n text_pos = (x0, y1)\n\n height_ratio = (y1 - y0) / np.sqrt(self.viz.height * self.viz.width)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)\n * 0.5\n * self._default_font_size\n )\n\n self.draw_text(\n text,\n text_pos,\n color=\"black\",\n horizontal_alignment=\"center\",\n font_size=font_size,\n )\n out = self.viz.get_image()\n return out\n\n def draw_text(\n self,\n text,\n position,\n *,\n font_size=None,\n color=\"g\",\n 
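# parameters after the bare * are keyword-only\n        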
horizontal_alignment=\"center\",\n rotation=0\n ):\n \"\"\"\n Args:\n text (str): class label\n position (tuple): a tuple of the x and y coordinates to place text on image.\n font_size (int, optional): font of the text. If not provided, a font size\n proportional to the image width is calculated and used.\n color: color of the text. Refer to `matplotlib.colors` for full list\n of formats that are accepted.\n horizontal_alignment (str): see `matplotlib.text.Text`\n rotation: rotation angle in degrees CCW\n\n Returns:\n output (VisImage): image object with text drawn.\n \"\"\"\n if not font_size:\n font_size = self._default_font_size\n\n # since the text background is dark, we don't want the text to be dark\n color = np.maximum(list(mpl.colors.to_rgb(color)), 0.2)\n color[np.argmax(color)] = max(0.8, np.max(color))\n\n x, y = position\n self.viz.ax.text(\n x,\n y,\n text,\n size=font_size * self.viz.scale,\n family=\"sans-serif\",\n bbox={\n \"facecolor\": (0.5, 0.5, 1.0),\n \"alpha\": 0.8,\n \"pad\": 0.7,\n \"edgecolor\": (0.8, 0.8, 1.0),\n },\n verticalalignment=\"top\",\n horizontalalignment=horizontal_alignment,\n color=color,\n zorder=10,\n rotation=rotation,\n )\n return self.viz\n\n\n_COLORS = (\n np.array(\n [\n 0.000,\n 0.447,\n 0.741,\n 0.850,\n 0.325,\n 0.098,\n 0.929,\n 0.694,\n 0.125,\n 0.494,\n 0.184,\n 0.556,\n 0.466,\n 0.674,\n 0.188,\n 0.301,\n 0.745,\n 0.933,\n 0.635,\n 0.078,\n 0.184,\n 0.300,\n 0.300,\n 0.300,\n 0.600,\n 0.600,\n 0.600,\n 1.000,\n 0.000,\n 0.000,\n 1.000,\n 0.500,\n 0.000,\n 0.749,\n 0.749,\n 0.000,\n 0.000,\n 1.000,\n 0.000,\n 0.000,\n 0.000,\n 1.000,\n 0.667,\n 0.000,\n 1.000,\n 0.333,\n 0.333,\n 0.000,\n 0.333,\n 0.667,\n 0.000,\n 0.333,\n 1.000,\n 0.000,\n 0.667,\n 0.333,\n 0.000,\n 0.667,\n 0.667,\n 0.000,\n 0.667,\n 1.000,\n 0.000,\n 1.000,\n 0.333,\n 0.000,\n 1.000,\n 0.667,\n 0.000,\n 1.000,\n 1.000,\n 0.000,\n 0.000,\n 0.333,\n 0.500,\n 0.000,\n 0.667,\n 0.500,\n 0.000,\n 1.000,\n 0.500,\n 0.333,\n 0.000,\n 0.500,\n 0.333,\n 0.333,\n 0.500,\n 0.333,\n 0.667,\n 0.500,\n 0.333,\n 1.000,\n 0.500,\n 0.667,\n 0.000,\n 0.500,\n 0.667,\n 0.333,\n 0.500,\n 0.667,\n 0.667,\n 0.500,\n 0.667,\n 1.000,\n 0.500,\n 1.000,\n 0.000,\n 0.500,\n 1.000,\n 0.333,\n 0.500,\n 1.000,\n 0.667,\n 0.500,\n 1.000,\n 1.000,\n 0.500,\n 0.000,\n 0.333,\n 1.000,\n 0.000,\n 0.667,\n 1.000,\n 0.000,\n 1.000,\n 1.000,\n 0.333,\n 0.000,\n 1.000,\n 0.333,\n 0.333,\n 1.000,\n 0.333,\n 0.667,\n 1.000,\n 0.333,\n 1.000,\n 1.000,\n 0.667,\n 0.000,\n 1.000,\n 0.667,\n 0.333,\n 1.000,\n 0.667,\n 0.667,\n 1.000,\n 0.667,\n 1.000,\n 1.000,\n 1.000,\n 0.000,\n 1.000,\n 1.000,\n 0.333,\n 1.000,\n 1.000,\n 0.667,\n 1.000,\n 0.333,\n 0.000,\n 0.000,\n 0.500,\n 0.000,\n 0.000,\n 0.667,\n 0.000,\n 0.000,\n 0.833,\n 0.000,\n 0.000,\n 1.000,\n 0.000,\n 0.000,\n 0.000,\n 0.167,\n 0.000,\n 0.000,\n 0.333,\n 0.000,\n 0.000,\n 0.500,\n 0.000,\n 0.000,\n 0.667,\n 0.000,\n 0.000,\n 0.833,\n 0.000,\n 0.000,\n 1.000,\n 0.000,\n 0.000,\n 0.000,\n 0.167,\n 0.000,\n 0.000,\n 0.333,\n 0.000,\n 0.000,\n 0.500,\n 0.000,\n 0.000,\n 0.667,\n 0.000,\n 0.000,\n 0.833,\n 0.000,\n 0.000,\n 1.000,\n 0.000,\n 0.000,\n 0.000,\n 0.143,\n 0.143,\n 0.143,\n 0.286,\n 0.286,\n 0.286,\n 0.429,\n 0.429,\n 0.429,\n 0.571,\n 0.571,\n 0.571,\n 0.714,\n 0.714,\n 0.714,\n 0.857,\n 0.857,\n 0.857,\n 0.000,\n 0.447,\n 0.741,\n 0.314,\n 0.717,\n 0.741,\n 0.50,\n 0.5,\n 0,\n ]\n )\n .astype(np.float32)\n .reshape(-1, 3)\n)\n", "id": "453055", "language": "Python", "matching_score": 1.8014978170394897, "max_stars_count": 0, "path": 
"mmdet3d/models/utils/visualization.py" }, { "content": "import cv2\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom ..utils import (\n bbox2distance,\n distance2bbox,\n images_to_levels,\n multi_apply,\n overlay_bbox_cv,\n)\n\nfrom ...datasets.pipelines.warp import warp_boxes\nfrom ..losses.gfocal_loss import DistributionFocalLoss, QualityFocalLoss\nfrom ..losses.iou_loss import GIoULoss, bbox_overlaps\nfrom ..module.conv import ConvModule\nfrom ..module.init_weights import normal_init\nfrom ..module.nms import multiclass_nms\nfrom ..module.scale import Scale\nfrom .assigner.atss_assigner import ATSSAssigner\n\n\ndef reduce_mean(tensor):\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(\n tensor.true_divide(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor\n\n\nclass Integral(nn.Module):\n \"\"\"A fixed layer for calculating integral result from distribution.\n This layer calculates the target location by :math: `sum{P(y_i) * y_i}`,\n P(y_i) denotes the softmax vector that represents the discrete distribution\n y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}\n Args:\n reg_max (int): The maximal value of the discrete set. Default: 16. You\n may want to reset it according to your new dataset or related\n settings.\n \"\"\"\n\n def __init__(self, reg_max=16):\n super(Integral, self).__init__()\n self.reg_max = reg_max\n self.register_buffer(\"project\",\n torch.linspace(0, self.reg_max, self.reg_max + 1))\n\n def forward(self, x):\n \"\"\"Forward feature from the regression head to get integral result of\n bounding box location.\n Args:\n x (Tensor): Features of the regression head, shape (N, 4*(n+1)),\n n is self.reg_max.\n Returns:\n x (Tensor): Integral result of box locations, i.e., distance\n offsets from the box center in four directions, shape (N, 4).\n \"\"\"\n x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)\n x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)\n return x\n\n\nclass GFLHead(nn.Module):\n \"\"\"Generalized Focal Loss: Learning Qualified and Distributed Bounding\n Boxes for Dense Object Detection.\n\n GFL head structure is similar with ATSS, however GFL uses\n 1) joint representation for classification and localization quality, and\n 2) flexible General distribution for bounding box locations,\n which are supervised by\n Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively\n\n https://arxiv.org/abs/2006.04388\n\n :param num_classes: Number of categories excluding the background category.\n :param loss: Config of all loss functions.\n :param input_channel: Number of channels in the input feature map.\n :param feat_channels: Number of conv layers in cls and reg tower. Default: 4.\n :param stacked_convs: Number of conv layers in cls and reg tower. Default: 4.\n :param octave_base_scale: Scale factor of grid cells.\n :param strides: Down sample strides of all level feature map\n :param conv_cfg: Dictionary to construct and config conv layer. Default: None.\n :param norm_cfg: Dictionary to construct and config norm layer.\n :param reg_max: Max value of integral set :math: `{0, ..., reg_max}`\n in QFL setting. 
Default: 16.\n :param kwargs:\n \"\"\"\n\n def __init__(self,\n num_classes,\n loss,\n input_channel,\n feat_channels=256,\n stacked_convs=4,\n octave_base_scale=4,\n strides=[8, 16, 32],\n conv_cfg=None,\n norm_cfg=dict(type=\"GN\", num_groups=32, requires_grad=True),\n reg_max=16,\n **kwargs):\n super(GFLHead, self).__init__()\n self.num_classes = num_classes\n self.in_channels = input_channel\n self.feat_channels = feat_channels\n self.stacked_convs = stacked_convs\n self.grid_cell_scale = octave_base_scale\n self.strides = strides\n self.reg_max = reg_max\n\n self.loss_cfg = loss\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.use_sigmoid = self.loss_cfg.loss_qfl.use_sigmoid\n if self.use_sigmoid:\n self.cls_out_channels = num_classes\n else:\n self.cls_out_channels = num_classes + 1\n\n self.assigner = ATSSAssigner(topk=9)\n self.distribution_project = Integral(self.reg_max)\n\n self.loss_qfl = QualityFocalLoss(\n use_sigmoid=self.use_sigmoid,\n beta=self.loss_cfg.loss_qfl.beta,\n loss_weight=self.loss_cfg.loss_qfl.loss_weight,\n )\n self.loss_dfl = DistributionFocalLoss(\n loss_weight=self.loss_cfg.loss_dfl.loss_weight)\n self.loss_bbox = GIoULoss(\n loss_weight=self.loss_cfg.loss_bbox.loss_weight)\n self._init_layers()\n self.init_weights()\n\n def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n ))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n ))\n self.gfl_cls = nn.Conv2d(\n self.feat_channels, self.cls_out_channels, 3, padding=1)\n self.gfl_reg = nn.Conv2d(\n self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)\n self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n def init_weights(self):\n for m in self.cls_convs:\n normal_init(m.conv, std=0.01)\n for m in self.reg_convs:\n normal_init(m.conv, std=0.01)\n bias_cls = -4.595\n normal_init(self.gfl_cls, std=0.01, bias=bias_cls)\n normal_init(self.gfl_reg, std=0.01)\n\n def forward(self, feats):\n return multi_apply(self.forward_single, feats, self.scales)\n\n def forward_single(self, x, scale):\n cls_feat = x\n reg_feat = x\n for cls_conv in self.cls_convs:\n cls_feat = cls_conv(cls_feat)\n for reg_conv in self.reg_convs:\n reg_feat = reg_conv(reg_feat)\n cls_score = self.gfl_cls(cls_feat)\n bbox_pred = scale(self.gfl_reg(reg_feat)).float()\n return cls_score, bbox_pred\n\n def loss(self, preds, gt_meta):\n cls_scores, bbox_preds = preds\n batch_size = cls_scores[0].shape[0]\n device = cls_scores[0].device\n gt_bboxes = gt_meta[\"gt_bboxes\"]\n gt_labels = gt_meta[\"gt_labels\"]\n gt_bboxes_ignore = None\n\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n\n cls_reg_targets = self.target_assign(\n batch_size,\n featmap_sizes,\n gt_bboxes,\n gt_bboxes_ignore,\n gt_labels,\n device=device,\n )\n if cls_reg_targets is None:\n return None\n\n (\n grid_cells_list,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n bbox_weights_list,\n num_total_pos,\n num_total_neg,\n ) = cls_reg_targets\n\n num_total_samples = reduce_mean(\n torch.tensor(num_total_pos).to(device)).item()\n num_total_samples = max(num_total_samples, 1.0)\n\n losses_qfl, losses_bbox, 
losses_dfl, avg_factor = multi_apply(\n self.loss_single,\n grid_cells_list,\n cls_scores,\n bbox_preds,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n self.strides,\n num_total_samples=num_total_samples,\n )\n\n avg_factor = sum(avg_factor)\n avg_factor = reduce_mean(avg_factor).item()\n if avg_factor <= 0:\n loss_qfl = torch.tensor(\n 0, dtype=torch.float32, requires_grad=True).to(device)\n loss_bbox = torch.tensor(\n 0, dtype=torch.float32, requires_grad=True).to(device)\n loss_dfl = torch.tensor(\n 0, dtype=torch.float32, requires_grad=True).to(device)\n else:\n losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))\n losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))\n\n loss_qfl = sum(losses_qfl)\n loss_bbox = sum(losses_bbox)\n loss_dfl = sum(losses_dfl)\n\n loss = loss_qfl + loss_bbox + loss_dfl\n loss_states = dict(\n loss_qfl=loss_qfl, loss_bbox=loss_bbox, loss_dfl=loss_dfl)\n\n return loss, loss_states\n\n def loss_single(\n self,\n grid_cells,\n cls_score,\n bbox_pred,\n labels,\n label_weights,\n bbox_targets,\n stride,\n num_total_samples,\n ):\n\n grid_cells = grid_cells.reshape(-1, 4)\n cls_score = cls_score.permute(0, 2, 3,\n 1).reshape(-1, self.cls_out_channels)\n bbox_pred = bbox_pred.permute(0, 2, 3,\n 1).reshape(-1, 4 * (self.reg_max + 1))\n bbox_targets = bbox_targets.reshape(-1, 4)\n labels = labels.reshape(-1)\n label_weights = label_weights.reshape(-1)\n\n # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n bg_class_ind = self.num_classes\n pos_inds = torch.nonzero(\n (labels >= 0) & (labels < bg_class_ind), as_tuple=False).squeeze(1)\n\n score = label_weights.new_zeros(labels.shape)\n\n if len(pos_inds) > 0:\n pos_bbox_targets = bbox_targets[pos_inds]\n pos_bbox_pred = bbox_pred[pos_inds] # (n, 4 * (reg_max + 1))\n pos_grid_cells = grid_cells[pos_inds]\n pos_grid_cell_centers = self.grid_cells_to_center(\n pos_grid_cells) / stride\n\n weight_targets = cls_score.detach().sigmoid()\n weight_targets = weight_targets.max(dim=1)[0][pos_inds]\n pos_bbox_pred_corners = self.distribution_project(pos_bbox_pred)\n pos_decode_bbox_pred = distance2bbox(pos_grid_cell_centers,\n pos_bbox_pred_corners)\n pos_decode_bbox_targets = pos_bbox_targets / stride\n score[pos_inds] = bbox_overlaps(\n pos_decode_bbox_pred.detach(),\n pos_decode_bbox_targets,\n is_aligned=True)\n pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)\n target_corners = bbox2distance(pos_grid_cell_centers,\n pos_decode_bbox_targets,\n self.reg_max).reshape(-1)\n\n # regression loss\n loss_bbox = self.loss_bbox(\n pos_decode_bbox_pred,\n pos_decode_bbox_targets,\n weight=weight_targets,\n avg_factor=1.0,\n )\n\n # dfl loss\n loss_dfl = self.loss_dfl(\n pred_corners,\n target_corners,\n weight=weight_targets[:, None].expand(-1, 4).reshape(-1),\n avg_factor=4.0,\n )\n else:\n loss_bbox = bbox_pred.sum() * 0\n loss_dfl = bbox_pred.sum() * 0\n weight_targets = torch.tensor(0).to(cls_score.device)\n\n # qfl loss\n loss_qfl = self.loss_qfl(\n cls_score,\n (labels, score),\n weight=label_weights,\n avg_factor=num_total_samples,\n )\n\n return loss_qfl, loss_bbox, loss_dfl, weight_targets.sum()\n\n def target_assign(\n self,\n batch_size,\n featmap_sizes,\n gt_bboxes_list,\n gt_bboxes_ignore_list,\n gt_labels_list,\n device,\n ):\n \"\"\"\n Assign target for a batch of images.\n :param batch_size: num of images in one batch\n :param featmap_sizes: A list of all grid cell boxes in all image\n :param gt_bboxes_list: A list of ground truth boxes in all image\n :param 
gt_bboxes_ignore_list: A list of all ignored boxes in all image\n :param gt_labels_list: A list of all ground truth label in all image\n :param device: pytorch device\n :return: Assign results of all images.\n \"\"\"\n # get grid cells of one image\n multi_level_grid_cells = [\n self.get_grid_cells(\n featmap_sizes[i],\n self.grid_cell_scale,\n stride,\n dtype=torch.float32,\n device=device,\n ) for i, stride in enumerate(self.strides)\n ]\n mlvl_grid_cells_list = [\n multi_level_grid_cells for i in range(batch_size)\n ]\n\n # pixel cell number of multi-level feature maps\n num_level_cells = [\n grid_cells.size(0) for grid_cells in mlvl_grid_cells_list[0]\n ]\n num_level_cells_list = [num_level_cells] * batch_size\n # concat all level cells and to a single tensor\n for i in range(batch_size):\n mlvl_grid_cells_list[i] = torch.cat(mlvl_grid_cells_list[i])\n # compute targets for each image\n if gt_bboxes_ignore_list is None:\n gt_bboxes_ignore_list = [None for _ in range(batch_size)]\n if gt_labels_list is None:\n gt_labels_list = [None for _ in range(batch_size)]\n # target assign on all images, get list of tensors\n # list length = batch size\n # tensor first dim = num of all grid cell\n (\n all_grid_cells,\n all_labels,\n all_label_weights,\n all_bbox_targets,\n all_bbox_weights,\n pos_inds_list,\n neg_inds_list,\n ) = multi_apply(\n self.target_assign_single_img,\n mlvl_grid_cells_list,\n num_level_cells_list,\n gt_bboxes_list,\n gt_bboxes_ignore_list,\n gt_labels_list,\n )\n # no valid cells\n if any([labels is None for labels in all_labels]):\n return None\n # sampled cells of all images\n num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n # merge list of targets tensors into one batch then split to multi levels\n mlvl_grid_cells = images_to_levels(all_grid_cells, num_level_cells)\n mlvl_labels = images_to_levels(all_labels, num_level_cells)\n mlvl_label_weights = images_to_levels(all_label_weights,\n num_level_cells)\n mlvl_bbox_targets = images_to_levels(all_bbox_targets, num_level_cells)\n mlvl_bbox_weights = images_to_levels(all_bbox_weights, num_level_cells)\n return (\n mlvl_grid_cells,\n mlvl_labels,\n mlvl_label_weights,\n mlvl_bbox_targets,\n mlvl_bbox_weights,\n num_total_pos,\n num_total_neg,\n )\n\n def target_assign_single_img(self, grid_cells, num_level_cells, gt_bboxes,\n gt_bboxes_ignore, gt_labels):\n \"\"\"\n Using ATSS Assigner to assign target on one image.\n :param grid_cells: Grid cell boxes of all pixels on feature map\n :param num_level_cells: numbers of grid cells on each level's feature map\n :param gt_bboxes: Ground truth boxes\n :param gt_bboxes_ignore: Ground truths which are ignored\n :param gt_labels: Ground truth labels\n :return: Assign results of a single image\n \"\"\"\n device = grid_cells.device\n gt_bboxes = torch.from_numpy(gt_bboxes).to(device)\n gt_labels = torch.from_numpy(gt_labels).to(device)\n\n assign_result = self.assigner.assign(grid_cells, num_level_cells,\n gt_bboxes, gt_bboxes_ignore,\n gt_labels)\n\n pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds = self.sample(\n assign_result, gt_bboxes)\n\n num_cells = grid_cells.shape[0]\n bbox_targets = torch.zeros_like(grid_cells)\n bbox_weights = torch.zeros_like(grid_cells)\n labels = grid_cells.new_full((num_cells, ),\n self.num_classes,\n dtype=torch.long)\n label_weights = grid_cells.new_zeros(num_cells, dtype=torch.float)\n\n if len(pos_inds) > 0:\n pos_bbox_targets = pos_gt_bboxes\n 
bbox_targets[pos_inds, :] = pos_bbox_targets\n bbox_weights[pos_inds, :] = 1.0\n if gt_labels is None:\n # Only rpn gives gt_labels as None\n # Foreground is the first class\n labels[pos_inds] = 0\n else:\n labels[pos_inds] = gt_labels[pos_assigned_gt_inds]\n\n label_weights[pos_inds] = 1.0\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n\n return (\n grid_cells,\n labels,\n label_weights,\n bbox_targets,\n bbox_weights,\n pos_inds,\n neg_inds,\n )\n\n def sample(self, assign_result, gt_bboxes):\n pos_inds = (\n torch.nonzero(assign_result.gt_inds > 0,\n as_tuple=False).squeeze(-1).unique())\n neg_inds = (\n torch.nonzero(assign_result.gt_inds == 0,\n as_tuple=False).squeeze(-1).unique())\n pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1\n\n if gt_bboxes.numel() == 0:\n # hack for index error case\n assert pos_assigned_gt_inds.numel() == 0\n pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)\n else:\n if len(gt_bboxes.shape) < 2:\n gt_bboxes = gt_bboxes.view(-1, 4)\n pos_gt_bboxes = gt_bboxes[pos_assigned_gt_inds, :]\n return pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds\n\n def post_process(self, preds, meta):\n cls_scores, bbox_preds = preds\n result_list = self.get_bboxes(cls_scores, bbox_preds, meta)\n det_results = {}\n warp_matrixes = (\n meta[\"warp_matrix\"] if isinstance(meta[\"warp_matrix\"], list) else\n [meta[\"warp_matrix\"]])\n img_heights = (\n meta[\"img_info\"][\"height\"].cpu().numpy() if isinstance(\n meta[\"img_info\"][\"height\"], torch.Tensor) else\n [meta[\"img_info\"][\"height\"]])\n img_widths = (\n meta[\"img_info\"][\"width\"].cpu().numpy() if isinstance(\n meta[\"img_info\"][\"width\"], torch.Tensor) else\n [meta[\"img_info\"][\"width\"]])\n img_ids = (\n meta[\"img_info\"][\"id\"].cpu().numpy() if isinstance(\n meta[\"img_info\"][\"id\"], torch.Tensor) else\n [meta[\"img_info\"][\"id\"]])\n\n for result, img_width, img_height, img_id, warp_matrix in zip(\n result_list, img_widths, img_heights, img_ids, warp_matrixes):\n det_result = {}\n det_bboxes, det_labels = result\n det_bboxes = det_bboxes.cpu().numpy()\n det_bboxes[:, :4] = warp_boxes(det_bboxes[:, :4],\n np.linalg.inv(warp_matrix),\n img_width, img_height)\n classes = det_labels.cpu().numpy()\n for i in range(self.num_classes):\n inds = classes == i\n det_result[i] = np.concatenate(\n [\n det_bboxes[inds, :4].astype(np.float32),\n det_bboxes[inds, 4:5].astype(np.float32),\n ],\n axis=1,\n ).tolist()\n det_results[img_id] = det_result\n return det_results\n\n def show_result(self,\n img,\n dets,\n class_names,\n score_thres=0.3,\n show=True,\n save_path=None):\n result, all_predicted_boxed = overlay_bbox_cv(\n img, dets, class_names, score_thresh=score_thres)\n if show:\n cv2.imshow(\"det\", result)\n return result, all_predicted_boxed\n\n def get_bboxes(self, cls_scores, bbox_preds, img_metas, rescale=False):\n\n assert len(cls_scores) == len(bbox_preds)\n num_levels = len(cls_scores)\n device = cls_scores[0].device\n\n input_height, input_width = img_metas[\"img\"].shape[2:]\n input_shape = [input_height, input_width]\n\n result_list = []\n for img_id in range(cls_scores[0].shape[0]):\n cls_score_list = [\n cls_scores[i][img_id].detach() for i in range(num_levels)\n ]\n bbox_pred_list = [\n bbox_preds[i][img_id].detach() for i in range(num_levels)\n ]\n scale_factor = 1\n dets = self.get_bboxes_single(\n cls_score_list,\n bbox_pred_list,\n input_shape,\n scale_factor,\n device,\n rescale,\n )\n\n result_list.append(dets)\n return result_list\n\n def 
get_bboxes_single(self,\n cls_scores,\n bbox_preds,\n img_shape,\n scale_factor,\n device,\n rescale=False):\n \"\"\"\n Decode output tensors to bboxes on one image.\n :param cls_scores: classification prediction tensors of all stages\n :param bbox_preds: regression prediction tensors of all stages\n :param img_shape: shape of input image\n :param scale_factor: scale factor of boxes\n :param device: device of the tensor\n :return: predict boxes and labels\n \"\"\"\n assert len(cls_scores) == len(bbox_preds)\n mlvl_bboxes = []\n mlvl_scores = []\n for stride, cls_score, bbox_pred in zip(self.strides, cls_scores,\n bbox_preds):\n assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n featmap_size = cls_score.size()[-2:]\n y, x = self.get_single_level_center_point(\n featmap_size, stride, cls_score.dtype, device, flatten=True)\n center_points = torch.stack([x, y], dim=-1)\n scores = (\n cls_score.permute(1, 2,\n 0).reshape(-1,\n self.cls_out_channels).sigmoid())\n bbox_pred = bbox_pred.permute(1, 2, 0)\n bbox_pred = self.distribution_project(bbox_pred) * stride\n\n nms_pre = 1000\n if scores.shape[0] > nms_pre:\n max_scores, _ = scores.max(dim=1)\n _, topk_inds = max_scores.topk(nms_pre)\n center_points = center_points[topk_inds, :]\n bbox_pred = bbox_pred[topk_inds, :]\n scores = scores[topk_inds, :]\n\n bboxes = distance2bbox(\n center_points, bbox_pred, max_shape=img_shape)\n mlvl_bboxes.append(bboxes)\n mlvl_scores.append(scores)\n\n mlvl_bboxes = torch.cat(mlvl_bboxes)\n if rescale:\n mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\n\n mlvl_scores = torch.cat(mlvl_scores)\n # add a dummy background class at the end of all labels\n # same with mmdetection2.0\n padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)\n\n det_bboxes, det_labels = multiclass_nms(\n mlvl_bboxes,\n mlvl_scores,\n score_thr=0.05,\n nms_cfg=dict(type=\"nms\", iou_threshold=0.6),\n max_num=100,\n )\n return det_bboxes, det_labels\n\n def get_single_level_center_point(self,\n featmap_size,\n stride,\n dtype,\n device,\n flatten=True):\n \"\"\"\n Generate pixel centers of a single stage feature map.\n :param featmap_size: height and width of the feature map\n :param stride: down sample stride of the feature map\n :param dtype: data type of the tensors\n :param device: device of the tensors\n :param flatten: flatten the x and y tensors\n :return: y and x of the center points\n \"\"\"\n h, w = featmap_size\n x_range = (torch.arange(w, dtype=dtype, device=device) + 0.5) * stride\n y_range = (torch.arange(h, dtype=dtype, device=device) + 0.5) * stride\n y, x = torch.meshgrid(y_range, x_range)\n if flatten:\n y = y.flatten()\n x = x.flatten()\n return y, x\n\n def get_grid_cells(self, featmap_size, scale, stride, dtype, device):\n \"\"\"\n Generate grid cells of a feature map for target assignment.\n :param featmap_size: Size of a single level feature map.\n :param scale: Grid cell scale.\n :param stride: Down sample stride of the feature map.\n :param dtype: Data type of the tensors.\n :param device: Device of the tensors.\n :return: Grid_cells xyxy position. 
Size should be [feat_w * feat_h, 4]\n \"\"\"\n cell_size = stride * scale\n y, x = self.get_single_level_center_point(\n featmap_size, stride, dtype, device, flatten=True)\n grid_cells = torch.stack(\n [\n x - 0.5 * cell_size,\n y - 0.5 * cell_size,\n x + 0.5 * cell_size,\n y + 0.5 * cell_size,\n ],\n dim=-1,\n )\n return grid_cells\n\n def grid_cells_to_center(self, grid_cells):\n \"\"\"\n Get center location of each gird cell\n :param grid_cells: grid cells of a feature map\n :return: center points\n \"\"\"\n cells_cx = (grid_cells[:, 2] + grid_cells[:, 0]) / 2\n cells_cy = (grid_cells[:, 3] + grid_cells[:, 1]) / 2\n return torch.stack([cells_cx, cells_cy], dim=-1)\n", "id": "8361867", "language": "Python", "matching_score": 4.961032390594482, "max_stars_count": 0, "path": "mmdet3d/models/dense_heads/gfl_head.py" }, { "content": "model = dict(\n type='OneStageDetector',\n backbone=dict(\n type='EfficientNetLite',\n model_name='efficientnet_lite2',\n out_stages=[1, 2, 4, 6],\n activation='LeakyReLU',\n pretrain=True),\n neck=dict(\n type='TAN',\n in_channels=[48, 120, 352],\n out_channels=128,\n feature_hw=[32, 32],\n num_heads=8,\n num_encoders=1,\n mlp_ratio=4,\n dropout_ratio=0.1,\n activation='LeakyReLU'),\n head=dict(\n type='NanoDetHead',\n num_classes=80,\n input_channel=128,\n feat_channels=128,\n stacked_convs=4,\n activation='LeakyReLU',\n share_cls_reg=True,\n octave_base_scale=5,\n scales_per_octave=1,\n strides=[16],\n reg_max=10,\n norm_cfg=dict(type='BN'),\n loss=dict(\n loss_qfl=dict(\n type='QualityFocalLoss',\n use_sigmoid=True,\n beta=2.0,\n loss_weight=1.0),\n loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),\n loss_bbox=dict(type='GIoULoss', loss_weight=2.0))),\n head_semantic_stuff=dict(\n type='SemanticHeadStuff',\n in_ch32=128,\n in_ch64=48,\n in_ch128=24,\n hidden_ch=256,\n class_ts=133,\n droprate=0))\n", "id": "4771838", "language": "Python", "matching_score": 1.907011866569519, "max_stars_count": 0, "path": "configs/_base_/models/nanodet.py" }, { "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .anchor3d_head import Anchor3DHead\nfrom .anchor_free_mono3d_head import AnchorFreeMono3DHead\nfrom .base_conv_bbox_head import BaseConvBboxHead\nfrom .base_mono3d_dense_head import BaseMono3DDenseHead\nfrom .centerpoint_head import CenterHead\nfrom .fcos_mono3d_head import FCOSMono3DHead\nfrom .free_anchor3d_head import FreeAnchor3DHead\nfrom .groupfree3d_head import GroupFree3DHead\nfrom .parta2_rpn_head import PartA2RPNHead\nfrom .shape_aware_head import ShapeAwareHead\nfrom .ssd_3d_head import SSD3DHead\nfrom .vote_head import VoteHead\nfrom .center3d_head import Center3DHead\nfrom .points2d_classification_head import Points2DClassificationHead\nfrom .gfl_head import GFLHead\nfrom .nanodet_head import NanoDetHead\nfrom .semantic_head_stuff import SemanticHeadStuff\n\n__all__ = [\n 'Anchor3DHead', 'FreeAnchor3DHead', 'PartA2RPNHead', 'VoteHead',\n 'SSD3DHead', 'BaseConvBboxHead', 'CenterHead', 'ShapeAwareHead',\n 'BaseMono3DDenseHead', 'AnchorFreeMono3DHead', 'FCOSMono3DHead',\n 'GroupFree3DHead', 'Center3DHead', 'Points2DClassificationHead', 'GFLHead',\n 'NanoDetHead', 'SemanticHeadStuff'\n]\n", "id": "6725936", "language": "Python", "matching_score": 2.6340131759643555, "max_stars_count": 0, "path": "mmdet3d/models/dense_heads/__init__.py" }, { "content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nfrom .transformer import GroupFree3DMHA\nfrom .vote_module import VoteModule\nfrom .deform_conv_layers import DeformConvBlock, ModulatedDeformConvBlock\n\n__all__ = [\n 'VoteModule', 'GroupFree3DMHA', 'DeformConvBlock',\n 'ModulatedDeformConvBlock'\n]\n", "id": "10014034", "language": "Python", "matching_score": 0.47647717595100403, "max_stars_count": 0, "path": "mmdet3d/models/model_utils/__init__.py" }, { "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .pillar_scatter import PointPillarsScatter\nfrom .sparse_encoder import SparseEncoder\nfrom .sparse_unet import SparseUNet\nfrom .sparse_encoder_aux import SparseEncoder_AUX\nfrom .sparse_encoderv2 import SparseEncoderV2\n\n__all__ = [\n 'PointPillarsScatter', 'SparseEncoder', 'SparseUNet', 'SparseEncoder_AUX',\n 'SparseEncoderV2'\n]\n", "id": "12843813", "language": "Python", "matching_score": 0.37799662351608276, "max_stars_count": 0, "path": "mmdet3d/models/middle_encoders/__init__.py" }, { "content": "# %%\nimport open3d as o3d\nimport multiprocessing\nfrom os import path\nimport numpy as np\nimport struct\nimport glob\nimport pandas as pd\nfrom multiprocessing import pool\nimport multiprocessing\n\n\n# %%\ndef read_points(points_path):\n points_list = []\n with open(points_path, 'rb') as f:\n byte = f.read(16)\n while byte:\n x, y, z, intensity = struct.unpack('ffff', byte)\n points_list.append([x, y, z])\n byte = f.read(16)\n return points_list\n\n\ndef pcd2img(pcd: o3d.geometry.PointCloud, filename=None):\n \"\"\"save or show pcd to image with filename.\n\n Args:\n pcd (o3d.geometry.PointCloud): point cloud type from open3d.\n filename: if None then show img, else save to local. \n \"\"\"\n vis = o3d.visualization.Visualizer()\n vis.create_window(visible=False)\n vis.add_geometry(pcd)\n vis.poll_events()\n vis.update_renderer()\n filename = filename if filename else \"./output_pcd.png\"\n vis.capture_screen_image(filename)\n vis.destroy_window()\n\n\n# %%\npoints_paths = glob.glob('data/sunrgbd/points/*.bin')\n\n# %%\n# Try to figure out the format of the bin file first.\n# save one point cloud and show it.\n# save one point cloud\ntmp_path = points_paths[0]\npcd_np = np.array(read_points(tmp_path))\n# %%\npcd_o3d = o3d.geometry.PointCloud()\npcd_o3d.points = o3d.utility.Vector3dVector(pcd_np)\npcd2img(pcd_o3d)\n\n# %%\n", "id": "10157115", "language": "Python", "matching_score": 0.12816166877746582, "max_stars_count": 0, "path": "data_wrangle.py" } ]
2.178023
jamiebdavis
[ { "content": "import unittest\n\nfrom api import app\n\n\nclass GetBooksTest(unittest.TestCase):\n\n def setUp(self):\n self.app = app.test_client()\n\n def test_successful_getbooks(self):\n\n response = self.app.get(\n '/books', headers={\"Content-Type\": \"application/json\"})\n self.assertEqual(200, response.status_code)\n\n def test_successful_getbookssize(self):\n\n response = self.app.get(\n '/books', headers={\"Content-Type\": \"application/json\"})\n self.assertEqual(5, len(response.json['books']))\n", "id": "1999019", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/test_getbooks.py" } ]
0
gregd33
[ { "content": "import sys\n\n### General context manager usage:\n\n# ctx = CaptureExecution()\n# with ctx:\n# while True:\n# if <something -- widget change or validation function returns True>:\n# break\n# ctx.step()\n\nclass CaptureExecution:\n \"A context manager to capture execute_request events then either replay or disregard them after exiting the manager\"\n def __init__(self, replay=True):\n self.captured_events = []\n self._replay = replay\n self.shell = get_ipython()\n self.kernel = self.shell.kernel\n \n def step(self):\n self.kernel.do_one_iteration() \n \n def capture_event(self, stream, ident, parent):\n \"A 'capture' function to register instead of the default execute_request handling\"\n self.captured_events.append((stream, ident, parent))\n\n def start_capturing(self):\n \"Overwrite the kernel shell handler to capture instead of executing new cell-execution requests\"\n self.kernel.shell_handlers['execute_request'] = self.capture_event\n\n def stop_capturing(self):\n \"revert the kernel shell handler to the default execute_request behavior\"\n self.kernel.shell_handlers['execute_request'] = self.kernel.execute_request\n \n def replay_captured_events(self):\n \"Called at end of context -- replays all captured events once the default execution handler is in place\"\n # need to flush before replaying so messages show up in current cell not replay cells\n sys.stdout.flush() \n sys.stderr.flush()\n for stream, ident, parent in self.captured_events:\n # Using kernel.set_parent is the key to getting the output of the replayed events\n # to show up in the cells that were captured instead of the current cell\n self.kernel.set_parent(ident, parent) \n self.kernel.execute_request(stream, ident, parent)\n \n def __enter__(self):\n self.start_capturing()\n self.shell.execution_count += 1 # increment execution count to avoid collision error\n \n def __exit__(self, exc_type, exc_value, traceback):\n if exc_type is not None:\n # let the error propogate up, such as a keyboard interrupt while capturing cell execution\n return False\n self.stop_capturing()\n if self._replay:\n self.replay_captured_events()\n\n ", "id": "9909093", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "ipython_blocking/ipython_blocking.py" }, { "content": "from .ipython_blocking import CaptureExecution\nimport ipywidgets as widgets\nimport time\nimport types\n\nfrom IPython.core import magic_arguments\nfrom IPython.core.magic import (\n Magics,\n magics_class,\n line_magic,\n)\nfrom IPython.display import Javascript, display\n\ndef run_all_below():\n js = 'Jupyter.notebook.select_next().execute_cells_below()'\n display(Javascript(js))\n\n@magics_class \nclass CaptureMagic(Magics):\n def capture(self, breaking_func, timeout=None, replay=True):\n if timeout:\n timeout = int(timeout)\n start = time.time()\n ctx = CaptureExecution(replay=replay)\n with ctx:\n while True:\n if breaking_func():\n break\n if timeout:\n if (time.time() - start) >= timeout:\n break\n ctx.step()\n \n @line_magic\n @magic_arguments.magic_arguments()\n @magic_arguments.argument('break_value', help='Widget object or function that defines a break from the blocking context')\n @magic_arguments.argument('-t', '--timeout', default=None, help=\"Timeout in seconds to stop capturing\")\n def block(self, line):\n line = line.strip()\n args = magic_arguments.parse_argstring(self.block, line)\n \n obj = get_ipython().user_ns[args.break_value]\n ### Support the following options for a break value:\n ### 1) a callable function that 
will break when the function returns True\n ### 2) a ValueWidget, which will break when the value changes\n ### 3) a ButtonWidget, which will break when it is clicked\n if isinstance(obj, (types.FunctionType, types.MethodType)):\n func = obj\n elif isinstance(obj, widgets.ValueWidget):\n starting_value = obj.value\n func = lambda: obj.value != starting_value\n elif isinstance(obj, widgets.Button):\n obj._has_been_clicked = False\n def handler(w):\n w._has_been_clicked = True\n obj.on_click(handler)\n func = lambda: obj._has_been_clicked\n else:\n raise Exception('The positional argument to %block should be a ValueWidget, Button, or a function/method')\n return self.capture(func, args.timeout)\n \n @line_magic\n @magic_arguments.magic_arguments()\n @magic_arguments.argument('button_widget', help='A Button Widget to block-and-run-all-below on')\n @magic_arguments.argument('-t', '--timeout', default=None, help=\"Timeout in seconds to stop capturing\")\n def blockrun(self, line):\n line = line.strip()\n args = magic_arguments.parse_argstring(self.blockrun, line)\n \n obj = get_ipython().user_ns[args.button_widget]\n ### When a cell -> run all is executed, add a _has_been_clicked attribute to the widget\n ### and a handler for a \"run all cells below\"\n \n ### If _has_been_clicked is True, do nothing (let the rest of the notebook run)\n ### Otherwise block and replay=False\n if not hasattr(obj, '_has_been_clicked'):\n obj._has_been_clicked = False\n \n def handler(w):\n w._has_been_clicked = True\n run_all_below()\n obj.on_click(handler) \n\n if not getattr(obj, '_has_been_clicked'):\n return self.capture(lambda: obj._has_been_clicked, replay=False)\n \n\ndef load_ipython_extensions():\n get_ipython().register_magics(CaptureMagic)", "id": "1825570", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "ipython_blocking/ipython_magic.py" } ]
0
kkroening
[ { "content": "from setuptools import setup\n\n\nversion = '0.1.0'\ndownload_url = 'https://github.com/kkroening/daglet/archive/v{}.zip'.format(version)\n\n\nkeywords = [\n 'actor',\n 'acyclic'\n 'analyze',\n 'async',\n 'bfs',\n 'breadth-first',\n 'concurrent sequential processes',\n 'connected',\n 'csp',\n 'cycle',\n 'dag',\n 'depth-first',\n 'dfs',\n 'dijkstra',\n 'directed',\n 'dsp',\n 'edge',\n 'fp',\n 'frp',\n 'functional programming',\n 'functional reactive programming',\n 'functional',\n 'graph',\n 'hamiltonian',\n 'monad',\n 'pipe',\n 'pipeline',\n 'pipes',\n 'routing',\n 'search',\n 'sort',\n 'spanning',\n 'spanning',\n 'stream',\n 'structure',\n 'topological',\n 'topology',\n 'toposort',\n 'undirected',\n 'vertex',\n 'vertices',\n]\n\n\nsetup(\n name='daglet',\n packages=['daglet'],\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n version=version,\n description='DAG tools for Python',\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/kkroening/daglet',\n download_url=download_url,\n keywords=keywords,\n long_description='DAG tools for Python',\n install_requires=['future'],\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "id": "10420834", "language": "Python", "matching_score": 4.161722183227539, "max_stars_count": 0, "path": "setup.py" }, { "content": "from setuptools import setup\n\n\nversion = '0.1.0'\n\n\nsetup(\n name='jupyter-renderer-widget',\n packages=['jupyter_renderer_widget'],\n version=version,\n description='Renderer widget for JupyterLab',\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/kkroening/jupyterlab-renderer-widget',\n long_description='Renderer widget for JupyterLab',\n install_requires=[\n 'ffmpeg-python',\n 'ipywidgets',\n 'matplotlib',\n 'numpy',\n 'Pillow',\n 'tqdm',\n ],\n extras_require={\n 'dev': [\n 'pytest',\n 'pytest-runner',\n 'sphinx',\n ],\n },\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n", "id": "1295395", "language": "Python", "matching_score": 1.116199254989624, "max_stars_count": 0, "path": "setup.py" }, { "content": "from contextlib import closing\nfrom contextlib import contextmanager\nfrom IPython.display import display\nfrom matplotlib import pyplot as plt\nfrom tqdm.notebook import tqdm\nimport ffmpeg\nimport io\nimport IPython.display\nimport ipywidgets\nimport numpy as np\nimport PIL.Image\n\n\nDEFAULT_WIDTH = 960\nDEFAULT_HEIGHT = 540\nDEFAULT_DPI = 96\n\n\n@contextmanager\ndef video_pipe_context(filename, width, height):\n process = (\n ffmpeg.input(\n 'pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height)\n )\n .output(filename, 
pix_fmt='yuv420p')\n .overwrite_output()\n .run_async(pipe_stdin=True)\n )\n with closing(process.stdin) as pipe:\n yield pipe\n process.wait()\n\n\ndef to_uint8_rgb(image):\n if image.dtype != np.uint8:\n # TODO: possibly normalize values.\n image = image.astype(np.uint8)\n if len(image.shape) == 2:\n image = image[:, :, np.newaxis]\n depth = 1\n elif len(image.shape) == 3:\n depth = image.shape[-1]\n else:\n raise ValueError('image shape must be 2D or 3D; got {}'.format(image.shape))\n if depth == 1:\n image = np.tile(image, [1, 1, 3])\n elif depth == 4:\n image = image[:, :, :3]\n elif depth != 3:\n raise ValueError(\n 'image depth must be either 1 (grayscale), 3 (RGB), or 4 (RGBA); got {}'.format(\n image.shape\n )\n )\n return image\n\n\ndef render_video(\n out_filename, render_func, frame_count, tqdm=None, start_frame=0, end_frame=-1\n):\n first_frame = to_uint8_rgb(render_func(0))\n height, width, depth = first_frame.shape\n if end_frame < 0:\n end_frame = frame_count + end_frame\n assert end_frame >= 0\n frame_nums = range(start_frame, end_frame + 1)\n if tqdm is not None:\n frame_nums = tqdm(frame_nums)\n with video_pipe_context(out_filename, width, height) as pipe:\n for frame_num in frame_nums:\n if frame_num == 0:\n frame = first_frame\n else:\n frame = to_uint8_rgb(render_func(frame_num))\n if frame.shape != first_frame.shape:\n raise ValueError(\n 'image shape changed from {} to {}'.format(\n first_frame.shape, frame.shape\n )\n )\n pipe.write(frame.tobytes())\n\n\ndef save_pyplot_fig_as_numpy(fig, dpi=DEFAULT_DPI):\n # matplotlib.rcParams['savefig.pad_inches'] = 0\n buf = io.BytesIO()\n fig.savefig(buf, format='png', dpi=dpi)\n buf.seek(0)\n pil_image = PIL.Image.open(buf).convert('RGB')\n return np.array(pil_image)\n\n\ndef _init_pyplot(width, height, dpi):\n figsize = (int(width / dpi), int(height / dpi))\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n fig.tight_layout()\n return fig, ax\n\n\ndef render_pyplot_video(\n out_filename,\n render_func,\n frame_count,\n tqdm=None,\n width=DEFAULT_WIDTH,\n height=DEFAULT_HEIGHT,\n dpi=DEFAULT_DPI,\n):\n # old_backend = plt.get_backend()\n # plt.switch_backend('agg')\n try:\n fig, ax = _init_pyplot(width, height, dpi)\n\n def do_render(frame_num):\n ax.clear()\n render_func(ax, frame_num)\n return save_pyplot_fig_as_numpy(fig, dpi=dpi)\n\n render_video(out_filename, do_render, frame_count, tqdm)\n finally:\n # plt.switch_backend(old_backend)\n fig.clear()\n\n\nclass AutoplayVideo(IPython.display.Video):\n def __init__(\n self,\n data=None,\n url=None,\n filename=None,\n embed=False,\n mimetype=None,\n width=None,\n height=None,\n controls=True,\n autoplay=True,\n loop=True,\n ):\n \"\"\"An ipywidgets :obj:`Video` that automatically starts playing when displayed.\"\"\"\n super(AutoplayVideo, self).__init__(\n data, url, filename, embed, mimetype, width, height\n )\n self.controls = controls\n self.autoplay = autoplay\n self.loop = loop\n\n def _repr_html_(self):\n assert not self.embed, 'Embedding not implemented (yet)'\n options = []\n if self.width:\n options.append('width={}'.format(self.width))\n if self.height:\n options.append('height={}'.format(self.height))\n if self.autoplay:\n options.append('autoplay')\n if self.controls:\n options.append('controls')\n if self.loop:\n options.append('loop')\n url = self.url if self.url is not None else self.filename\n disclaimer = 'Your browser does not support the <code>video</code> element.'\n return '<video src=\"{}\" {}>{}</video>'.format(\n url, ' '.join(options), disclaimer\n 
)\n\n\nclass Renderer(ipywidgets.VBox):\n def __init__(\n self,\n render_func,\n frame_count,\n out_filename='out.mp4',\n width=None,\n height=None,\n preview=True,\n preview_frame=0,\n ):\n render_button = ipywidgets.Button(description='Render')\n render_button.on_click(self._on_render)\n out = ipywidgets.Output()\n super(Renderer, self).__init__([render_button, out])\n self.render_func = render_func\n self.frame_count = frame_count\n self.preview_frame = min(preview_frame, self.frame_count - 1)\n self.out_filename = out_filename\n self.width = width\n self.height = height\n self.out = out\n self.start_frame = 0\n self.end_frame = frame_count - 1\n if preview:\n with self.out:\n frame_range = (0, frame_count - 1, 1)\n\n @ipywidgets.interact(\n preview_frame=frame_range,\n start_frame=frame_range,\n end_frame=frame_range,\n )\n def show_preview(\n preview_frame=self.preview_frame,\n start_frame=self.start_frame,\n end_frame=self.end_frame,\n ):\n self._show_preview(preview_frame)\n\n def _show_preview(self, preview_frame):\n image = self.render_func(preview_frame)\n rgb_image = to_uint8_rgb(image)\n display(PIL.Image.fromarray(rgb_image, 'RGB'))\n\n def _render(self):\n render_video(\n self.out_filename,\n self.render_func,\n self.frame_count,\n tqdm=tqdm,\n start_frame=self.start_frame,\n end_frame=self.end_frame,\n )\n\n def _on_render(self, event):\n with self.out:\n self._render()\n self.out.clear_output()\n display(AutoplayVideo(self.out_filename, width=self.width))\n\n\nclass PyplotRenderer(Renderer):\n def __init__(\n self,\n render_func,\n frame_count,\n out_filename='out.mp4',\n width=DEFAULT_WIDTH,\n height=DEFAULT_HEIGHT,\n dpi=DEFAULT_DPI,\n preview=True,\n ):\n self.dpi = dpi\n super(PyplotRenderer, self).__init__(\n render_func, frame_count, out_filename, width, height, preview\n )\n\n def _show_preview(self, preview_frame):\n fig, ax = _init_pyplot(self.width, self.height, self.dpi)\n self.render_func(ax, preview_frame)\n\n def _render(self):\n render_pyplot_video(\n self.out_filename,\n self.render_func,\n self.frame_count,\n width=self.width,\n height=self.height,\n tqdm=tqdm,\n dpi=self.dpi,\n )\n", "id": "10723225", "language": "Python", "matching_score": 1.288582682609558, "max_stars_count": 0, "path": "jupyter_renderer_widget/__init__.py" }, { "content": "from __future__ import unicode_literals\n\nfrom ._utils import get_hash_int\nfrom builtins import object\nfrom collections import defaultdict\nimport copy\n\n\ndef _arg_kwarg_repr(args=[], kwargs={}):\n items = ['{}'.format(arg) for arg in args]\n items += ['{}={}'.format(key, kwargs[key]) for key in sorted(kwargs)]\n return ', '.join(items)\n\n\nclass Vertex(object):\n \"\"\"Vertex in a directed-acyclic graph (DAG).\n\n Hashing:\n Vertices must be hashable, and two vertices are considered to be equivalent if they have the same hash value.\n\n Vertices are immutable, and the hash should remain constant as a result. 
If a vertex with new contents is\n required, create a new vertex and throw the old one away.\n \"\"\"\n def __init__(self, label=None, parents=[], extra_hash=None):\n for parent in parents:\n if not isinstance(parent, Vertex):\n raise TypeError('Expected Vertex instance; got {}'.format(parent))\n parents = sorted(parents)\n self.__parents = parents\n self.__label = copy.copy(label)\n self.__extra_hash = copy.copy(extra_hash)\n self.__hash = get_hash_int([label, parents, extra_hash]) % (2**63)\n\n @property\n def parents(self):\n return self.__parents\n\n def get_parents(self):\n return self.parents\n\n @property\n def label(self):\n return self.__label\n\n @property\n def extra_hash(self):\n return self.__extra_hash\n\n def __hash__(self):\n return self.__hash\n\n def __lt__(self, other):\n return hash(self) < hash(other)\n\n def __le__(self, other):\n return hash(self) <= hash(other)\n\n def __eq__(self, other):\n return hash(self) == hash(other)\n\n def __ne__(self, other):\n return hash(self) != hash(other)\n\n def __ge__(self, other):\n return hash(self) >= hash(other)\n\n def __gt__(self, other):\n return hash(self) > hash(other)\n\n @property\n def short_hash(self):\n return '{:x}'.format(abs(hash(self)))[:8]\n\n def get_repr(self, include_hash=True):\n args = []\n if self.__label is not None:\n label_text = repr(self.__label)\n if label_text.startswith('u'):\n label_text = label_text[1:]\n args.append(label_text)\n if self.__parents or self.__extra_hash:\n args.append('...')\n ret = 'daglet.Vertex({})'.format(_arg_kwarg_repr(args))\n if include_hash:\n ret = '{} <{}>'.format(ret, self.short_hash)\n return ret\n\n def __repr__(self):\n return self.get_repr()\n\n def clone(self, **kwargs):\n base_kwargs = {\n 'label': self.__label,\n 'parents': self.__parents,\n 'extra_hash': self.__extra_hash,\n }\n base_kwargs.update(kwargs)\n return Vertex(**base_kwargs)\n\n def transplant(self, new_parents):\n \"\"\"Create a copy of this Vertex with new parent edges.\"\"\"\n return Vertex(self.__label, new_parents, self.__extra_hash)\n\n def vertex(self, label=None, extra_hash=None):\n \"\"\"Create downstream vertex with specified label.\n\n Example:\n The following example creates a DAG with three vertices connected with two edges (``n1 -> n2 -> n3``):\n ```\n n3 = daglet.Vertex('n1').vertex('n2').vertex('n3')\n ```\n \"\"\"\n return Vertex(label, [self], extra_hash)\n\n\ndef __check_parent_func(objs, parent_func):\n if parent_func is None:\n if any(not isinstance(obj, Vertex) for obj in objs):\n raise TypeError('`parent_func` must be specified if objects are not daglet.Vertex instances')\n parent_func = Vertex.get_parents\n return parent_func\n\n\ndef toposort(objs, parent_func=None, tree=False):\n parent_func = __check_parent_func(objs, parent_func)\n marked_objs = set()\n sorted_objs = []\n\n def visit(obj, child_obj):\n if not tree and obj in marked_objs:\n # TODO: optionally break cycles.\n raise RuntimeError('Graph is not a DAG; recursively encountered {}'.format(obj))\n\n if tree or obj not in sorted_objs:\n parent_objs = parent_func(obj)\n\n marked_objs.add(obj)\n for parent_obj in parent_objs:\n visit(parent_obj, obj)\n marked_objs.remove(obj)\n\n sorted_objs.append(obj)\n\n unvisited_objs = copy.copy(objs)\n while unvisited_objs:\n obj = unvisited_objs.pop()\n visit(obj, None)\n return sorted_objs\n\n\ndef transform(objs, parent_func=None, vertex_func=None, edge_func=None, vertex_map={}):\n parent_func = __check_parent_func(objs, parent_func)\n if vertex_func is None:\n vertex_func = 
lambda obj, parent_values: None\n if vertex_map is not None:\n old_parent_func = parent_func\n parent_func = lambda x: old_parent_func(x) if x not in vertex_map else []\n if edge_func is None:\n edge_func = lambda parent_obj, obj, parent_value: parent_value\n\n sorted_objs = toposort(objs, parent_func)\n\n new_vertex_map = {}\n new_edge_map = {}\n for obj in sorted_objs:\n if obj in vertex_map:\n value = vertex_map[obj]\n else:\n parent_objs = parent_func(obj)\n parent_values = []\n for parent_obj in parent_objs:\n value = edge_func(parent_obj, obj, new_vertex_map[parent_obj])\n new_edge_map[parent_obj, obj] = value\n parent_values.append(value)\n value = vertex_func(obj, parent_values)\n new_vertex_map[obj] = value\n\n return new_vertex_map, new_edge_map\n\n\ndef transform_vertices(objs, parent_func, vertex_func, vertex_map={}):\n vertex_map, _ = transform(objs, parent_func, vertex_func, None, vertex_map)\n return vertex_map\n\n\ndef transform_edges(objs, parent_func, edge_func):\n _, edge_map = transform(objs, parent_func, None, edge_func)\n return edge_map\n\n\ndef get_parent_map(objs, parent_func):\n sorted_objs = toposort(objs, parent_func)\n parent_map = defaultdict(set)\n for obj in sorted_objs:\n for parent in parent_func(obj):\n parent_map[obj].append(parent)\n return parent_map\n\n\ndef get_child_map(objs, parent_func):\n sorted_objs = toposort(objs, parent_func)\n child_map = defaultdict(set)\n for obj in sorted_objs:\n for parent in parent_func(obj):\n child_map[parent].add(obj)\n return child_map\n\n\nfrom .view import view\n(view) # silence linter\n", "id": "269584", "language": "Python", "matching_score": 2.8342926502227783, "max_stars_count": 0, "path": "daglet/__init__.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom builtins import object\nfrom builtins import range\nfrom daglet._utils import get_hash_int, get_hash\nfrom functools import reduce\nfrom past.builtins import basestring\nfrom textwrap import dedent\nimport copy\nimport daglet\nimport operator\nimport subprocess\n\n\ndef test__get_hash():\n assert get_hash(None) == '6adf97f83acf6453d4a6a4b1070f3754'\n assert get_hash(5) == 'e4da3b7fbbce2345d7772b0674a318d5'\n assert get_hash({'a': 'b'}) == '31ee3af152948dc06066ec1a7a4c5f31'\n\n\ndef test__vertex_parents():\n v1 = daglet.Vertex()\n v2 = daglet.Vertex()\n v3 = v1.vertex('v3')\n v4 = v1.vertex('v4')\n v5 = v2.vertex('v5')\n assert v1.parents == []\n assert v3.vertex().parents == [v3]\n assert v4.vertex().parents == [v4]\n assert daglet.Vertex(parents=[v3, v5]).parents == sorted([v3, v5])\n assert daglet.Vertex(parents=[v5, v3]).parents == sorted([v3, v5])\n\n\ndef test__vertex_label():\n assert daglet.Vertex().label == None\n assert daglet.Vertex('v1').label == 'v1'\n\n\ndef test__vertex_hash():\n v1 = daglet.Vertex('v1')\n v2 = daglet.Vertex('v2', [v1])\n assert isinstance(hash(v1), int)\n assert hash(v1) == 352423289548818779\n assert hash(v2) == 5230371954595182985\n assert hash(v1) == hash(daglet.Vertex('v1'))\n assert hash(v2) != hash(v1)\n assert hash(v1) != hash(daglet.Vertex('v3'))\n\n\ndef test__vertex_extra_hash():\n assert daglet.Vertex().extra_hash == None\n assert daglet.Vertex(extra_hash=5).extra_hash == 5\n\n\ndef test__vertex_eq():\n assert daglet.Vertex() == daglet.Vertex()\n assert daglet.Vertex('v1') == daglet.Vertex('v1')\n assert daglet.Vertex('v1') != daglet.Vertex('v2')\n assert not (daglet.Vertex('v1') != daglet.Vertex('v1'))\n assert daglet.Vertex(extra_hash=1) == daglet.Vertex(extra_hash=1)\n 
assert daglet.Vertex(extra_hash=1) != daglet.Vertex(extra_hash=2)\n assert daglet.Vertex().vertex() == daglet.Vertex().vertex()\n assert daglet.Vertex().vertex() != daglet.Vertex()\n assert daglet.Vertex().vertex('v1') == daglet.Vertex().vertex('v1')\n assert daglet.Vertex().vertex('v1') != daglet.Vertex().vertex('v2')\n assert daglet.Vertex('v1').vertex() == daglet.Vertex('v1').vertex()\n assert daglet.Vertex('v1').vertex() != daglet.Vertex('v2').vertex()\n\n v1 = daglet.Vertex('v1')\n v2 = daglet.Vertex('v2')\n assert daglet.Vertex(parents=[v1, v2]) == daglet.Vertex(parents=[v1, v2])\n assert daglet.Vertex(parents=[v1, v2]) == daglet.Vertex(parents=[v2, v1])\n\n\ndef test__vertex_cmp():\n v1 = daglet.Vertex('v1')\n v2 = daglet.Vertex('v2')\n vs = sorted([v1, v2])\n assert vs == sorted([v2, v1])\n va = vs[0]\n vb = vs[1]\n assert hash(va) < hash(vb)\n assert va < vb\n assert not (vb < va)\n assert va <= vb\n assert not (vb <= va)\n assert va <= va\n assert va == va\n assert not (va == vb)\n assert va != vb\n assert not (va != va)\n assert vb >= va\n assert not (va >= vb)\n assert vb >= vb\n assert vb > va\n assert not (va > vb)\n\n\ndef test__vertex_short_hash():\n h1 = daglet.Vertex('v1').short_hash\n h2 = daglet.Vertex('v1').short_hash\n h3 = daglet.Vertex('v2').short_hash\n assert isinstance(h1, basestring)\n assert int(h1, base=16)\n assert len(h1) == 8\n assert h1 == h2\n assert h1 != h3\n\n\ndef test__vertex_get_repr():\n v1 = daglet.Vertex('v1')\n v2 = daglet.Vertex('v2', parents=[v1])\n v3 = v2.vertex('v3')\n assert v1.get_repr() == repr(v1)\n assert repr(v1) == \"daglet.Vertex('{}') <{}>\".format('v1', v1.short_hash)\n assert repr(v3) == \"daglet.Vertex('{}', ...) <{}>\".format('v3', v3.short_hash)\n assert v3.get_repr(include_hash=False) == \"daglet.Vertex('{}', ...)\".format('v3')\n assert daglet.Vertex().get_repr(include_hash=False) == 'daglet.Vertex()'\n assert daglet.Vertex(extra_hash=5).get_repr(include_hash=False) == 'daglet.Vertex(...)'\n assert v2.vertex().get_repr(include_hash=False) == 'daglet.Vertex(...)'\n assert daglet.Vertex(0).get_repr(include_hash=False) == 'daglet.Vertex(0)'\n\n\ndef test__vertex_clone():\n assert daglet.Vertex().clone(label='v2').label == 'v2'\n\n\ndef test__vertex_transplant():\n v2 = daglet.Vertex('v2')\n assert daglet.Vertex().transplant([v2]).parents == [v2]\n\n\ndef test__toposort():\n get_parents = lambda x: x.parents\n v1 = daglet.Vertex()\n v2 = v1.vertex()\n assert daglet.toposort([], get_parents) == []\n assert daglet.toposort([v1], get_parents) == [v1]\n assert daglet.toposort([v2], get_parents) == [v1, v2]\n\n v3 = daglet.Vertex('v3')\n v4 = v3.vertex('v4')\n v5 = v3.vertex('v5')\n v6 = v5.vertex('v6')\n v7 = v5.vertex('v7')\n v8 = daglet.Vertex('v8')\n v9 = daglet.Vertex('v9', [v4, v6, v7])\n v10 = daglet.Vertex('v10', [v3, v8])\n v11 = daglet.Vertex('v11')\n sorted_vertices = daglet.toposort([v4, v9, v10, v11], get_parents)\n assert sorted_vertices == [v11, v8, v3, v10, v5, v7, v4, v6, v9]\n\n\ndef test__transform():\n get_parents = lambda x: x.parents\n v1 = daglet.Vertex()\n v2 = v1.vertex()\n assert daglet.transform([], get_parents) == ({}, {})\n assert daglet.transform([v1], get_parents) == ({v1: None}, {})\n assert daglet.transform([v2], get_parents) == ({v1: None, v2: None}, {(v1, v2): None})\n vertex_dummy_func = lambda obj, parent_values: (obj, parent_values)\n edge_dummy_func = lambda parent, child, parent_value: 'test'\n assert daglet.transform([v2], get_parents, vertex_dummy_func, edge_dummy_func) == (\n {\n v1: (v1, 
[]),\n v2: (v2, ['test'])\n },\n {\n (v1, v2): 'test'\n }\n )\n\n v3 = daglet.Vertex('v3')\n v4 = v3.vertex('v4')\n v5 = v3.vertex('v5')\n v6 = v5.vertex('v6')\n v7 = v5.vertex('v7')\n v8 = daglet.Vertex('v8')\n v9 = daglet.Vertex('v9', [v4, v6, v7])\n v10 = daglet.Vertex('v10', [v3, v8])\n v11 = daglet.Vertex('v11')\n vertex_rank_func = lambda obj, parent_ranks: max(parent_ranks) + 1 if len(parent_ranks) else 0\n vertex_map, edge_map = daglet.transform([v4, v9, v10, v11], get_parents, vertex_rank_func)\n assert vertex_map == {\n v3: 0,\n v4: 1,\n v5: 1,\n v6: 2,\n v7: 2,\n v8: 0,\n v9: 3,\n v10: 1,\n v11: 0,\n }\n assert edge_map == {\n (v3, v4): 0,\n (v3, v5): 0,\n (v3, v10): 0,\n (v4, v9): 1,\n (v5, v6): 1,\n (v5, v7): 1,\n (v6, v9): 2,\n (v7, v9): 2,\n (v8, v10): 0,\n }\n\n debug = False\n if debug:\n vertex_labels = {\n v3: 'v3',\n v4: 'v4',\n v5: 'v5',\n v6: 'v6',\n v7: 'v7',\n v8: 'v8',\n v9: 'v9',\n v10: 'v10',\n v11: 'v11',\n }\n vertex_colors = {\n v3: 'red',\n v4: 'yellow',\n v5: 'purple',\n v6: 'purple',\n v7: 'lightblue',\n v8: 'green',\n v9: 'white',\n v11: 'orange',\n }\n daglet.view([v4, v9, v10, v11], get_parents, vertex_label_func=vertex_labels.get,\n vertex_color_func=vertex_colors.get)\n\n\ndef test__example__git():\n REPO_DIR = '.'\n\n def get_parent_hashes(commit_hash):\n return (subprocess\n .check_output(['git', 'rev-list', '--parents', '-n1', commit_hash], cwd=REPO_DIR)\n .decode()\n .strip()\n .split(' ')[1:]\n )\n\n def get_commit_message(commit_hash):\n return subprocess.check_output(['git', 'log', '-n1', '--pretty=short', commit_hash], cwd=REPO_DIR)\n\n class Commit(object):\n def __init__(self, commit_hash, parents):\n self.commit_hash = commit_hash\n self.parents = parents\n self.log = get_commit_message(commit_hash)\n\n vertex_map = daglet.transform_vertices(['HEAD'], get_parent_hashes, Commit)\n assert 'HEAD' in vertex_map\n assert all(isinstance(x, basestring) for x in vertex_map.keys())\n assert all(isinstance(x, Commit) for x in vertex_map.values())\n\n debug = False\n if debug:\n daglet.view(\n vertex_map.values(),\n rankdir=None,\n parent_func=lambda x: x.parents,\n vertex_label_func=lambda x: x.log,\n vertex_color_func=lambda x: 'lightblue',\n )\n\n\ndef test__example__vdom():\n class TextBuffer(object):\n def __init__(self, row_count, col_count):\n self.rows = [' ' * col_count] * row_count\n\n @property\n def row_count(self):\n return len(self.rows)\n\n @property\n def col_count(self):\n return len(self.rows[0])\n\n @property\n def text(self):\n return '\\n'.join(self.rows)\n\n def draw_text(self, row, col, text):\n assert len(text.split('\\n')) == 1 # FIXME\n if 0 <= row < len(self.rows):\n start = self.rows[row][:col]\n end = self.rows[row][col+len(text):]\n self.rows[row] = '{}{}{}'.format(start, text, end)[:self.col_count]\n\n def draw_border(self, row, col, row_count, col_count):\n V_CHAR = u'\\u2502'\n TL_CHAR = u'\\u250c'\n TR_CHAR = u'\\u2510'\n H_CHAR = u'\\u2500'\n BL_CHAR = u'\\u2514'\n BR_CHAR = u'\\u2518'\n self.draw_text(row, col, u'{}{}{}'.format(TL_CHAR, H_CHAR * (col_count - 2), TR_CHAR))\n for row2 in range(row + 1, row + row_count - 1):\n self.draw_text(row2, col, V_CHAR)\n self.draw_text(row2, col + col_count - 1, V_CHAR)\n self.draw_text(row + row_count - 1, col, u'{}{}{}'.format(BL_CHAR, H_CHAR * (col_count - 2), BR_CHAR))\n\n def draw_buf(self, row, col, buf):\n for row2 in range(buf.row_count):\n self.draw_text(row + row2, col, buf.rows[row2])\n\n class Component(object):\n def __init__(self, props={}, children=[]):\n 
self.props = props\n self.children = children\n\n def __hash__(self):\n child_hashes = [hash(x) for x in self.children]\n return get_hash_int([self.props, self.__class__.__name__, child_hashes])\n\n def __eq__(self, other):\n return hash(self) == hash(other)\n\n def expand(self):\n return self._expand()\n\n def collapse(self, children):\n return self._collapse(children)\n\n def textify(self, child_bufs):\n return self._textify(child_bufs)\n\n def _expand(self):\n return self.children\n\n def _collapse(self, children):\n collapsed = copy.copy(self)\n collapsed.children = children\n return collapsed\n\n def _textify(self, child_bufs):\n raise NotImplementedError()\n\n class Div(Component):\n def __init__(self, props, children):\n super(Div, self).__init__(props, children)\n\n def _textify(self, child_bufs):\n child_bufs = child_bufs or []\n row_count = reduce(operator.add, [x.row_count for x in child_bufs]) + 2\n col_count = max([x.col_count for x in child_bufs]) + 4\n buf = TextBuffer(row_count, col_count)\n row = 1\n for child_buf in child_bufs:\n buf.draw_buf(row, 2, child_buf)\n row += child_buf.row_count\n buf.draw_border(0, 0, row_count, col_count)\n return buf\n\n class Text(Component):\n def __init__(self, text):\n super(Text, self).__init__({'text': text})\n\n def _textify(self, child_bufs):\n buf = TextBuffer(1, len(self.props['text']))\n buf.draw_text(0, 0, self.props['text'])\n return buf\n\n class CompositeComponent(Component):\n def render(self):\n raise NotImplementedError()\n\n def _expand(self):\n return [self.render()]\n\n def _collapse(self, children):\n assert len(children) == 1\n return children[0]\n\n subpage_render_count = [0]\n\n class SubPage(CompositeComponent):\n def render(self):\n subpage_render_count[0] += 1\n out = Div({}, [\n Text('# sub page #'),\n ])\n return out\n\n class MainPage(CompositeComponent):\n def __init__(self, text):\n super(MainPage, self).__init__({'text': text})\n\n def render(self):\n return Div({}, [\n Div({}, [\n Text('# main page #'),\n Text('sub item'),\n Text(self.props['text']),\n ]),\n SubPage(),\n Text('sub sub item'),\n ])\n\n assert subpage_render_count[0] == 0\n\n # Create initial vdom.\n root = MainPage('some text')\n vdom = daglet.transform_vertices([root], Component.expand, Component.collapse)\n # FIXME: combine `toposort` and `transform` so that the parent_func only gets hit once; expect only one render.\n assert subpage_render_count[0] == 2\n\n # Turn vdom into text.\n rendered_root = vdom[root]\n buf_map = daglet.transform_vertices([rendered_root], lambda x: x.children, Component.textify)\n buf = buf_map[rendered_root]\n assert buf.text == dedent(\"\"\"\\\n ┌───────────────────┐\n │ ┌───────────────┐ │\n │ │ # main page # │ │\n │ │ sub item │ │\n │ │ some text │ │\n │ └───────────────┘ │\n │ ┌──────────────┐ │\n │ │ # sub page # │ │\n │ └──────────────┘ │\n │ sub sub item │\n └───────────────────┘\"\"\"\n )\n\n # Create new vdom incrementally.\n root2 = MainPage('some other text')\n vdom2 = daglet.transform_vertices([root2], Component.expand, Component.collapse, vertex_map=vdom)\n assert subpage_render_count[0] == 2\n\n # Turn vdom into text again, incrementally. 
Only redraw changed portions.\n rendered_root2 = vdom2[root2]\n buf_map2 = daglet.transform_vertices([rendered_root2], lambda x: x.children, Component.textify, vertex_map=buf_map)\n buf2 = buf_map2[rendered_root2]\n assert buf2.text == dedent(\"\"\"\\\n ┌─────────────────────┐\n │ ┌─────────────────┐ │\n │ │ # main page # │ │\n │ │ sub item │ │\n │ │ some other text │ │\n │ └─────────────────┘ │\n │ ┌──────────────┐ │\n │ │ # sub page # │ │\n │ └──────────────┘ │\n │ sub sub item │\n └─────────────────────┘\"\"\"\n )\n", "id": "12391480", "language": "Python", "matching_score": 3.204484224319458, "max_stars_count": 0, "path": "tests/test_daglet.py" }, { "content": "from __future__ import unicode_literals\n\nfrom builtins import str\nimport daglet\nimport tempfile\n\n\ndef __import_graphviz():\n try:\n import graphviz\n except ImportError:\n raise ImportError('failed to import graphviz; please make sure graphviz is installed (e.g. `pip install '\n 'graphviz`)')\n return graphviz\n\n\ndef __make_graph(objs, parent_func, rankdir, vertex_color_func, vertex_label_func, edge_label_func):\n graphviz = __import_graphviz()\n graph = graphviz.Digraph()\n graph.attr(rankdir=rankdir)\n\n sorted_objs = daglet.toposort(objs, parent_func)\n for child in sorted_objs:\n id = str(hash(child))\n label = vertex_label_func(child)\n color = vertex_color_func(child) if vertex_color_func is not None else None\n graph.node(id, label, shape='box', style='filled', fillcolor=color)\n\n for parent in parent_func(child):\n kwargs = {}\n edge_label = edge_label_func((parent, child))\n if edge_label is not None:\n kwargs['label'] = edge_label\n upstream_obj_id = str(hash(parent))\n downstream_obj_id = str(hash(child))\n graph.edge(upstream_obj_id, downstream_obj_id, **kwargs)\n\n return graph\n\n\ndef render(objs, parent_func, filename=None, rankdir='LR', vertex_color_func={}.get, vertex_label_func={}.get,\n edge_label_func={}.get):\n graph = __make_graph(objs, parent_func, rankdir, vertex_color_func, vertex_label_func, edge_label_func)\n if filename is None:\n filename = tempfile.mktemp()\n graph.render(filename)\n return filename\n\n\ndef view(objs, parent_func, filename=None, rankdir='LR', vertex_color_func={}.get, vertex_label_func={}.get,\n edge_label_func={}.get):\n graph = __make_graph(objs, parent_func, rankdir, vertex_color_func, vertex_label_func, edge_label_func)\n if filename is None:\n filename = tempfile.mktemp()\n graph.view(filename)\n return filename\n", "id": "10315636", "language": "Python", "matching_score": 1.0572088956832886, "max_stars_count": 0, "path": "daglet/view.py" } ]
2.061438
b05102139
[ { "content": "\"\"\"Initialize class variables\"\"\"\n__version__='0.1'\n", "id": "9805889", "language": "Python", "matching_score": 0, "max_stars_count": 4, "path": "acoustic_distance/__init__.py" }, { "content": "import numpy as np\nfrom sklearn import preprocessing\nfrom scipy.io.wavfile import read\nfrom python_speech_features import mfcc\nfrom python_speech_features import delta\nfrom speechpy.processing import cmvn\nfrom dtw import dtw\n\ndef acoustic_distance(file1, file2):\n \"\"\"Computes the acoustic distance between audio files based on Bartelds (2020).\"\"\"\n rate1, audio1 = read(file1)\n rate2, audio2 = read(file2) \n mfcc_feature1 = mfcc(audio1,\n rate1,\n winlen = 0.025,\n winstep = 0.01,\n preemph = 0.97,\n numcep = 12,\n appendEnergy = True,\n winfunc = np.hamming,\n nfft=1024)\n mfcc_feature2 = mfcc(audio2,\n rate2,\n winlen = 0.025,\n winstep = 0.01,\n preemph = 0.97,\n numcep = 12,\n appendEnergy = True,\n winfunc = np.hamming,\n nfft=1024)\n deltas1 = delta(mfcc_feature1, 2)\n double_deltas1 = delta(deltas1, 2)\n deltas2 = delta(mfcc_feature2, 2)\n double_deltas2 = delta(deltas2, 2)\n combined1 = np.hstack((mfcc_feature1, deltas1, double_deltas1))\n combined2 = np.hstack((mfcc_feature2, deltas2, double_deltas2))\n combined1 = cmvn(combined1, variance_normalization=True)\n combined2 = cmvn(combined2, variance_normalization=True)\n res = dtw(combined1, combined2, window_type=\"slantedband\", window_args={\"window_size\" : 200}, distance_only=True)\n return res.distance / (combined1.shape[1] + combined2.shape[1])\n", "id": "5811871", "language": "Python", "matching_score": 0, "max_stars_count": 4, "path": "acoustic_distance/acoustic_distance.py" } ]
0
Ann-Kristin
[ { "content": "#!/usr/bin/python3\nimport os\nimport csv\nimport sys\nimport getopt\nimport logging\nimport logging.config\n\ndef readCsvFile(filename):\n if os.path.isfile(filename):\n with open(filename, newline='') as csvFile:\n data = list(csv.reader(csvFile, delimiter='\\t', quotechar='\"'))\n else:\n # logger.error(\"Failed to open {}!\".format(filename))\n print(\"Failed to open {}!\".format(filename))\n\n return data\n\ndef filterData(data, upperMatch, lowerMatch, minLength):\n filtered = []\n for row in data:\n if float(row[2]) > lowerMatch and float(row[2]) < upperMatch and float(row[3]) > minLength:\n filtered.append(row)\n\n return filtered\n\ndef matchData(data1, data2):\n matched = []\n for row1 in data1:\n for row2 in data2:\n if row1[0] == row2[0]:\n matched.append(row1)\n# matched.append(row2)\n\n return matched\n\ndef main(argv):\n try:\n opts, args = getopt.getopt(argv, \"hvd\", [\"help\", \"version\", \"debug\", \"upper-match=\", \"lower-match=\", \"min-length=\"])\n\n print(opts)\n print(args)\n except getopt.GetoptError as err:\n # Print usage information and exit.\n print(err)\n usage()\n sys.exit(2)\n\n debugging = False\n upperMatch = 90\n lowerMatch = 80\n minLength = 500\n\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n usage()\n sys.exit(1)\n elif o in (\"-v\", \"--version\"):\n print(\"Version 0.1\")\n sys.exit(1)\n elif o in (\"-d\", \"--debug\"):\n debugging = True\n elif o in (\"--upper-match\"):\n upperMatch = float(a)\n elif o in (\"--lower-match\"):\n lowerMatch = float(a)\n elif o in (\"--min-length\"):\n minLength = float(a)\n else:\n assert False, \"unhandled option\"\n\n # Inititalize the logging api ...\n logging.config.fileConfig('config/logging.conf')\n\n # ... and fetch a logger.\n if debugging:\n logger = logging.getLogger('development')\n else:\n logger = logging.getLogger('production')\t\n\n # Check given bounds.\n if upperMatch < 0 or upperMatch > 100:\n logger.error(\"Upper Match must be in range [0, 100]!\")\n\n if upperMatch < 0 or upperMatch > 100:\n logger.error(\"Lower Match must be in range [0, 100]!\")\n \n if upperMatch < lowerMatch:\n logger.error(\"Upper match has to be greater than lower match!\")\n\n filteredData = []\n for filename in args:\n data = readCsvFile(filename)\n filteredData.append(filterData(data, upperMatch, lowerMatch, minLength))\n\n resultData = matchData(filteredData[0], filteredData[1])\n\n for row in resultData:\n print(\", \".join(row))\n\n print(\"\\n----------------------------------\\n\")\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:])\n", "id": "8788100", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "main.py" } ]
0
hhhnz
[ { "content": "import cv2\r\nfrom PySide6 import QtGui\r\nfrom PySide6.QtGui import QBrush\r\n\r\nfrom gScene import GText\r\n\r\n\r\nclass GridBox:\r\n\r\n def __init__(self,topLX, topLY, bottomRX, bottomRY):\r\n self.topLeftX = topLX\r\n self.topLeftY = topLY\r\n self.bottomRightX = bottomRX\r\n self.bottomRightY = bottomRY\r\n self.matBox = None\r\n self.row =0\r\n self.column=0\r\n self.flagPatternFound=False\r\n self.canvasView = None\r\n self.canvasScene = None\r\n self.textCode = None\r\n self.samePatternAs = None\r\n self.mainWindow = None\r\n def clearPattern(self):\r\n self.flagPatternFound=False\r\n self.canvasScene.removeItem(self.textCode)\r\n self.canvasView.update()\r\n def showCode(self):\r\n w= self.bottomRightX-self.topLeftX\r\n h= self.bottomRightY-self.topLeftY\r\n if w<=h:\r\n textSize = round(w/4)\r\n else:\r\n textSize = round(h/4)\r\n if self.canvasScene is not None or self.canvasView is not None:\r\n self.textCode = GText(self.samePatternAs)\r\n self.textCode.setCode(self.samePatternAs)\r\n self.textCode.setScale(0.5)\r\n self.textCode.setMainWindow(self.mainWindow)\r\n #self.textCode = self.canvasScene.addText(self.samePatternAs, QtGui.QFont('Arial Black', textSize, QtGui.QFont.Light))\r\n self.canvasScene.addItem(self.textCode)\r\n self.textCode.setPos(self.topLeftX-textSize+2, self.topLeftY-textSize)\r\n self.textCode.setDefaultTextColor(QtGui.QColor(\"yellow\"))\r\n #self.textCode.setColor(QtGui.QColor(\"green\"))\r\n\r\n def getTopLeft(self):\r\n return self.topLeftX, self.topLeftY\r\n def getBottomRight(self):\r\n return self.bottomRightX, self.bottomRightY\r\n def __str__(self):\r\n return str(self.topLeftX)+\",\"+str(self.topLeftY)+\",\"+str(self.bottomRightX)+\",\"+str(self.bottomRightY)\r\n def setMatBox (self,mat):\r\n self.matBox = mat\r\n pass\r\n def getMatBox(self):\r\n return self.matBox\r\n def setCoord(self,r,c):\r\n self.row=r\r\n self.column=c\r\n def getCoord(self):\r\n return self.row,self.column\r\n def getCoordString(self):\r\n return str(self.row)+\".\"+str(self.column)\r\n def setPattern(self,coord):\r\n self.flagPatternFound=True\r\n self.samePatternAs=coord\r\n\r\nif __name__ == \"__main__\":\r\n gb1=GridBox(1,2,3,4)\r\n print(gb1.getTopLeft())\r\n print(gb1.getBottomRight())\r\n print (gb1)", "id": "11039253", "language": "Python", "matching_score": 4.1129937171936035, "max_stars_count": 0, "path": "gridBox.py" }, { "content": "import sys\r\nimport random\r\n\r\nimport cv2\r\nfrom PySide6 import QtCore, QtWidgets, QtGui\r\nfrom PySide6.QtCore import Qt, QRectF\r\nfrom PySide6.QtGui import QImage, QPixmap\r\nfrom PySide6.QtWidgets import QStatusBar, QPushButton, QMainWindow, QHBoxLayout, QVBoxLayout, QFrame, QTextEdit, \\\r\n QSplitter, QWidget, QLabel, QLineEdit, QFileDialog, QGraphicsItem\r\n\r\nfrom cvAction import CvAction\r\nfrom gScene import GScene\r\nfrom gridBox import GridBox\r\n\r\n\r\nclass MyApp(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.flagSSpriteClicked = False\r\n self.setWindowTitle(\"Road Fighter\")\r\n self.createMenuBar()\r\n self.createStatusBar()\r\n self.createToolBar()\r\n self.cva = CvAction()\r\n self.currentViewScale = 1.0\r\n self.flagImageLoad = False\r\n self.flagGridDrawn = False\r\n self.listHLine = []\r\n self.listVLine = []\r\n self.list2dGridBox = []\r\n self.dictSprite = {}\r\n\r\n @QtCore.Slot()\r\n def magic(self):\r\n self.text.setText(random.choice(self.hello))\r\n\r\n def createStatusBar(self):\r\n self.statusBar = QStatusBar()\r\n self.setStatusBar(self.statusBar)\r\n 
self.statusBar.showMessage('Hello')\r\n self.initUI()\r\n\r\n def actionOpenImage(self):\r\n # TODO: check if image already load, and clean\r\n if self.flagSSpriteClicked:\r\n self.textedit.clear()\r\n self.dictSprite.clear()\r\n if self.flagSSpriteClicked:\r\n for row in self.list2dGridBox:\r\n for cell in row:\r\n cell.clearPattern()\r\n self.flagSSpriteClicked = False\r\n if self.flagGridDrawn:\r\n # remove all grid lines\r\n self.flagGridDrawn = False\r\n pass\r\n if self.flagImageLoad:\r\n self.flagImageLoad = False\r\n pass\r\n\r\n path_to_file, _ = QFileDialog.getOpenFileName(self, self.tr(\"Load Image\"), self.tr(\".\"),\r\n self.tr(\"Images (*.*)\"))\r\n print(path_to_file)\r\n if path_to_file != \"\":\r\n self.cva.loadImage(path_to_file)\r\n\r\n self.image = self.cva.matOriginalImg # cv mat image\r\n self.image = self.cva.cvToQImage(self.image)\r\n # self.topleft.setPixmap(QtGui.QPixmap.fromImage(self.image))\r\n self.scene.clear()\r\n w = self.image.width()\r\n h = self.image.height()\r\n pixMap = QPixmap.fromImage(self.image)\r\n self.scene.addPixmap(pixMap)\r\n self.view.fitInView(QRectF(0, 0, w, h), Qt.KeepAspectRatio)\r\n self.scene.update()\r\n self.statusBar.showMessage(\"image size: \" + str(self.image.width()) + \" * \" + str(self.image.height()))\r\n self.flagImageLoad = True\r\n else:\r\n self.statusBar.showMessage(\"file open cancelled\")\r\n\r\n def actionSaveAllStripe(self):\r\n fileName = QFileDialog.getSaveFileName(self, 'Save Stripes', '.',selectedFilter=\"*.png\")\r\n if fileName[0]!='':\r\n print (fileName)\r\n for item in self.dictSprite.items():\r\n newFileName=fileName[0]+item[0]+\".png\"\r\n gb = item[1]\r\n cv2.imwrite(newFileName, gb.getMatBox())\r\n else:\r\n self.statusBar.showMessage(\"File save cancelled!\")\r\n\r\n\r\n def createMenuBar(self):\r\n self.openAction = QtGui.QAction(QtGui.QIcon('openFile32.png'), 'Open', self)\r\n self.openAction.setShortcut('Ctrl+O')\r\n self.openAction.triggered.connect(self.actionOpenImage)\r\n self.saveStripeAction = QtGui.QAction(QtGui.QIcon('saveFileAll32.png'), 'Save All Stripe', self)\r\n self.saveStripeAction.setShortcut('Ctrl+Shift+S')\r\n self.saveStripeAction.triggered.connect(self.actionSaveAllStripe)\r\n\r\n self.exitAction = QtGui.QAction(QtGui.QIcon('exit24.png'), 'Exit', self)\r\n self.exitAction.setShortcut('Ctrl+Q')\r\n self.exitAction.triggered.connect(self.close)\r\n menubar = self.menuBar()\r\n fileMenu = menubar.addMenu(\"&File\")\r\n fileMenu.addAction(self.openAction)\r\n fileMenu.addAction(self.saveStripeAction)\r\n fileMenu.addAction(self.exitAction)\r\n def actionSaveOne(self,code):\r\n print (\"action save one in main window\")\r\n fileName = QFileDialog.getSaveFileName(self, 'Save One Stripe', '.', selectedFilter=\"*.png\")\r\n if fileName[0] != '':\r\n gb = self.dictSprite.get(code)\r\n newFileName = fileName[0] + code + \".png\"\r\n cv2.imwrite(newFileName, gb.getMatBox())\r\n else:\r\n self.statusBar.showMessage(\"File save cancelled!\")\r\n def createToolBar(self):\r\n toolbar = self.addToolBar('Tools')\r\n toolbar.addAction(self.openAction)\r\n toolbar.addAction(self.saveStripeAction)\r\n lbOffsetX = QLabel(\"Offset X\")\r\n toolbar.addWidget(lbOffsetX)\r\n self.tbOffsetX = QLineEdit('0')\r\n self.tbOffsetX.setFixedWidth(30)\r\n toolbar.addWidget(self.tbOffsetX)\r\n lbOffsetY = QLabel(\"Offset Y\")\r\n toolbar.addWidget(lbOffsetY)\r\n self.tbOffsetY = QLineEdit('0')\r\n self.tbOffsetY.setFixedWidth(30)\r\n toolbar.addWidget(self.tbOffsetY)\r\n # grid input\r\n lbGridX = QLabel(\"Grid 
X\")\r\n toolbar.addWidget(lbGridX)\r\n self.tbGridX = QLineEdit('16')\r\n toolbar.addWidget(self.tbGridX)\r\n lbGridY = QLabel(\"Grid Y\")\r\n toolbar.addWidget(lbGridY)\r\n self.tbGridY = QLineEdit('16')\r\n toolbar.addWidget(self.tbGridY)\r\n # add grid buttons\r\n self.btnDrawGrid = QPushButton(\"Draw Grid\")\r\n toolbar.addWidget(self.btnDrawGrid)\r\n self.btnDrawGrid.clicked.connect(self.actionDrawGrid)\r\n\r\n # add zoom\r\n lbZoom = QLabel(\"zoom\")\r\n toolbar.addWidget(lbZoom)\r\n self.leditZoomRatio = QLineEdit('1.0')\r\n toolbar.addWidget(self.leditZoomRatio)\r\n self.btnZoom = QPushButton(\"Zoom\", self)\r\n self.btnZoom.clicked.connect(self.actionZoomClicked)\r\n toolbar.addWidget(self.btnZoom)\r\n self.btnZoomFit = QPushButton(\"Zoom to Fit\", self)\r\n self.btnZoomFit.clicked.connect(self.actionZoomFitClicked)\r\n toolbar.addWidget(self.btnZoomFit)\r\n # add strip sprite\r\n self.btnSSprite = QPushButton(\"Strip Sprite\", self)\r\n self.btnSSprite.clicked.connect(self.actionSSpriteClicked)\r\n toolbar.addWidget(self.btnSSprite)\r\n\r\n def actionZoomFitClicked(self):\r\n vw = self.view.width()\r\n vh = self.view.height()\r\n w, h, _ = self.cva.getMat().shape\r\n scale = 1\r\n if w >= h:\r\n scale = round(vw / w)\r\n r = (float(scale / self.currentViewScale))\r\n else:\r\n scale = round(vh / h)\r\n r = (float(scale / self.currentViewScale))\r\n self.view.scale(r, r)\r\n\r\n self.leditZoomRatio.setText(str(scale))\r\n self.currentViewScale = float(self.leditZoomRatio.text())\r\n def actionSSpriteClicked(self):\r\n self.textedit.clear()\r\n self.dictSprite.clear()\r\n if self.flagSSpriteClicked:\r\n for row in self.list2dGridBox:\r\n for cell in row:\r\n cell.clearPattern()\r\n if self.flagGridDrawn:\r\n # print(\"strip sprite clicked\")\r\n list2dGridBox2 = self.list2dGridBox.copy()\r\n for row in self.list2dGridBox:\r\n for cell in row:\r\n template = cell.getMatBox()\r\n r, c = cell.getCoord()\r\n # cv2.imshow(str(r)+\"-\"+str(c),template)\r\n for r2 in list2dGridBox2:\r\n for c2 in r2:\r\n if not c2.flagPatternFound:\r\n template2 = c2.getMatBox()\r\n # diff = CvAction.compareMat(template, template2)\r\n # TODO:pixel compare\r\n diff = CvAction.pixelCompareMat(template, template2)\r\n if diff <= 0.0:\r\n c2.setPattern(cell.getCoordString())\r\n # print(cell.getCoordString() + \"vs\" + c2.getCoordString() + \" difference:\" + str(\r\n # CvAction.compareMat(template, template2)))\r\n c2.showCode()\r\n # TODO: show pattern in gui view\r\n\r\n if cell.getCoordString() not in self.dictSprite:\r\n self.dictSprite[cell.getCoordString()] = cell\r\n self.showCellText(cell)\r\n self.flagSSpriteClicked = True\r\n self.statusBar.showMessage(\"total stripes found = \" + str(self.dictSprite.__len__()))\r\n else:\r\n self.statusBar.showMessage(\"Please load image and draw grid first!\")\r\n\r\n def showCellText(self, c):\r\n bText = c.getCoordString()\r\n # m = c.getMatBox()\r\n # print (m.shape)\r\n # cv2.imshow(bText,c.getMatBox())\r\n # bImage = CvAction.cvToQImage(m)\r\n self.textedit.append(bText)\r\n\r\n def actionZoomClicked(self):\r\n if (self.flagImageLoad):\r\n r = (float(self.leditZoomRatio.text()) / self.currentViewScale)\r\n self.view.scale(r, r)\r\n self.currentViewScale = float(self.leditZoomRatio.text())\r\n else:\r\n self.statusBar.showMessage(\"Please open an image first!\")\r\n\r\n def removeAllFromScene(self, list):\r\n for o in list:\r\n self.scene.removeItem(o)\r\n self.view.update()\r\n del o\r\n\r\n def generateGridBox(self, sizeX, sizeY, rr, cc, offsetX, 
offsetY):\r\n self.list2dGridBox.clear()\r\n if offsetX == 0:\r\n noCol = cc\r\n else:\r\n noCol = cc - 1\r\n if offsetY == 0:\r\n noRow = rr\r\n else:\r\n noRow = rr - 1\r\n for r in range(noRow):\r\n listRow = []\r\n for c in range(noCol):\r\n topX = c * sizeX + offsetX\r\n topY = r * sizeY + offsetY\r\n bottomX = (c + 1) * sizeX + offsetX\r\n bottomY = (r + 1) * sizeY + offsetY\r\n gb = GridBox(topX, topY, bottomX, bottomY)\r\n gb.mainWindow = self\r\n gb.canvasView = self.view\r\n gb.canvasScene = self.scene\r\n m = self.cva.getMat().copy()\r\n w, h, _ = m.shape\r\n print(str(topX) + \" \" + str(topY) + \" \" + str(bottomX) + \" \" + str(bottomY))\r\n print(str(w) + \" \" + str(h))\r\n crop = m[topY:bottomY, topX:bottomX]\r\n\r\n gb.setMatBox(crop)\r\n gb.setCoord(r, c)\r\n\r\n # cv2.imshow(gb.getCoordString(), gb.getMatBox())\r\n # cv2.waitKey(0)\r\n listRow.append(gb)\r\n\r\n self.list2dGridBox.append(listRow)\r\n\r\n def actionDrawGrid(self):\r\n if self.flagSSpriteClicked:\r\n for row in self.list2dGridBox:\r\n for cell in row:\r\n cell.clearPattern()\r\n if (self.flagGridDrawn):\r\n self.removeAllFromScene(self.listHLine)\r\n self.removeAllFromScene(self.listVLine)\r\n self.view.update()\r\n if (self.flagImageLoad):\r\n totalW = self.image.width()\r\n totalH = self.image.height()\r\n gridW = int(self.tbGridX.text())\r\n gridH = int(self.tbGridY.text())\r\n offsetX = int(self.tbOffsetX.text())\r\n offsetY = int(self.tbOffsetY.text())\r\n self.listVLine.clear()\r\n self.listHLine.clear()\r\n if gridW == 0 or gridH == 0:\r\n self.statusBar.showMessage(\"Grid size can not be zero!\")\r\n return\r\n totalHLine = round(totalH / gridH)\r\n print(totalHLine)\r\n totalVLine = round(totalW / gridW)\r\n print(totalVLine)\r\n if offsetX != 0:\r\n pass\r\n # totalVLine -= 1\r\n if offsetY != 0:\r\n pass\r\n # totalHLine -= 1\r\n\r\n for i in range(totalHLine):\r\n line = self.scene.addLine(0, (i * gridH) + offsetY, totalW, (i * gridH) + offsetY)\r\n # print(\"0\" + \",\" + str(i * gridH) + \",\" + str(totalW) + \",\" + str(i * gridH))\r\n pen = QtGui.QPen()\r\n pen.setColor(QtGui.QColor(\"red\"))\r\n line.setPen(pen)\r\n self.listHLine.append(line)\r\n for j in range(totalVLine):\r\n line = self.scene.addLine((j * gridW) + offsetX, 0, (j * gridW) + offsetX, totalH)\r\n pen = QtGui.QPen()\r\n pen.setColor(QtGui.QColor(\"red\"))\r\n line.setPen(pen)\r\n self.listVLine.append(line)\r\n self.generateGridBox(gridW, gridH, totalHLine, totalVLine, offsetX, offsetY)\r\n # print(self.list2dGridBox)\r\n self.flagGridDrawn = True\r\n else:\r\n self.statusBar.showMessage(\"Please open an image first!\")\r\n\r\n def initUI(self):\r\n w = QWidget()\r\n hbox = QHBoxLayout(w)\r\n # self.topleft = QtWidgets.QLabel(\"image\")\r\n # self.topleft.setFrameShape(QFrame.StyledPanel)\r\n #self.scene = QtWidgets.QGraphicsScene(QtCore.QRectF(0, 0, 300, 300))\r\n self.scene = GScene(QtCore.QRectF(0, 0, 300, 300))\r\n self.view = QtWidgets.QGraphicsView(\r\n self.scene, alignment=QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft\r\n )\r\n # self.view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\r\n # self.view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\r\n self.view.setBackgroundBrush(\r\n QtWidgets.QApplication.style()\r\n .standardPalette()\r\n .brush(QtGui.QPalette.Window)\r\n )\r\n # line1 = self.scene.addLine(0, 0, 200, 100)\r\n # line2 = self.scene.addLine(0, 100, 200, 0)\r\n # pen = QtGui.QPen()\r\n # pen.setDashPattern((4, 4))\r\n # pen.setColor(QtGui.QColor(\"red\"))\r\n # 
line2.setPen(pen)\r\n\r\n # rect = self.scene.addRect(QtCore.QRectF(QtCore.QPointF(50, 25), QtCore.QPointF(150, 75)))\r\n # rect.setBrush(QtGui.QColor(\"blue\"))\r\n # rect.setFlag(QGraphicsItem.ItemIsMovable)\r\n # self.topleft.resize(200, 100)\r\n bottom = QFrame()\r\n bottom.setFrameShape(QFrame.StyledPanel)\r\n splitter1 = QSplitter(Qt.Horizontal)\r\n self.textedit = QTextEdit()\r\n splitter1.addWidget(self.view)\r\n splitter1.addWidget(self.textedit)\r\n splitter1.setSizes([300, 100])\r\n splitter2 = QSplitter(Qt.Vertical)\r\n splitter2.addWidget(splitter1)\r\n splitter2.addWidget(bottom)\r\n splitter2.setSizes([300, 100])\r\n hbox.addWidget(splitter2)\r\n w.setLayout(hbox)\r\n self.setCentralWidget(w)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = QtWidgets.QApplication([])\r\n\r\n widget = MyApp()\r\n widget.resize(800, 600)\r\n widget.show()\r\n\r\n sys.exit(app.exec())\r\n", "id": "6872046", "language": "Python", "matching_score": 5.738007068634033, "max_stars_count": 0, "path": "main.py" }, { "content": "import PySide6\r\nfrom PySide6 import QtCore, QtGui\r\nfrom PySide6.QtWidgets import QGraphicsTextItem, QGraphicsScene, QMenu\r\n\r\n\r\nclass GScene(QGraphicsScene):\r\n itemRightClicked = QtCore.Signal(object)\r\n\r\nclass GText(QGraphicsTextItem):\r\n\r\n def mousePressEvent(self, event):\r\n self.saveOneAction = QtGui.QAction(QtGui.QIcon('saveFile32.png'), 'Save One Stripe',self)\r\n self.saveOneAction.setShortcut('Ctrl+Shift+S')\r\n self.saveOneAction.triggered.connect(self.actionSaveOne)\r\n if event.button() == QtCore.Qt.LeftButton:\r\n print (\"txt left button clicked\")\r\n if event.button() == QtCore.Qt.RightButton:\r\n menu = QMenu()\r\n menu.addAction(self.saveOneAction)\r\n menu.exec_(QtGui.QCursor.pos())\r\n\r\n def setMainWindow(self,mw):\r\n self.mainWin = mw\r\n def actionSaveOne(self):\r\n self.mainWin.actionSaveOne(self.code)\r\n def setCode(self,c):\r\n self.code = c\r\n\r\n", "id": "2329662", "language": "Python", "matching_score": 0.7884834408760071, "max_stars_count": 0, "path": "gScene.py" }, { "content": "#!/usr/bin/python2.7 python2.7\r\n# -*- coding: utf-8 -*-\r\n\r\n# kivy modules first, if not Kivy may cause problems\r\nimport kivy\r\nfrom kivy.app import App\r\nfrom kivy.lang import Builder\r\nfrom kivy.uix.label import Label\r\nfrom kivy.uix.floatlayout import FloatLayout\r\nfrom kivy.uix.screenmanager import ScreenManager, Screen\r\nkivy.require('1.10.0')\r\n\r\n\r\n# common modules\r\nimport sys\r\nimport signal\r\nfrom multiprocessing import Process\r\n\r\n\r\n# Flask & similar modules\r\nfrom flask import Flask\r\nfrom flask_restful import reqparse, abort, Api, Resource\r\nimport eventlet\r\nfrom eventlet import wsgi\r\n\r\n\r\n# async server setup\r\napp = Flask(__name__)\r\napi = Api(app)\r\n\r\n\r\ndef start_Flask():\r\n print(\"Starting server...\")\r\n # start an eventlet WSGI server on port 5000\r\n wsgi.server(eventlet.listen(('', 5000)), app)\r\n\r\n\r\ndef signal_handler(signal, frame):\r\n # for fetching CTRL+C and relatives\r\n print (\" CTRL + C detected, exiting ... 
\")\r\n exit(1)\r\n\r\n\r\n# Kivy screen class\r\nclass MainScreen(Screen):\r\n def __init__(self, **kwargs):\r\n self.name=\"MAIN SCREEN\"\r\n super(Screen, self).__init__(**kwargs)\r\n\r\n\r\n# Kivy app class\r\nclass Kivy(App):\r\n w_MessageBox10_1 = \"MAIN SCREEN\"\r\n w_MessageBox10_2 = \"One golden glance of what should be\"\r\n w_MessageBox30_2 = \"CHORUS\"\r\n w_MessageBox30_3 = \"EXIT\"\r\n\r\n\r\n # exit button action\r\n def exit(self):\r\n print (\"exiting... one shaft of light will show the way...\")\r\n p1.terminate() # terminate Flask by pressing on cancel\r\n exit(1)\r\n\r\n\r\n # do magic button action\r\n def do_magic(self):\r\n # your code goes here or maybe not\r\n print (\"***** it's a kind of magic *************************\")\r\n\r\n\r\n # Kivy UI builder file\r\n def build(self):\r\n sm = Builder.load_string(\"\"\"\r\n\r\nScreenManager\r\n MainScreen:\r\n size_hint: 1, .7\r\n auto_dismiss: False\r\n title: app.w_MessageBox10_1 \r\n title_align: \"center\"\r\n\r\n BoxLayout:\r\n orientation: \"vertical\"\r\n Label:\r\n text: app.w_MessageBox10_2\r\n BoxLayout:\r\n orientation: \"horizontal\"\r\n spacing: 10\r\n size_hint: 1, .5\r\n Button:\r\n text: app.w_MessageBox30_2 # DO MAGIC\r\n on_press:\r\n app.do_magic()\r\n Button:\r\n text: app.w_MessageBox30_3 # EXIT\r\n on_press:\r\n app.exit()\r\n\r\n\r\n \"\"\")\r\n\r\n return sm\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # #CTRL+C signal handler\r\n signal.signal(signal.SIGINT, signal_handler)\r\n signal.signal(signal.SIGTERM, signal_handler)\r\n\r\n global p1\r\n p1 = Process(target=start_Flask) # assign Flask to a process\r\n p1.start() # run Flask as process\r\n Kivy().run() # run Kivy UI", "id": "9503259", "language": "Python", "matching_score": 1.671938419342041, "max_stars_count": 0, "path": "test flask process.py" }, { "content": "#!/usr/bin/env python\n#import for system and time function\nimport sys\nimport time\nfrom datetime import timedelta\n#this is for encode\nimport ctypes\nimport secrets\n\n#import flask pkg\nfrom flask import Flask, request, send_file, render_template, url_for,flash, redirect,session\nfrom flask_sqlalchemy import SQLAlchemy\nfrom forms import LoginForm\nfrom flask_login import LoginManager, UserMixin, login_user,login_required,current_user,logout_user\nfrom flask_bcrypt import Bcrypt\n#import for grab image\nfrom PIL import ImageGrab\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\nfrom io import BytesIO\n\n#import gui kivy package\nfrom kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import ObjectProperty\nfrom kivy.core.window import Window\n#from kivy.uix.popup import Popup\n#import tkinter as tk\nimport threading\n\nimport random\nimport string\nfrom requests import get\n#import for url handling on client side\nimport pycurl\nfrom io import BytesIO\n#dataase handling\nimport sqlite3\n\n# from multiprocessing import Process\n# http://msdn.microsoft.com/en-us/library/windows/desktop/ms646260%28v=vs.85%29.aspx\nMOUSEEVENTF_LEFTDOWN = 2\nMOUSEEVENTF_LEFTUP = 4\n###FLASK#######################################\napp = Flask(__name__)\napp.config['SECRET_KEY']='35a60fe01bbe505d2597abda0cfbab26' #key for cookie\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db' #local library for\n\n\nrandomURL=secrets.token_urlsafe(16) #this is to generate random nubers for shut down routing\ndb = SQLAlchemy(app) #db is the database\nbcrypt = Bcrypt(app) #encrupyt fucntion for slask\n#from models import User\n# class 
User(db.Model):\n# id = 1\n#**************login manager***************\n#step 2. add login manager to routes\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = 'desktop'\nlogin_manager.login_view = 'click'\nlogin_manager.login_view = 'indexscript'\n\ndef shutdown_server(): #this is for shution down the server discuss later\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\[email protected]_request #session management discusss later\ndef make_session_permanent():\n session.permanent = True\n #app.permanent_session_lifetime = timedelta(seconds=10)\n app.permanent_session_lifetime = timedelta(minutes=1)\n return\[email protected]('/',methods=['GET','POST']) # log in page\ndef index():\n if current_user.is_authenticated:\n return redirect(url_for('desktop')) #if user logged in direct to desktop\n #return 'Hello, Flask' #app.send_static_file('index.html')\n form=LoginForm()\n if form.validate_on_submit():\n user = load_user(1)#just one suer\n #login_user(user,remember=form.remember.data)\n if user and bcrypt.check_password_hash(user.password, form.password.data):#compare password with in database\n login_user (user, remember=form.remember.data)#log in the user, flask will take care session control\n return redirect(url_for('indexscript'))#after login, direct to show desktop\n else:#if with wrong password\n flash('Login Unsuccessful. Please check password','danger')\n #login.html is template\n return render_template ('login.html', title = 'Login', form = form)#show login page\n\n# @app.route('/register', methods=['GET','POST'])\n# def register():\n# form = RegistrationForm()\n# if form.validate_on_submit():\n# flash(f\"Account created for {form.username.data}!\",'success')\n# return redirect (url_for('about'))\n# return render_template('register.html', title='Register', form=form)\n# @app.route('/login')\n# def login():\n# form = LoginForm()\n# return render_template('login.html', title='Login', form=form)\n# @app.route('/about')\n# def about():\n# return render_template('about.html', title='about')\[email protected]('/indexscript')#sending click to server\n@login_required #step 3 add login_required to each route\ndef indexscript():\n return app.send_static_file('index.html')\n #return \"indexscript\"\[email protected]('/desktop.jpeg')\n@login_required\ndef desktop():\n #return 'desktop jpg'\n screen = ImageGrab.grab()\n buf = BytesIO()\n screen.save(buf, 'JPEG', quality=50)\n buf.seek(0)\n return send_file(buf, mimetype='image/jpeg')\n # if current_user:\n # return current_user.username\n # else:\n # return \"no current user\"\n\[email protected]('/click')\n@login_required\ndef click():\n try:\n x = int(request.args.get('x'))\n y = int(request.args.get('y'))\n except:\n return 'error'\n user32 = ctypes.windll.user32\n user32.SetCursorPos(x, y)\n user32.mouse_event(MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)\n user32.mouse_event(MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)\n return 'done'\[email protected](('/shutdown'+randomURL), methods=['GET'])\ndef shutdown():\n shutdown_server()\n return 'Server shutting down...'\n#########MODEL DATA BASE#############################\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n@login_manager.unauthorized_handler #message when unauthorized action\ndef unauthorized_handler():\n flash(\"Please Log in first!\")\n return redirect(url_for('index'))\n\n\nclass User(db.Model,UserMixin):\n id = db.Column(db.Integer, primary_key=True)\n 
username = db.Column(db.String(20), unique=True, nullable=False)\n #email = db.Column(db.String(120), unique=True, nullable=False) #do not need email only 1 user\n #image_file = db.Column(db.String(20), nullable=False, default='default.jpg')\n password = db.Column(db.String(60), nullable=False)\n #posts = db.relationship('Post', backref='author', lazy=True)\n\n def __repr__(self): #return a presentation\n return f\"User('{self.username}', '{self.password}')\"\n####GUI Kivy################################################################################\nclass MyGrid(Widget):\n # initialize property to link back to kv file\n #txtServerIP = ObjectProperty(None)\n #butServerCheckIp = ObjectProperty(None)\n txtStatus = ObjectProperty(None)\n txtPw=ObjectProperty(None)\n txtPort=ObjectProperty(None)\n txtLicense=ObjectProperty(None)\n butStopServer=ObjectProperty(None)\n flagFlaskStarted = False\n #flagIPget=False\n flagPWget=False\n flagButStopServerEnabled = False\n\n # def getIP(self):\n # self.txtStatus.text='check computer ip address...'\n #\n # try:\n # ip = get('https://api.ipify.org').text\n # except:\n # self.txtStatus.text = 'computer ip address retrieve failed'\n # else:\n # self.txtServerIP.text=ip\n # self.flagIPget = True\n # self.txtStatus.text = 'computer ip address received'\n def genPW(self):\n global gPassword\n self.txtPw.text = self.get_random_alphanumeric_string(4,4)\n gPassword = self.txtPw.text\n self.flagPWget=True\n hashed_password = bcrypt.generate_password_hash(gPassword).decode('utf-8')\n try:\n sqliteConnection = sqlite3.connect('site.db')\n c = sqliteConnection.cursor()\n c.execute(\"Update User set password = (:pw) where id = 1\",{'pw':hashed_password})\n #sql_update_query='''Update User set password = '''+str(gPassword)+''' where id =1'''\n #username freeRemoteUser571894635218\n sqliteConnection.commit()\n c.close()\n except sqlite3.Error as error:\n print(\"Failed to update sqlite table\", error)\n finally:\n if (sqliteConnection):\n sqliteConnection.close()\n print(\"The SQLite connection is closed\")\n self.txtStatus.text = 'password generated'\n\n\n def get_random_alphanumeric_string(self, letters_count, digits_count):\n sample_str = ''.join((random.choice(string.ascii_letters) for i in range(letters_count)))\n sample_str += ''.join((random.choice(string.digits) for i in range(digits_count)))\n\n # Convert string to list and shuffle it to mix letters and digits\n sample_list = list(sample_str)\n random.shuffle(sample_list)\n final_string = ''.join(sample_list)\n return final_string\n def runFlask(self,):\n app.run(host='0.0.0.0', port=int(self.txtPort.text), debug=False,ssl_context='adhoc')\n def startServer(self):\n if self.flagPWget: #check if passward has been generated\n self.t=threading.Thread(target=self.runFlask) #run flask under kivy as a thread\n self.t.start()\n time.sleep(2)\n self.butStartServer.disabled=True #disable start server button\n self.butStopServer.disabled=False #enable stop server button\n self.txtStatus.text = 'Server started' #show in status bar\n self.flagButStopServerEnabled = True\n else:\n self.txtStatus.text = \"Please get public IP and generate Password before start Server.\" #if no password\n\n\n def stopServer(self):\n buffer = BytesIO()\n c = pycurl.Curl() #use pycurl to send message to server\n c.setopt(c.URL, 'https://127.0.0.1:'+self.txtPort.text+'/shutdown'+randomURL) #use shutdown url to shut down server on server machine\n c.setopt(c.WRITEDATA, buffer)\n #for self certificate\n c.setopt(pycurl.SSL_VERIFYPEER, 0)\n 
c.setopt(pycurl.SSL_VERIFYHOST, 0)\n c.perform()\n c.close()\n body = buffer.getvalue()\n self.txtStatus.text=body.decode('iso-8859-1')#show message in status\n time.sleep(2)\n self.t.join()\n self.butStartServer.disabled=False #enable start server\n self.butStopServer.disabled=True #disable stop server button\n self.flagButStopServerEnabled=False\n\nclass ServerApp(App):#kivy app\n def build(self):\n Window.bind(on_request_close=self.on_request_close)\n self.my = MyGrid()\n return self.my\n\n def on_request_close(self, *args):\n if self.my.flagButStopServerEnabled == True:\n self.my.stopServer()\n exit()\n return True\n\n\nif __name__ == '__main__':\n arg = sys.argv[1:]\n if arg.__len__()!=0:\n if arg[0] == \"test\":\n app.run(host='0.0.0.0', port=7080, debug=True, ssl_context='adhoc') #debug mode\n else:#running with GUI\n kivyApp = ServerApp()\n kivyApp.run()\n\n\n\n\n\n\n", "id": "2004918", "language": "Python", "matching_score": 2.5253758430480957, "max_stars_count": 0, "path": "myserver.py" }, { "content": "import tkinter as tk\r\nfrom tkinter import ttk\r\n\r\n\r\nclass View:\r\n def __init__(self, parent): # initialize class/object\r\n self.container = parent # parent is the gui entry point given\r\n self.setup()\r\n\r\n def setup(self):\r\n self.frame=tk.Frame(self.container)\r\n self.b1Start = tk.Button(self.frame, text=\"Start Server\", command = self.startServer, width = 20, height =1)\r\n self.frame.pack(fill=tk.BOTH)\r\n self.b1Start.pack(side=tk.TOP)\r\n\r\n def startServer(self):\r\n print (\"starting Flask as process...\")\r\n #global p1\r\n #p1 = Process(target=self.startFlask)# assign Flask to a process\r\n #p1.daemon = True\r\n #p1.start() # launch Flask as separate process\r\n #self.startFlask()\r\n def startFlask(self):\r\n print (\"inside start flask method...\")\r\n #app.run(host='0.0.0.0', port=7080, debug=True)\r\n\r\nif __name__ == '__main__':\r\n #app.run(host='0.0.0.0', port=7080, debug=True)\r\n mainwin = tk.Tk() # tk instance created in operating system\r\n WIDTH = 800\r\n HEIGHT = 600\r\n mainwin.geometry(\"%sx%s\" % (WIDTH, HEIGHT))\r\n mainwin.title(\"Remote Desk Server\")\r\n view = View(mainwin) # give reference of mainwin to view object\r\n mainwin.mainloop()", "id": "2238964", "language": "Python", "matching_score": 2.0096182823181152, "max_stars_count": 0, "path": "test.py" }, { "content": "import tkinter as tk\r\nfrom tkinter import messagebox\r\nfrom tkinter import *\r\nfrom tkinter.filedialog import askopenfilename\r\nfrom pubsub import pub\r\nfrom PIL import Image\r\nfrom PIL import ImageTk\r\nclass View:\r\n\r\n def __init__(self, parent):\r\n # initialize variables\r\n self.container = parent\r\n self.flagLoadImage = FALSE\r\n self.hut_width = 40\r\n self.hut_height = 56\r\n\r\n #Publishes a message to notify the Controller.\r\n\r\n #Uses PyPubSub to publish a message to a topic Radio_button_pressed.\r\n #The 'subscriber' here is the Controller which gets notified.\r\n\r\n\r\n def setup(self): # run first\r\n \"\"\"Calls methods to setup the user interface.\"\"\"\r\n self.create_widgets()\r\n self.setup_layout()\r\n\r\n def loadImg(self):\r\n pub.sendMessage(\"OpenFile_Button_Pressed\")\r\n self.flagLoadImage = TRUE\r\n\r\n\r\n def create_widgets(self):\r\n \"\"\"Create various widgets in the tkinter main window.\"\"\"\r\n self.var = tk.IntVar()\r\n self.background_label = tk.Label(self.container)\r\n self.topFrame = Frame(self.container,borderwidth=2,highlightbackground=\"black\",highlightcolor=\"red\",highlightthickness=1,width=300, 
height=600)\r\n self.bottomFrame = Frame(self.container,borderwidth=2,highlightbackground=\"black\",highlightcolor=\"red\",highlightthickness=1,width=500, height=600)\r\n self.topFrame2 = Frame(self.topFrame)\r\n #button\r\n self.b1LoadImg = tk.Button(self.topFrame2, text = \"Load Image\",command = self.loadImg)\r\n self.b2LineDetect = tk.Button(self.topFrame2,text = \"Line Detection\",command = self.lineDetect)\r\n #scale bar\r\n self.scale1 = tk.Scale(self.topFrame, from_=1, to=20, orient = HORIZONTAL, length = 500,label ='pixel', command = self.scalerChange)\r\n self.scale1.set(1)\r\n self.scale2 = tk.Scale(self.topFrame, from_=1, to=130, orient = HORIZONTAL, length = 500,label ='threshold', command = self.scalerChange)\r\n self.scale2.set(50)\r\n self.scale3 = tk.Scale(self.topFrame, from_=1, to=500, orient = HORIZONTAL, length = 500,label ='mini line length', command = self.scalerChange)\r\n self.scale3.set(10)\r\n self.scale4 = tk.Scale(self.topFrame, from_=1, to=100, orient=HORIZONTAL, length=500, label='max line gap', command = self.scalerChange)\r\n self.scale4.set(50)\r\n #image panel\r\n self.panelA = tk.Label(self.bottomFrame, text = 'image here')\r\n \r\n\r\n\r\n def setup_layout(self):\r\n self.topFrame.pack(side = TOP)\r\n self.bottomFrame.pack (side=BOTTOM)\r\n self.topFrame2.pack(side = TOP)\r\n self.b1LoadImg.pack( side=LEFT)\r\n self.b2LineDetect.pack(side = RIGHT)\r\n self.scale4.pack(side=BOTTOM) #max line gap\r\n self.scale3.pack(side=BOTTOM) #min line lenght\r\n self.scale2.pack(side=BOTTOM) #threshold\r\n self.scale1.pack(side=BOTTOM) # pixel\r\n self.panelA.pack()\r\n\r\n \r\n def updateImg (self,img):\r\n self.panelA.configure(image=img)\r\n self.panelA.image = img\r\n return\r\n def scalerChange (self, val):\r\n if (self.flagLoadImage):\r\n pub.sendMessage(\"LineDetect_Button_Pressed\")\r\n def lineDetect(self):\r\n pub.sendMessage(\"LineDetect_Button_Pressed\")\r\n#test view\r\nif __name__ == \"__main__\":\r\n mainwin = tk.Tk()\r\n WIDTH = 800\r\n HEIGHT = 600\r\n mainwin.geometry(\"%sx%s\" % (WIDTH, HEIGHT))\r\n #mainwin.resizable(0, 0)\r\n mainwin.title(\"Open CV\")\r\n\r\n view=View(mainwin)\r\n view.setup()\r\n mainwin.mainloop()", "id": "7765123", "language": "Python", "matching_score": 5.132299900054932, "max_stars_count": 2, "path": "hnzView.py" }, { "content": "from hnzView import View\r\nfrom hnzModel import Model\r\nfrom tkinter import *\r\nfrom pubsub import pub\r\nclass Controller:\r\n def __init__(self, parent):\r\n\r\n self.parent = parent\r\n self.model = Model()\r\n self.view = View(parent)\r\n self.view.setup()\r\n\r\n pub.subscribe(self.openfile_btn_pressed, \"OpenFile_Button_Pressed\")\r\n pub.subscribe(self.model_change_handler, \"model_updated\")\r\n pub.subscribe(self.line_detection,\"LineDetect_Button_Pressed\")\r\n\r\n\r\n\r\n def openfile_btn_pressed(self):\r\n #print ('controller receive message - OpenFile_Button_Pressed')\r\n self.model.loadImg()\r\n\r\n def model_change_handler(self, data):\r\n self.view.updateImg(data)\r\n\r\n def line_detection(self):\r\n self.model.lineDetection(self.view.scale1.get(), self.view.scale2.get(),self.view.scale3.get(),self.view.scale4.get())\r\n print (\"control line detetection\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Create an instance of Tk. This is popularly called 'root' But let's\r\n # call it mainwin (the 'main window' of the application. 
)\r\n mainwin = Tk()\r\n WIDTH = 800\r\n HEIGHT = 800\r\n mainwin.geometry(\"%sx%s\" % (WIDTH, HEIGHT))\r\n #mainwin.resizable(0, 0)\r\n mainwin.title(\"Image Line Detection\")\r\n\r\n game_app = Controller(mainwin)\r\n\r\n mainwin.mainloop()", "id": "1376473", "language": "Python", "matching_score": 0.5543169975280762, "max_stars_count": 2, "path": "hnzController.py" }, { "content": "import cv2\r\n\r\nfrom PySide6.QtGui import QImage\r\nimport numpy as np\r\n\r\n\r\nclass CvAction:\r\n def __init__(self):\r\n pass\r\n\r\n def loadImage(self, path):\r\n self.matOriginalImg = cv2.imread(path)\r\n #cv2.imshow('image', self.matOriginalImg)\r\n # cv2.waitKey(0)\r\n\r\n def getMat(self):\r\n return self.matOriginalImg\r\n\r\n @staticmethod\r\n def cvToQImage(cvImg):\r\n height, width, channel = cvImg.shape\r\n rgb_image = cv2.cvtColor(cvImg, cv2.COLOR_BGR2RGB)\r\n bytesPerLine = 3 * width\r\n qImg = QImage(rgb_image.data, width, height, bytesPerLine, QImage.Format_RGB888)\r\n return qImg\r\n\r\n @staticmethod\r\n def templateMatching(source, template):\r\n img_rgb = source.copy()\r\n # img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)\r\n cv2.imshow(\"img_rgb\", img_rgb)\r\n cv2.waitKey(0)\r\n cv2.imshow(\"template\", template)\r\n cv2.waitKey(0)\r\n h, w = template.shape[:2]\r\n res = cv2.matchTemplate(img_rgb, template, cv2.TM_CCOEFF_NORMED)\r\n threshold = 0.8\r\n loc = np.where(res >= threshold)\r\n for pt in zip(*loc[::-1]):\r\n cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)\r\n cv2.imshow(\"detected image\", img_rgb)\r\n cv2.waitKey(0)\r\n\r\n @staticmethod\r\n def compareMat(image_1, image_2):\r\n # TODO: check if it works\r\n # reject if sizes are different\r\n height1, width1, _ = image_1.shape\r\n height2, width2, _ = image_2.shape\r\n if height1 != height2 or width1 != width2:\r\n return 1000\r\n first_image_hist = cv2.calcHist([image_1], [0], None, [256], [0, 256])\r\n second_image_hist = cv2.calcHist([image_2], [0], None, [256], [0, 256])\r\n img_hist_diff = cv2.compareHist(first_image_hist, second_image_hist, cv2.HISTCMP_BHATTACHARYYA)\r\n img_template_probability_match = \\\r\n cv2.matchTemplate(first_image_hist, second_image_hist, cv2.TM_CCOEFF_NORMED)[0][0]\r\n img_template_diff = 1 - img_template_probability_match\r\n\r\n # taking only 10% of histogram diff, since it's less accurate than template method\r\n commutative_image_diff = (img_hist_diff / 10) + img_template_diff\r\n return commutative_image_diff\r\n\r\n @staticmethod\r\n def pixelCompareMat(img1, img2):\r\n if img1.shape == img2.shape:\r\n #cv2.imshow(\"img1\",img1)\r\n #cv2.imshow(\"img2\",img2)\r\n dst = cv2.bitwise_xor(img1, img2)\r\n #cv2.imshow(\"dst\",dst)\r\n gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)\r\n count=cv2.countNonZero(gray)\r\n print(count)\r\n if count>0:\r\n return count\r\n else:\r\n return 0\r\n\r\n else:\r\n return 100\r\n", "id": "11984757", "language": "Python", "matching_score": 2.9049136638641357, "max_stars_count": 0, "path": "cvAction.py" }, { "content": "from tkinter.filedialog import askopenfilename\r\nimport cv2\r\nfrom pubsub import pub\r\nimport PIL.ImageTk, PIL.Image\r\nimport numpy as np\r\n\r\nclass Model:\r\n def __init__(self):\r\n\r\n \r\n return\r\n\r\n\r\n def loadImg(self):\r\n path= askopenfilename(initialdir=\"./\",\r\n filetypes=[(\"Image File\", \"*.jpg\"),(\"All Files\",\"*.*\")],\r\n title = \"Choose a file.\"\r\n )\r\n \r\n if len(path)>0:\r\n self.originalImg = cv2.imread(path)\r\n self.currentImg = self.originalImg.copy()\r\n ##image = 
PIL.Image.fromarray(self.originalImg)#.resize(300,300)\r\n #update view image\r\n pub.sendMessage(\"model_updated\", data=self.toTkImg(self.currentImg))\r\n\r\n print (path)\r\n return\r\n \r\n def getCurrentImg(self):\r\n return \r\n def getOriginalImg(self):\r\n return self.originalImg\r\n def lineDetection(self,p,th,minlen,maxgap):\r\n img = self.originalImg.copy() #opencv img\r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n #cv2.imshow('gray img',gray)\r\n #img_blur = cv2.medianBlur(gray, 5)\r\n #cv2.imshow('gray blur',img_blur)\r\n edges = cv2.Canny(gray, 50, 150, apertureSize = 3)\r\n #minLineLength - Minimum length of line. Line segments shorter than this are rejected\r\n #maxLineGap - Maximum allowed gap between line segments to treat them as single line\r\n lines = cv2.HoughLinesP(edges,p,np.pi/360,th,minlen,maxgap)\r\n for line in lines:\r\n x1, y1, x2, y2 = line[0]\r\n cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)\r\n #print (x1,y1,x2,y2)\r\n #update image\r\n #cv2.imshow('line detection',img)\r\n #cv2.waitKey(0)\r\n #cv2.destroyAllWindows()\r\n\r\n self.currentImg=img\r\n pub.sendMessage(\"model_updated\", data=self.toTkImg(self.currentImg))\r\n\r\n #convert cv2 image to tk image\r\n def toTkImg(self,img):\r\n #scale_percent = 3 # percent of original size\r\n #width = int(img.shape[1] * scale_percent)\r\n #height = int(img.shape[0] * scale_percent)\r\n #dim = (width, height)\r\n # resize image\r\n #resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\r\n\r\n b,g,r = cv2.split(img)\r\n img = cv2.merge((r,g,b))\r\n im = PIL.Image.fromarray(img)\r\n imgtk = PIL.ImageTk.PhotoImage(image=im)\r\n return imgtk\r\n", "id": "12376507", "language": "Python", "matching_score": 2.6923575401306152, "max_stars_count": 2, "path": "hnzModel.py" } ]
2.608867
pdworzynski
[ { "content": "import unittest\nimport os, sys\nsys.path.insert(0, \"../\")\nfrom pyCSCS import read_bucket, read_css, cscs\n\n\nclass CSCStest(unittest.TestCase):\n\n def setUp(self):\n self.basepath = os.path.split(os.path.abspath(__file__))[0] + '/'\n self.sample_names, self.featureids, self.features = read_bucket(self.basepath + \"data/small_GNPS_buckettable.tsv\", normalization = True)\n self.observationids = {str(x):index for index, x in enumerate(self.featureids)}\n self.css = read_css(self.basepath + \"data/small_GNPS_edges.tsv\", self.observationids, self.features.shape[0], 0.6)\n\n \n def test_read_bucket_sample_names(self):\n self.assertEqual(self.sample_names, ['Sample1', 'Sample2', 'Sample3', 'Sample4', 'Sample5', 'Sample6'])\n\n def test_read_bucket_featureids(self):\n self.assertEqual(self.featureids, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n\n def test_read_bucket_normalized_sum(self):\n self.assertEqual(self.features.sum().sum(), 6)\n\n def test_read_bucket_unnormalized_sum(self):\n sample_names, featureids, features = read_bucket(self.basepath + \"data/small_GNPS_buckettable.tsv\", normalization = False)\n self.assertEqual(features.sum().sum(), 3470686603)\n\n def test_read_css(self):\n self.assertAlmostEqual(round(self.css.sum()),19)\n\n def test_cscs_1_cpu(self):\n dist = cscs(self.features, self.css, self.sample_names, cpus=1)\n self.assertAlmostEqual(dist.sum().sum(), 25.14, delta=0.01)\n \n def\ttest_cscs_2_cpu(self):\n dist = cscs(self.features, self.css, self.sample_names, cpus=2)\n self.assertAlmostEqual(dist.sum().sum(), 25.14, delta=0.01)\n \nif __name__ == '__main__':\n unittest.main()\n", "id": "8752137", "language": "Python", "matching_score": 2.3169076442718506, "max_stars_count": 1, "path": "tests/test_cscs.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom itertools import combinations,islice\nfrom skbio.stats.ordination import pcoa\nfrom skbio import DistanceMatrix\nfrom scipy.sparse import dok_matrix,csc_matrix\nimport skbio\nimport pandas as pd\nimport biom\nfrom q2_types.feature_table import FeatureTable, Frequency\nfrom multiprocessing import Process, Queue \nfrom collections import defaultdict\nimport itertools\nimport scipy.sparse\nimport pickle\nimport time\n\ndef filter_matrix(matrix, threshold=0.6):\n Xindices, Yindices = matrix.nonzero()\n for i in range(len(Xindices)):\n x, y = Xindices[i], Yindices[i]\n if matrix[x, y] < threshold:\n matrix[x, y] = 0.0\n matrix.eliminate_zeros()\n \n\ndef read_css(css_edges, observationids, p, cosine_threshold):\n edgesdok = dok_matrix((p, p), dtype=np.float32)\n fh = open(css_edges, \"r\")\n for line in fh.readlines():\n if line.find(\"CLUSTERID1\") > -1:\n continue\n linesplit = line.split(\"\\t\")\n if float(linesplit[4]) < cosine_threshold:\n edgesdok[int(observationids[linesplit[0]]), int(observationids[linesplit[1]])] = 0.0\n else:\n edgesdok[observationids[linesplit[0]], observationids[linesplit[1]]] = float(linesplit[4])\n edgesdok[observationids[linesplit[1]], observationids[linesplit[0]]] = float(linesplit[4])\n fh.close()\n edgesdok.setdiag(1)\n return(edgesdok)\n\ndef read_bucket(features_file, normalization):\n bucket = pd.read_csv(features_file, sep = \"\\t\",index_col=0, header=0 )\n featureids = list(bucket.index)\n sample_names = list(bucket.columns.values)\n\n if normalization == True:\n bucket = bucket.div(bucket.sum(axis=0), axis=1)\n\n return((sample_names, featureids,csc_matrix(bucket.values)))\n \ndef cscs_from_files(features_file, css_edges, 
cosine_threshold = 0.6, normalization = True, weighted = True, cpus = 1, chunk = 2):\n \"\"\" Compute CSCS from input files in GNPS buckettable and egdes format\n Args:\n features_file (table): A path to a buckettable file from GNPS\n edges (table): The CSS matrix as a edges file from GNPS\n cosine_threshold (float): Threshold under which all entries in the CSS matri will be set to 0. Set to 0.6 by default as in Sedio et al.\n normalization (boolean): Total Ion Current Sum Normalization or not\n weight (boolean): Weight all intensities or treat them as presence/absence\n cpus (int): Number of processes to run default = 1\n chunk (int): Number of samples to process in each process\n\n \"\"\"\n sample_names, featureids, features = read_bucket(features_file, normalization)\n observationids = {str(x):index for index, x in enumerate(featureids)}\n edgesdok = read_css(css_edges, observationids, features.shape[0], cosine_threshold)\n\n if weighted == False:\n features = features.pa #convert to scipy sparse\n\n return(cscs(features, edgesdok, sample_names, cosine_threshold = cosine_threshold, normalization = normalization, weighted = weighted, cpus = cpus, chunk = chunk))\n\ndef cscs(features, edges, sample_names, cosine_threshold = 0.6, normalization = True, weighted = True, cpus = 1, chunk = 2):\n \"\"\" Compute CSCS distance\n \n Computes a CSCS distance matrix from a features matrix and a matrix of cosine similarities\n\n Args:\n features (table): A pandas dataframe or numpy array of feature intensities\n edges (table): The CSS matrix as a scipy.sparse.csc_matrix\n sample_names (list): list of sample names\n cosine_threshold (float): Threshold under which all entries in the CSS matri will be set to 0. Set to 0.6 by default as in Sedio et al.\n normalization (boolean): Total Ion Current Sum Normalization or not\n weight (boolean): Weight all intensities or treat them as presence/absence\n cpus (int): Number of processes to run default = 1\n chunk (int): Number of samples to process in each process\n\n \"\"\"\n filter_matrix(features, 0.6)\n dist = parallel_make_distance_matrix(features, edges, sample_names, cpus, chunk)\n dist = 1 - dist\n return(dist)\n\ndef compute_sum(sampleA, sampleB, edges):\n outer = sampleA.multiply(sampleB.transpose())\n finaldok = outer.multiply(edges)\n if finaldok.nnz == 0:\n return(0)\n else:\n return(sum(finaldok.data))\n\ndef single_distance(sampleA, sampleB, edges):\n \"\"\" Compute the distance between one pair of samples\n \"\"\"\n start = time.time()\n cssab = compute_sum(sampleA, sampleB, edges)\n cssaa = compute_sum(sampleA, sampleA, edges)\n cssbb = compute_sum(sampleB, sampleB, edges)\n new = time.time()\n return(cssab/max(cssaa, cssbb))\n\ndef worker(input, output, edges):\n for worker_samples in iter(input.get, \"STOP\"):\n results=[]\n for index, sampleA, sampleB in worker_samples:\n results.append((index, single_distance(sampleA, sampleB, edges)))\n output.put([(index, result) for index, result in results])\n\ndef split_every(n, iterable):\n i = iter(iterable)\n piece = list(islice(i, n))\n while piece:\n yield piece\n piece = list(islice(i, n))\n\ndef parallel_make_distance_matrix(features, edges, sample_names, cpus, chunk):\n #Parallel stuff\n NUMBER_OF_PROCESSES = cpus\n work_chunk_size = chunk\n \n # Create queues\n task_queue = Queue()\n done_queue = Queue()\n \n #Scientific stuff\n dist = np.zeros([features.shape[1], features.shape[1]])\n \n feature_combinations = itertools.combinations(range(0,features.shape[1]), 2)\n comb_chunks_list = 
list([chunk for chunk in split_every(work_chunk_size, feature_combinations)])\n\n for chunk_index, chunk_comb in enumerate(comb_chunks_list):\n task_queue.put([(chunk_index*work_chunk_size + comb_index, features[:,comb[0]], features[:,comb[1]]) for comb_index, comb in enumerate(chunk_comb)])\n \n # Start worker processes\n for i in range(NUMBER_OF_PROCESSES):\n Process(target=worker, args=(task_queue, done_queue, edges)).start()\n \n indexed_distances = []\n for i in range(len(comb_chunks_list)):\n indexed_distances.extend(done_queue.get())\n \n # Tell child processes to stop\n for i in range(NUMBER_OF_PROCESSES):\n task_queue.put('STOP')\n \n distances = list([d for i, d in sorted(indexed_distances, key=lambda id_tuple: id_tuple[0])])\n \n xs,ys = np.triu_indices(dist.shape[0],k=1)\n dist[xs,ys] = distances\n dist[ys,xs] = distances\n dist[ np.diag_indices(dist.shape[0]) ] = 1\n distdf = pd.DataFrame(dist, sample_names, sample_names)\n return(distdf)\n\n", "id": "10008227", "language": "Python", "matching_score": 1.7876331806182861, "max_stars_count": 0, "path": "pyCSCS/cscs.py" }, { "content": "name = \"pyCSCS\"\nfrom .cscs import read_bucket, read_css, cscs, cscs_from_files\n", "id": "1568372", "language": "Python", "matching_score": 0.0790650025010109, "max_stars_count": 1, "path": "pyCSCS/__init__.py" } ]
1.787633
meshify-io
[ { "content": "import os\nimport boto3\nimport json\nimport urllib.request\n\ngsheets_url = \"https://sheets.googleapis.com/v4/spreadsheets/\"\n\ndef handler(event, context):\n #remove credentials from event\n accesstoken = event['accesstoken']\n event['accesstoken'] = '***'\n\n print(\"event\")\n print(event)\n\n sheetid = event['sheetid']\n request = urllib.request.Request(gsheets_url+sheetid+'?includeGridData=true')\n request.add_header('Authorization','Bearer '+accesstoken)\n result = []\n with urllib.request.urlopen(request) as response:\n content = response.read()\n # {\"spreadsheetId\": \"<KEY>\",\n # \"properties\": {\n # \"title\": \"Aufragsbuch\",\n # \"locale\": \"de_DE\",\n # \"autoRecalc\": \"ON_CHANGE\",\n # \"timeZone\": \"Europe/Berlin\",\n # \"defaultFormat\": {...}\n # }\n # },\n # \"sheets\": [\n # {\n # \"properties\": {\n # \"sheetId\": 0,\n # \"title\": \"Tabellenblatt1\",\n # \"index\": 0,\n # \"sheetType\": \"GRID\",\n # \"gridProperties\": {\n # \"rowCount\": 1000,\n # \"columnCount\": 26,\n # \"frozenRowCount\": 1\n # }\n # },\n # \"data\": [\n # {\"rowData\": [\n # {\n # \"values\": [\n # {\"userEnteredValue\": {\"stringValue\": \"DATUM\"},\n # \"effectiveValue\": {\"stringValue\": \"DATUM\"},\n # \"formattedValue\": \"DATUM\",\n # \"userEnteredFormat\": {..},\n # \"effectiveFormat\": {...}\n # },{\n # \"userEnteredValue\": {\"stringValue\": \"Auftraggeber\"},\n # \"effectiveValue\": {\"stringValue\": \"Auftraggeber\"},\n # \"formattedValue\": \"Auftraggeber\",\n # \"userEnteredFormat\": {...},\n # \"effectiveFormat\": {...}\n # },\n # ...\n sheetjson = json.loads(content)\n for sheet in sheetjson['sheets']:\n for idx,rowdata in enumerate(sheet['data']):\n row = rowdata['rowData']\n singleRow = []\n for cell in row['values']:\n singleRow.append(cell['formattedValue'])\n obj = {\"sheetid\":sheetid,\"rownum\":idx,\"rowvalues\":singleRow}\n result.append(obj)\n\n print(result)\n return result\n", "id": "10887287", "language": "Python", "matching_score": 0.503616213798523, "max_stars_count": 0, "path": "modifiedrow/main.py" }, { "content": "import os\nimport boto3\nimport json\nimport urllib.request\n\ngdrive_url = \"https://www.googleapis.com/drive/v3/files\"\n\ndef handler(event, context):\n #remove credentials from event\n accesstoken = event['accesstoken']\n event['accesstoken'] = '***'\n\n print(\"event\")\n print(event)\n result = []\n fileid = event['fileid']\n\n request = urllib.request.Request(gdrive_url+'/'+id+'?fields=*')\n request.add_header('Authorization','Bearer '+accesstoken)\n\n with urllib.request.urlopen(request) as response:\n data = response.read().decode('utf-8')\n print(data)\n result = json.loads(data)\n print(result)\n return result\n", "id": "6847595", "language": "Python", "matching_score": 1.9243457317352295, "max_stars_count": 0, "path": "fetchfilemetadata/main.py" }, { "content": "import os\nimport boto3\nimport json\nimport urllib.request\n\ngdrive_url = \"https://www.googleapis.com/drive/v3/files\"\n\ndef handler(event, context):\n #remove credentials from event\n accesstoken = event['accesstoken']\n event['accesstoken'] = '***'\n\n print(\"event\")\n print(event)\n result = []\n fileid = event['fileid']\n targeturl = event['targeturl']\n\n request = urllib.request.Request(gdrive_url+'/'+fileid+'?alt=media')\n request.add_header('Authorization','Bearer '+accesstoken)\n\n with urllib.request.urlopen(request) as response:\n data = response.read()\n #put file to s3\n request2 = urllib.request.Request(targeturl,data=data,method='PUT')\n 
request2.add_header('Content-Type','application/octet-stream')\n with urllib.request.urlopen(request2) as response2:\n data2 = response2.read()\n print(data2)\n\n return {\"status\":\"done\"}\n", "id": "3870658", "language": "Python", "matching_score": 2.353482484817505, "max_stars_count": 0, "path": "fetchfile/main.py" }, { "content": "import os\nimport boto3\nimport json\nimport urllib.request\n\ngdrive_url = \"https://www.googleapis.com/drive/v3/files\"\n\ndef handler(event, context):\n #remove credentials from event\n accesstoken = event['accesstoken']\n event['accesstoken'] = '***'\n\n print(\"event\")\n print(event)\n\n directory = event['directory']\n params = urllib.parse.urlencode({'q': 'parents in \\''+directory+'\\''})\n print(params)\n request = urllib.request.Request(gdrive_url+'?'+params)\n request.add_header('Authorization','Bearer '+accesstoken)\n result = []\n with urllib.request.urlopen(request) as response:\n content = response.read()\n # {\n # \"kind\": \"drive#fileList\",\n # \"incompleteSearch\": false,\n # \"files\": [\n # {\n # \"kind\": \"drive#file\",\n # \"id\": \"1qEXCqvQVNezQY6IiuQOebiARlq1SxUTC\",\n # \"name\": \"blach.pdf\",\n # \"mimeType\": \"application/pdf\"\n # }\n # ]\n # }\n filelist = json.loads(content)\n for fileentry in filelist['files']:\n #fileentry['dedupid']=fileentry['id']\n result.append(fileentry)\n\n print(result)\n return result\n", "id": "9567346", "language": "Python", "matching_score": 1.0321966409683228, "max_stars_count": 0, "path": "newfile/main.py" } ]
1.478271
ampcasd
[ { "content": "import enum\n\nfrom typing import List\nfrom dataclasses import dataclass\n\n\nclass Period(str, enum.Enum):\n MINS = '1m'\n MINS_5 = '5m'\n MINS_15 = '15m'\n MINS_30 = '30m'\n HOURS = '1h'\n HOURS_4 = '4h'\n HOURS_6 = '6h'\n HOURS_12 = '12h'\n DAY = '1D'\n WEEK = '7D'\n WEEK_2 = '14D'\n MONTH_1 = '1M'\n\n\nclass OrderSide(str, enum.Enum):\n BUY = 'BUY'\n SELL = 'SELL'\n\n\nclass OrderType(str, enum.Enum):\n LIMIT = 'LIMIT'\n MARKET = 'MARKET'\n\n\nclass OrderStatus(str, enum.Enum):\n ACTIVE = 'ACTIVE'\n FILLED = 'FILLED'\n CANCELED = 'CANCELED'\n REJECTED = 'REJECTED'\n EXPIRED = 'EXPIRED'\n\n\nclass Coin(str, enum.Enum):\n BTC = 'BTC'\n CRO = 'CRO'\n MCO = 'MCO'\n ETH = 'ETH'\n XRP = 'XRP'\n LTC = 'LTC'\n EOS = 'EOS'\n XLM = 'XLM'\n ATOM = 'ATOM'\n LINK = 'LINK'\n XTZ = 'XTZ'\n BCH = 'BCH'\n VET = 'VET'\n ICX = 'ICX'\n ADA = 'ADA'\n ENJ = 'ENJ'\n ALGO = 'ALGO'\n KNC = 'KNC'\n NEO = 'NEO'\n\n USDT = 'USDT'\n USDC = 'USDC'\n DAI = 'DAI'\n\n\nclass Pair(str, enum.Enum):\n CRO_BTC = 'CRO_BTC'\n MCO_BTC = 'MCO_BTC'\n ETH_BTC = 'ETH_BTC'\n XRP_BTC = 'XRP_BTC'\n LTC_BTC = 'LTC_BTC'\n EOS_BTC = 'EOS_BTC'\n XLM_BTC = 'XLM_BTC'\n ATOM_BTC = 'ATOM_BTC'\n LINK_BTC = 'LINK_BTC'\n XTZ_BTC = 'XTZ_BTC'\n BCH_BTC = 'BCH_BTC'\n VET_BTC = 'VET_BTC'\n ICX_BTC = 'ICX_BTC'\n ADA_BTC = 'ADA_BTC'\n ALGO_BTC = 'ALGO_BTC'\n NEO_BTC = 'NEO_BTC'\n\n USDC_USDT = 'USDC_USDT'\n BTC_USDT = 'BTC_USDT'\n CRO_USDT = 'CRO_USDT'\n MCO_USDT = 'MCO_USDT'\n ETH_USDT = 'ETH_USDT'\n XRP_USDT = 'XRP_USDT'\n LTC_USDT = 'LTC_USDT'\n EOS_USDT = 'EOS_USDT'\n XLM_USDT = 'XLM_USDT'\n ATOM_USDT = 'ATOM_USDT'\n LINK_USDT = 'LINK_USDT'\n XTZ_USDT = 'XTZ_USDT'\n BCH_USDT = 'BCH_USDT'\n VET_USDT = 'VET_USDT'\n ICX_USDT = 'ICX_USDT'\n ADA_USDT = 'ADA_USDT'\n ENJ_USDT = 'ENJ_USDT'\n ALGO_USDT = 'ALGO_USDT'\n KNC_USDT = 'KNC_USDT'\n NEO_USDT = 'NEO_USDT'\n DAI_USDT = 'DAI_USDT'\n\n MCO_CRO = 'MCO_CRO'\n ETH_CRO = 'ETH_CRO'\n XRP_CRO = 'XRP_CRO'\n LTC_CRO = 'LTC_CRO'\n EOS_CRO = 'EOS_CRO'\n XLM_CRO = 'XLM_CRO'\n ATOM_CRO = 'ATOM_CRO'\n LINK_CRO = 'LINK_CRO'\n XTZ_CRO = 'XTZ_CRO'\n BCH_CRO = 'BCH_CRO'\n VET_CRO = 'VET_CRO'\n ICX_CRO = 'ICX_CRO'\n ADA_CRO = 'ADA_CRO'\n ENJ_CRO = 'ENJ_CRO'\n ALGO_CRO = 'ALGO_CRO'\n KNC_CRO = 'KNC_CRO'\n NEO_CRO = 'NEO_CRO'\n DAI_CRO = 'DAI_CRO'\n\n CRO_USDC = 'CRO_USDC'\n\n\n@dataclass\nclass Candle:\n time: int\n open: float\n high: float\n low: float\n close: float\n volume: float\n pair: Pair\n\n\n@dataclass\nclass Trade:\n id: int\n time: int\n price: float\n quantity: float\n side: OrderSide\n pair: Pair\n\n\n@dataclass\nclass OrderInBook:\n price: float\n quantity: float\n count: int\n side: OrderSide\n\n @property\n def volume(self) -> float:\n return self.price * self.quantity\n\n\n@dataclass\nclass OrderBook:\n buys: List[OrderInBook]\n sells: List[OrderInBook]\n pair: Pair\n\n @property\n def spread(self) -> float:\n return (self.sells[0].price / self.buys[0].price - 1) * 100\n", "id": "5843545", "language": "Python", "matching_score": 2.2809135913848877, "max_stars_count": 0, "path": "src/cryptocom/exchange/structs.py" }, { "content": "import asyncio\n\nimport pytest\n\nimport cryptocom.exchange as cro\n\n\[email protected]\nasync def test_account_get_balance(account: cro.Account):\n data = await account.get_balance()\n assert data['CRO']['available'] > 2\n assert data['USDT']['available'] > 2\n\n\[email protected]\nasync def test_no_dublicated_mass_limit_orders(\n exchange: cro.Exchange, account: cro.Account):\n buy_price = round(await exchange.get_price(cro.Pair.CRO_USDT) / 2, 4)\n 
order_ids = await asyncio.gather(*[\n account.buy_limit(\n cro.Pair.CRO_USDT, 0.001,\n round(buy_price / 1000 + i / 10000.0, 4)\n )\n for i in range(100)\n ])\n\n real_orders = await asyncio.gather(*[\n account.get_order(id_)\n for id_ in order_ids\n ])\n for order in real_orders:\n assert order['status'] == 'ACTIVE', order\n\n assert len(real_orders) == 100\n\n orders = await account.get_open_orders(cro.Pair.CRO_USDT)\n assert sorted(o['id'] for o in orders) == sorted(order_ids)\n\n\[email protected]\nasync def test_account_buy_limit(exchange: cro.Exchange, account: cro.Account):\n buy_price = round(await exchange.get_price(cro.Pair.CRO_USDT) / 10, 4)\n order_ids = await asyncio.gather(*[\n account.buy_limit(cro.Pair.CRO_USDT, 0.001, buy_price)\n for i in range(25)\n ])\n all_orders = await account.get_orders(cro.Pair.CRO_USDT, page_size=50)\n\n await account.cancel_order(\n order_ids[0], cro.Pair.CRO_USDT, wait_for_cancel=True)\n order = await account.get_order(order_ids[0])\n assert order['status'] == cro.OrderStatus.CANCELED.value\n\n for order_id in order_ids[1:]:\n await account.cancel_order(order_id, cro.Pair.CRO_USDT)\n\n open_orders = [\n order\n for order in await account.get_open_orders(cro.Pair.CRO_USDT)\n if order['id'] in order_ids\n ]\n assert not open_orders\n\n all_orders = await account.get_orders(cro.Pair.CRO_USDT, page_size=50)\n ids = [order['id'] for order in all_orders]\n assert set(ids) & set(order_ids)\n\n\[email protected]\nasync def test_account_sell_limit(\n exchange: cro.Exchange, account: cro.Account):\n sell_price = round(await exchange.get_price(cro.Pair.CRO_USDT) * 10, 4)\n order_ids = [\n await account.sell_limit(cro.Pair.CRO_USDT, 0.001, sell_price)\n for _ in range(25)\n ]\n\n all_orders = await account.get_orders(cro.Pair.CRO_USDT, page_size=50)\n await account.cancel_open_orders(cro.Pair.CRO_USDT)\n\n open_orders = [\n order\n for order in await account.get_open_orders(cro.Pair.CRO_USDT)\n if order['id'] in order_ids\n ]\n\n for _ in range(10):\n for order in open_orders:\n assert order['status'] == cro.OrderStatus.CANCELED.value\n\n open_orders = [\n order\n for order in await account.get_open_orders(cro.Pair.CRO_USDT)\n if order['id'] in order_ids\n ]\n\n if not open_orders:\n break\n\n assert not open_orders\n\n all_orders = await account.get_orders(cro.Pair.CRO_USDT, page_size=50)\n ids = [order['id'] for order in all_orders]\n assert set(ids) & set(order_ids)\n\n\nasync def make_trades(account, exchange, order_ids):\n price = await exchange.get_price(cro.Pair.CRO_USDT)\n order_id = await account.buy_market(cro.Pair.CRO_USDT, round(price, 4))\n order = await account.get_order(order_id)\n assert order['status'] == cro.OrderStatus.FILLED.value\n assert order['id']\n order_ids['buy'].append(order_id)\n\n order_id = await account.sell_market(cro.Pair.CRO_USDT, 1)\n order = await account.get_order(order_id)\n assert order['status'] == cro.OrderStatus.FILLED.value\n order_ids['sell'].append(order_id)\n\n\[email protected]\nasync def test_account_market_orders(\n account: cro.Account, exchange: cro.Exchange):\n order_ids = {'buy': [], 'sell': []}\n await asyncio.gather(*[\n make_trades(account, exchange, order_ids) for _ in range(10)\n ])\n await asyncio.sleep(2)\n\n trades = await account.get_trades(cro.Pair.CRO_USDT, page_size=20)\n keys = sorted([\n 'side', 'instrument_name', 'fee', 'id', 'create_time', 'traded_price',\n 'traded_quantity', 'fee_currency', 'order_id'\n ])\n assert keys == sorted(trades[0].keys())\n\n for trade in trades:\n if 
trade['side'] == cro.OrderSide.BUY:\n assert trade['order_id'] in order_ids['buy']\n assert trade['order_id'] not in order_ids['sell']\n elif trade['side'] == cro.OrderSide.SELL:\n assert trade['order_id'] in order_ids['sell']\n assert trade['order_id'] not in order_ids['buy']\n", "id": "4884191", "language": "Python", "matching_score": 3.2220840454101562, "max_stars_count": 0, "path": "tests/test_account.py" }, { "content": "import asyncio\n\nfrom typing import List\n\nfrom .api import ApiProvider, ApiError\nfrom .structs import (\n Pair, OrderSide, OrderStatus, OrderType, Period, Candle, Trade,\n OrderInBook, OrderBook\n)\n\n\nclass Exchange:\n \"\"\"Interface to base exchange methods.\"\"\"\n def __init__(self, api: ApiProvider = None):\n self.api = api or ApiProvider(auth_required=False)\n\n async def get_pairs(self):\n \"\"\"List all available market pairs.\"\"\"\n data = await self.api.get('public/get-instruments')\n return {Pair(i.pop('instrument_name')): i for i in data['instruments']}\n\n async def get_tickers(self, pair: Pair = None):\n \"\"\"Get tickers in all available markets.\"\"\"\n params = {'instrument_name': pair.value} if pair else None\n data = await self.api.get('public/get-ticker', params)\n if pair:\n data.pop('i')\n return data\n return {Pair(ticker.pop('i')): ticker for ticker in data}\n\n async def get_trades(self, pair: Pair):\n \"\"\"Get last 200 trades in a specified market.\"\"\"\n data = await self.api.get(\n 'public/get-trades', {'instrument_name': pair.value})\n for trade in data:\n trade.pop('i')\n trade.pop('dataTime')\n return data\n\n async def get_price(self, pair: Pair):\n \"\"\"Get latest price of pair.\"\"\"\n data = await self.api.get('public/get-ticker', {\n 'instrument_name': pair.value\n })\n return float(data['a'])\n\n async def get_orderbook(self, pair: Pair, depth: int = 150):\n \"\"\"Get the order book for a particular market.\"\"\"\n data = await self.api.get('public/get-book', {\n 'instrument_name': pair.value,\n 'depth': depth\n })\n return data[0]\n\n async def listen_candles(self, period: Period, *pairs: List[Pair]):\n if not isinstance(period, Period):\n raise ValueError(f'Provide Period enum not {period}')\n\n channels = [\n f'candlestick.{period.value}.{pair.value}'\n for pair in pairs\n ]\n prev_time = {}\n\n async for data in self.api.listen('market', *channels):\n pair = Pair(data['instrument_name'])\n for candle in data['data']:\n current_time = int(candle['t'] / 1000)\n if pair not in prev_time or current_time > prev_time[pair]:\n yield Candle(\n current_time,\n candle['o'], candle['h'], candle['l'],\n candle['c'], candle['v'],\n Pair(data['instrument_name'])\n )\n prev_time[pair] = current_time\n\n async def listen_trades(self, *pairs: List[Pair]):\n channels = [f'trade.{pair}' for pair in pairs]\n async for data in self.api.listen('market', *channels):\n for trade in data['data']:\n trade.pop('dataTime')\n yield Trade(\n trade['d'], int(trade['t'] / 100),\n trade['p'], trade['q'],\n OrderSide(trade['s'].upper()),\n Pair(data['instrument_name'])\n )\n\n async def listen_orderbook(\n self, *pairs: List[Pair], depth: int = 150) -> OrderBook:\n channels = [f'book.{pair}.{depth}' for pair in pairs]\n async for data in self.api.listen('market', *channels):\n pair = Pair(data['instrument_name'])\n buys = [\n OrderInBook(*order, OrderSide.BUY)\n for order in data['data'][0]['bids']\n ]\n sells = [\n OrderInBook(*order, OrderSide.SELL)\n for order in reversed(data['data'][0]['asks'])\n ]\n yield OrderBook(buys, sells, pair)\n\n\nclass 
Account:\n \"\"\"Provides access to account actions and data. Balance, trades, orders.\"\"\"\n def __init__(\n self, *, api_key: str = '', api_secret: str = '',\n from_env: bool = False, api: ApiProvider = None):\n if not api and not (api_key and api_secret) and not from_env:\n raise ValueError(\n 'Pass ApiProvider or api_key with api_secret or from_env')\n self.api = api or ApiProvider(\n api_key=api_key, api_secret=api_secret, from_env=from_env)\n\n async def get_balance(self):\n \"\"\"Return balance.\"\"\"\n data = await self.api.post(\n 'private/get-account-summary', {'params': {}})\n return {acc['currency']: acc for acc in data['accounts']}\n\n async def get_orders(\n self, pair: Pair, page: int = 0, page_size: int = 200):\n \"\"\"Return all orders.\"\"\"\n data = await self.api.post('private/get-order-history', {\n 'params': {\n 'instrument_name': pair.value,\n 'page_size': page_size,\n 'page': page\n }\n })\n orders = data.get('order_list') or []\n for order in orders:\n order['id'] = int(order.pop('order_id'))\n return orders\n\n async def get_open_orders(\n self, pair: Pair, page: int = 0, page_size: int = 200):\n \"\"\"Return open orders.\"\"\"\n data = await self.api.post('private/get-open-orders', {\n 'params': {\n 'instrument_name': pair.value,\n 'page_size': page_size,\n 'page': page\n }\n })\n orders = data.get('order_list') or []\n for order in orders:\n order['id'] = int(order.pop('order_id'))\n return orders\n\n async def get_trades(\n self, pair: Pair, page: int = 0, page_size: int = 200):\n \"\"\"Return trades.\"\"\"\n data = await self.api.post('private/get-trades', {\n 'params': {\n 'instrument_name': pair.value,\n 'page_size': page_size,\n 'page': page\n }\n })\n trades = data.get('trade_list') or []\n for trade in trades:\n trade['order_id'] = int(trade['order_id'])\n trade['id'] = int(trade.pop('trade_id'))\n return trades\n\n async def create_order(\n self, pair: Pair, side: OrderSide, type_: OrderType,\n quantity: float, price: float = 0, client_id: int = None) -> int:\n \"\"\"Create raw order with buy or sell side.\"\"\"\n data = {\n 'instrument_name': pair.value, 'side': side.value,\n 'type': type_.value\n }\n\n if type_ == OrderType.MARKET and side == OrderSide.BUY:\n data['notional'] = quantity\n else:\n data['quantity'] = quantity\n\n if client_id:\n data['client_oid'] = str(client_id)\n\n if price:\n if type_ == OrderType.MARKET:\n raise ValueError(\n \"Error, MARKET execution do not support price value\")\n data['price'] = price\n\n resp = await self.api.post('private/create-order', {'params': data})\n return int(resp['order_id'])\n\n async def buy_limit(self, pair: Pair, quantity: float, price: float):\n \"\"\"Buy limit order.\"\"\"\n return await self.create_order(\n pair, OrderSide.BUY, OrderType.LIMIT, quantity, price\n )\n\n async def sell_limit(self, pair: Pair, quantity: float, price: float):\n \"\"\"Sell limit order.\"\"\"\n return await self.create_order(\n pair, OrderSide.SELL, OrderType.LIMIT, quantity, price\n )\n\n async def wait_for_status(\n self, order_id: int, pair: Pair, statuses, delay: int = 0.5):\n \"\"\"Wait for order status.\"\"\"\n order = await self.get_order(order_id)\n\n for _ in range(self.api.retries):\n if OrderStatus(order['status']) in statuses:\n break\n\n await asyncio.sleep(delay)\n order = await self.get_order(order_id)\n\n if OrderStatus(order['status']) not in statuses:\n raise ApiError(\n f\"Status not changed for: {order}, must be in: {statuses}\")\n\n async def buy_market(\n self, pair: Pair, spend: float, 
wait_for_fill=False):\n \"\"\"Buy market order.\"\"\"\n order_id = await self.create_order(\n pair, OrderSide.BUY, OrderType.MARKET, spend\n )\n if wait_for_fill:\n await self.wait_for_status(order_id, pair, (\n OrderStatus.FILLED, OrderStatus.CANCELED, OrderStatus.EXPIRED,\n OrderStatus.REJECTED\n ))\n\n return order_id\n\n async def sell_market(\n self, pair: Pair, quantity: float, wait_for_fill=False):\n \"\"\"Sell market order.\"\"\"\n order_id = await self.create_order(\n pair, OrderSide.SELL, OrderType.MARKET, quantity\n )\n\n if wait_for_fill:\n await self.wait_for_status(order_id, pair, (\n OrderStatus.FILLED, OrderStatus.CANCELED, OrderStatus.EXPIRED,\n OrderStatus.REJECTED\n ))\n\n return order_id\n\n async def get_order(self, order_id: int):\n \"\"\"Get order info.\"\"\"\n data = await self.api.post('private/get-order-detail', {\n 'params': {'order_id': str(order_id)}\n })\n data['order_info']['trade_list'] = data.pop('trade_list', [])\n data['order_info']['id'] = int(data['order_info'].pop('order_id'))\n return data['order_info']\n\n async def cancel_order(\n self, order_id: int, pair: Pair, wait_for_cancel=False):\n \"\"\"Cancel order.\"\"\"\n await self.api.post('private/cancel-order', {\n 'params': {'order_id': order_id, 'instrument_name': pair.value}\n })\n\n if not wait_for_cancel:\n return\n\n await self.wait_for_status(order_id, pair, (\n OrderStatus.CANCELED, OrderStatus.EXPIRED, OrderStatus.REJECTED\n ))\n\n async def cancel_open_orders(self, pair: Pair):\n \"\"\"Cancel all open orders.\"\"\"\n return await self.api.post('private/cancel-all-orders', {\n 'params': {'instrument_name': pair.value}\n })\n\n async def listen_balance(self):\n async for data in self.api.listen(\n 'user', 'user.balance', sign=True):\n for balance in data.get('data', []):\n yield balance\n\n async def listen_orders(self, pair: Pair):\n async for data in self.api.listen(\n 'user', f'user.order.{pair.value}', sign=True):\n for order in data.get('data', []):\n order['id'] = int(order.pop('order_id'))\n yield order\n\n # async def listen_trades(self, pair: Pair):\n # async for data in self.api.listen(\n # 'user', f'user.order.{pair.value}', sign=True):\n # yield data\n", "id": "5008574", "language": "Python", "matching_score": 4.092928886413574, "max_stars_count": 0, "path": "src/cryptocom/exchange/base.py" }, { "content": "from .structs import (\n OrderSide, OrderStatus, OrderType, Pair, Period, Candle, Trade\n)\nfrom .base import Exchange, Account\nfrom .api import ApiError, ApiProvider\n\n__all__ = [\n 'OrderSide', 'OrderStatus', 'OrderType', 'Pair',\n 'Period', 'Candle', 'Trade',\n 'Exchange', 'Account',\n 'ApiError', 'ApiProvider'\n]\n\nVERSION = '0.3.3'\n", "id": "7201062", "language": "Python", "matching_score": 0.9712311625480652, "max_stars_count": 0, "path": "src/cryptocom/exchange/__init__.py" } ]
2.751499
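The `cryptocom.exchange` files in the row above define an async Exchange/Account wrapper (market data, limit/market orders, order-status polling). Below is a minimal usage sketch, not code from that repository: it only calls names that appear in the row (Exchange.get_price, Account.buy_limit, get_order, cancel_order, Pair.CRO_USDT), while the API credentials, quantity, and price are placeholder assumptions.

import asyncio

import cryptocom.exchange as cro


async def main():
    # Public market data needs no credentials.
    exchange = cro.Exchange()
    price = await exchange.get_price(cro.Pair.CRO_USDT)

    # Account actions require credentials; these values are placeholders.
    account = cro.Account(api_key="YOUR_KEY", api_secret="YOUR_SECRET")

    # Place a limit buy well below the current price, then cancel it.
    order_id = await account.buy_limit(
        cro.Pair.CRO_USDT, quantity=1, price=round(price / 2, 4))
    order = await account.get_order(order_id)
    print(order["status"])  # far-from-market limit orders should stay ACTIVE
    await account.cancel_order(
        order_id, cro.Pair.CRO_USDT, wait_for_cancel=True)


if __name__ == "__main__":
    asyncio.run(main())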
poojasaini22
[ { "content": "def adding_report(arg):\n if arg==\"A\".lower():\n print(\"Input an integer to add to the total or \\\"Q\\\" to quit\")\n total=0\n item=\"0\\n\"\n add=0\n while True:\n add=(input(\"Enter an integer or 'Q'\").lower())\n if add.isdigit()==True:\n item+=(add +\"\\n\")\n total+=int(add)\n elif add.startswith(\"q\")==True:\n break\n else:\n print(add,\"is invalid input\")\n print(\"items\")\n print(item)\n print(\"total\")\n print(total)\n\n elif arg==\"T\".lower():\n print(\"Input an integer to add to the total or \\\"Q\\\" to quit\")\n total=0\n sum=0\n while True:\n sum=input(\"Enter an integer or 'Q'\").lower()\n if sum.isdigit()==True:\n total+=int(sum)\n elif sum.startswith(\"q\")==True:\n break\n else:\n print(sum,\"is invalid input\")\n print(\"total\")\n print(total)\n else:\n adding_report(input(\"enter 'A' or 'T'\"))\n\nadding_report(input(\"enter 'A' or 'T'\").lower())\n", "id": "11985103", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "F_Project.py" }, { "content": "count_w_s=7\ncount_w_m=6\ncount_w_l=2\ncount_b_s=8\ncount_b_m=5\ncount_b_l=2\ndef available(color,size):\n global count_w_s\n global count_w_m\n global count_w_l\n global count_b_s\n global count_b_m\n global count_b_l\n if color==\"white\":\n if size==\"s\" and count_w_s != 0:\n count_w_s-=1\n print(\"white small size\")\n print(\"order confirm white shirt small size\")\n elif size==\"m\" and count_w_m != 0:\n count_w_m-=1\n print(\"white medium\")\n print(\"order confirm white shirt medium size\")\n elif size==\"l\" and count_w_l != 0:\n count_w_l\n count_w_l -= 1\n print(count_w_l)\n print(\"white large\")\n print(\"order confirm white shirt large size\")\n else:\n print(\"required stock is unavailable at the moment\")\n elif color==\"blue\":\n if size==\"s\" and count_b_s != 0:\n count_b_s-=1\n print(\"blue small size\")\n print(\"order confirm blue shirt small size\")\n elif size==\"m\" and count_b_m != 0:\n count_b_m-=1\n print(\"blue medium\")\n print(\"order confirm blue shirt medium size\")\n elif size==\"l\" and count_b_l != 0:\n count_b_l-=1\n print(\"blue large\")\n print(\"order confirm blue shirt large size\")\n else:\n print(\"required stock is unavailable at the moment\")\n else:\n print(\"color or size invalid\")\nwhile True:\n available(input(\"enter color\"),input(\"enter size\"))\n", "id": "3792691", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "test.py" }, { "content": "#Create the str_analysis() function that takes a string argument. 
In the body of the function:\n#Program: str_analysis() Function\ndef str_analysis(arg):\n while True:\n if arg==\"\":\n str_analysis(input(\"enter\"))\n break\n elif arg.isdigit()==True:\n if int(arg)<=99:\n print(arg,\"smaller number than expected\")\n break\n else:\n print(arg,\"is a pretty big number\")\n break\n elif arg.isalpha()==True:\n print(arg,\"is all alphabetical character\")\n break\n else:\n print(arg,\"is neither all digit characters nor all alpha\")\n break\n\n\nstr_analysis(input(\"enter\"))\n", "id": "4353714", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "str_analysis.py" }, { "content": "def quiz_item(question,solution):\n while True:\n if question==solution:\n print(\"correct\")\n break\n else:\n put=input(\"enter again\")\n if put==solution:\n print(\"correct 2\")\n break\nquiz_item(input(\"longest river in the world?\"),\"nile\")\nquiz_item(input(\"Number greater than 3?\"),\"4\")\n", "id": "7858242", "language": "Python", "matching_score": 0.42228496074676514, "max_stars_count": 0, "path": "whiletest.py" } ]
0.711142
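The scripts in the row above drive their logic through repeated `while True` input loops and duplicated print blocks. As an illustration only (an idiomatic rewrite under the same prompts, not code taken from the repository), the accumulate-and-report idea of adding_report can be expressed as:

def adding_report(mode: str) -> None:
    """Accumulate integers typed by the user until 'Q'.

    mode 'a' prints each item and the total, mode 't' prints only the total.
    """
    mode = mode.strip().lower()
    if mode not in ("a", "t"):
        return adding_report(input("enter 'A' or 'T': "))

    items, total = [], 0
    while True:
        entry = input("Enter an integer or 'Q': ").strip().lower()
        if entry.isdigit():
            items.append(int(entry))
            total += int(entry)
        elif entry.startswith("q"):
            break
        else:
            print(entry, "is invalid input")

    if mode == "a":
        print("items")
        for item in items:
            print(item)
    print("total")
    print(total)


if __name__ == "__main__":
    adding_report(input("enter 'A' or 'T': "))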
alainthierry
[ { "content": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport math\nimport numpy as np\nimport pandas as pd\n\nclass KMeansFromScratch(object):\n \"\"\"\n Implementation of K-means, a clustering machine learning algorithm in the case of\n Unsupervised learning from scratch !\n \n Attributes:\n n_clusters(integer): The number of cluster chosen\n n_iterations(integer): The number of iterations to run the algorithm\n random_state(integer)\n centroids_(list): This is a class attribute that contains the centroid values after\n clusters_(list): List of clustered data\n training(fit)\n \"\"\"\n centroids_ = []\n \n def __init__(self, n_clusters, n_iterations, random_state):\n self.n_clusters = n_clusters\n self.n_iterations = n_iterations\n self.random_state = random_state\n \n def euclidean_distance(self, X, Y):\n \"\"\"\n Compute euclidean distance between X and Y\n\n Arg(s):\n X(array): List of coordinates\n Y(array): List of coordinates\n Return(s):\n distance(float): The distance between X and Y\n \"\"\"\n try:\n distance = np.linalg.norm(Y - X)\n return distance\n \n except Exception as e:\n print(f\"For distance computing, I got this{e}\")\n \n def get_centroids(self, dataset):\n \"\"\"\n Getting randomly centroids values\n\n Arg(s):\n dataset(numpy.array): The whole observations to cluster\n Return(s):\n centroids(nump.array): The randomly picked centroids\n \"\"\"\n np.random.seed(self.random_state)\n centroids = []\n rows = dataset.shape[0]\n\n for _ in range(self.n_clusters):\n centroid_index = np.random.randint(0, rows)\n\n centroids.append(dataset[centroid_index])\n\n return np.array(centroids)\n \n def get_min_index(slef, array):\n \"\"\"\n Given an array of at least two values, return the index of the minimim valu that\n it contains\n\n Arg(s):\n array(array): The list of values where to get the index of the minimum value\n Return(s):\n index(integer): The index of the minimum value that is in the array\n \"\"\"\n\n if len(array) >= 2:\n min_value = array[0]\n index = 0\n\n for i in range(len(array)):\n if min_value > array[i]:\n min_value = array[i]\n index = i\n\n return index\n else:\n return(\"Warning ! The array must contain at least two values ... !\")\n \n def get_centroids_mean(self, data):\n \"\"\"\n Getting the centroid as the mean of each previous cluster as the new centroid\n\n Arg(s):\n data(np.array): This is especially the return of clustering function\n Return(s):\n centroids(np.array): The centroids as means of the previously clustered\n \"\"\"\n\n centroids = []\n\n for k_cluster in range(self.n_clusters):\n centroids.append(data[k_cluster].mean(axis=0))\n\n return np.array(centroids)\n\n \n def clustering(self, data, centroids):\n \"\"\"\n Clustering data points using the euclidean distance between the observs and the\n centroid points\n\n Arg(s):\n data(np.array): The observations to cluster\n centroids(nump.array): The randomly picked centroids\n Return(s):\n data_per_cluster(list): The clusters of the whole observations\n cluster_indexes(list): The clusters' indexes\n \"\"\"\n\n temp = {}\n for k_cluster in range(self.n_clusters):\n liste = []\n\n for observation in data:\n liste.append(self.euclidean_distance(observation, centroids[k_cluster]))\n\n temp[f'k_{k_cluster}'] = liste\n\n \"\"\"\n In this data set below(distances), every column represents a k cluster.\n The row represents. 
The distance between one observation and the whole k clusters.\n \"\"\"\n distances = pd.DataFrame(data = temp).values\n\n \"\"\"\n Clustering observation, data\n \"\"\"\n cluster_indexes = []\n for row in distances:\n cluster_indexes.append(self.get_min_index(row))\n\n data_per_cluster = []\n for k_cluster in range(self.n_clusters):\n classified_observs = []\n\n for index in range(len(cluster_indexes)):\n if k_cluster == cluster_indexes[index]:\n classified_observs.append(data[index])\n\n data_per_cluster.append(np.array(classified_observs))\n\n return data_per_cluster, cluster_indexes\n \n def fit(self, data):\n \"\"\"\n Run K-means clustering n_iterations times\n\n Arg(s):\n data(np.array): The data to cluster\n Return(s):\n (list): The clusters after fitting\n cluster_indexes(list): The clusters' indexes\n \"\"\"\n try:\n centroids = self.get_centroids(data)\n clusters, cluster_indexes = self.clustering(data, centroids)\n \n if self.n_iterations <= 0:\n print(\"The number of iterations must be at least 3 ... !\")\n\n elif self.n_iterations == 1:\n KMeansFromScratch.centroids_ = centroids\n KMeansFromScratch.clusters_ = clusters\n \n return clusters, cluster_indexes\n else:\n for _ in range(self.n_iterations):\n centroids = self.get_centroids_mean(clusters)\n clusters, cluster_indexes = self.clustering(data, centroids) \n \n KMeansFromScratch.centroids_ = centroids\n KMeansFromScratch.clusters_ = clusters\n \n return clusters, cluster_indexes\n\n except Exception as e:\n print(f\"\"\"This {e} has been returned ! The variable data must have the wrong\n data structure ... !\\n Please check the fit function args type by running help(fit)\n ... !\"\"\")\n \n def inertia(self):\n \"\"\"\n This computes the inertia value, the lower is the inertia, the better the model is.\n Sum squares of the difference of each data point and its closest centroid.\n \n Return(s):\n inertia_value(float): The inertia value\n \"\"\"\n centroids = KMeansFromScratch.centroids_\n clusters = KMeansFromScratch.clusters_\n inertia_value = 0\n \n for index in range(self.n_clusters):\n \n for cluster_row in clusters[index]:\n inertia_value +=np.linalg.norm(cluster_row - centroids[index])**2\n \n return inertia_value\n \n \n def predict(self, new_entry):\n \"\"\"\n Predicting a new data point after training the model\n \n Arg(s):\n new_entry(np.array): The new data point to predict using the built model\n Return(s):\n clusters(list): List of clusters\n \"\"\"\n try:\n clusters = []\n centroids = KMeansFromScratch.centroids_\n \n if len(centroids) != 0:\n clusters = self.clustering(new_entry, centroids)[1]\n \n return clusters\n else:\n print(\"\"\" Oops ! I did it again...!\n Please, fit your model by providing the data to the fit method before predicting... 
!\"\"\")\n except Exception as e:\n print(f\"This {e} occurs !\")\n\n\nif __name__ == '__main__':\n\n X = np.array([2, 2])\n Y = np.array([3, 2])\n\n data = pd.read_csv('./data/data.csv').values\n\n model = KMeansFromScratch(n_clusters=4, n_iterations=500, random_state=47)\n clusters = model.fit(data)\n\n print(f\"Centroids \\n{model.centroids_}, centroids length \\n{len(clusters)}\")\n print(f\"Clusters\\n{clusters[1]}\")\n print(f\"The inertia is {model.inertia()}\")\n\n\t", "id": "4466691", "language": "Python", "matching_score": 3.188302755355835, "max_stars_count": 0, "path": "k_means_from_scratch.py" }, { "content": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport math\nimport numpy as np\nimport pandas as pd\nimport unittest \nfrom k_means_from_scratch import KMeansFromScratch\n\nclass TestKMeansFromScratchMethods(unittest.TestCase):\n \"\"\"\n Unit test in order to ensure that our previous class methods run always as defined,\n implemented, and correctly.\n\n Attributes:\n model(KMeansFromScratch): The K-means implementation from scratch\n data(np.array)\n \"\"\"\n model = KMeansFromScratch(n_clusters=3, n_iterations=3, random_state=47)\n data = pd.read_csv('./data/data.csv')[['energie', 'eau']].values\n\n def test_euclidean_distance(self):\n \"\"\"\n \"\"\"\n X = np.array([[2, 2], [3, 2]])\n self.assertEqual(self.model.euclidean_distance(X[0], X[1]), 1.0)\n\n def test_get_centroids(self):\n \"\"\"\n \"\"\"\n centroids = self.model.get_centroids(self.data)\n self.assertEqual(len(centroids), self.model.n_clusters)\n\n def test_get_min_index(self):\n \"\"\"\n \"\"\"\n self.assertEqual(self.model.get_min_index(self.data[-1]), 1)\n\n def test_clustering(self):\n \"\"\"\n \"\"\"\n centroids = self.model.get_centroids(self.data)\n clusters = self.model.clustering(self.data, centroids)\n\n self.assertEqual(len(clusters), 2)\n\n def test_fit(self):\n \"\"\"\n \"\"\"\n clusters = self.model.fit(self.data)\n self.assertEqual(len(clusters), 2)\n\n\nif __name__ == '__main__':\n \"\"\"\n Run\n \"\"\"\n unittest.main()\n", "id": "5261550", "language": "Python", "matching_score": 0.005832056514918804, "max_stars_count": 0, "path": "test.py" }, { "content": "from django.contrib import admin\nfrom store.models import (\n\tCustomer, Booking, Flower\n)\n\"\"\" Register models for admin \"\"\"\n\n\[email protected](Customer)\nclass CustomerAdmin(admin.ModelAdmin):\n\tlist_filter = ['name', 'email']\n\tlist_display = ('name', 'email', 'phone_number')\n\[email protected](Flower)\nclass FlowerAdmin(admin.ModelAdmin):\n list_filter = ['created_at',]\n list_display = ('name', 'description', 'price', 'available', 'image_url')\n\n\[email protected](Booking)\nclass BookingAdmin(admin.ModelAdmin):\n list_filter = ['delivered',]\n list_display = ('customer', 'flower', 'booked_date', 'delivered')\n \n\n\n\n\n", "id": "6028329", "language": "Python", "matching_score": 2.410073757171631, "max_stars_count": 0, "path": "store/admin.py" }, { "content": "from django.contrib import admin\nfrom .models import (User, UserData)\n\n\[email protected](User)\nclass UserAdmin(admin.ModelAdmin):\n list_display = (\"pseudonym\", \"predict_result\", \"predict_proba\", \"predict_date\")\n list_filter = (\"predict_date\", )\n\n\[email protected](UserData)\nclass UserDataAdmin(admin.ModelAdmin):\n\t\"\"\"docstring for UserDataAdmin\"\"\"\n\n\tlist_display = (\n\t\t\"user_age\", \"user_education\", \"husband_education\", \"number_children_ever_born\",\n\t\t\"user_religion\", \"user_working\", \"husband_occupation\", 
\"user\",\n\t)\n\tlist_filter = (\"user_age\", \"user_religion\", )\n\n", "id": "4936305", "language": "Python", "matching_score": 3.3801705837249756, "max_stars_count": 0, "path": "predictMethods/admin.py" }, { "content": "from django.shortcuts import render, get_object_or_404\nfrom django.db import transaction, IntegrityError\n\nfrom .forms import RegisterData, UserLogin, ParagraphErrorList\nfrom .models import User, UserData\n\nimport numpy as np\nimport joblib\n\nfrom django.utils.translation import gettext as _\n\nfrom django.core.mail import send_mail\nfrom django.conf import settings\n\n\ndef index(request):\n context = {\n 'title': _('Predict Contraceptive Method Choice'),\n 'page_title': _('Let\\'s predict your Contraceptive Method Choice'),\n }\n return render(request, _('predictMethods/en/form.html'), context)\n\ndef user_login(request):\n context = {\n 'title' : _('CMC login'),\n 'page_title': _('Check Prediction')\n }\n if request.method == 'GET':\n form = UserLogin(request.GET or None)\n if form.is_valid():\n pseudonym = form.cleaned_data['pseudonym']\n user_age = form.cleaned_data['user_age']\n\n user_pseudo_exist = get_object_or_404(User, pseudonym=pseudonym)\n user_age_exist = UserData.objects.filter(user_age=user_age).filter(\n user=User.objects.get(pseudonym=user_pseudo_exist).id\n )\n if not (User.objects.filter(pseudonym=user_pseudo_exist).exists() and user_age_exist.exists()):\n context['message'] = _(\" Sorry, you did not make any prediction before !\")\n return render(request, _('predictMethods/en/login.html'), context)\n else:\n \"\"\" Everything is OK \"\"\"\n\n context['found'] = True\n user = User.objects.filter(pseudonym=user_pseudo_exist).first()\n context['predict_result'] = user.predict_result\n context['predict_proba'] = user.predict_proba\n context['pseudonym'] = user.pseudonym\n context['title'] = pseudonym + _(\" prediction\")\n context['page_title'] = _(\"prediction of \")+pseudonym\n\n return render(request, _('predictMethods/en/login.html'), context)\n\n return render(request, _('predictMethods/en/login.html'), context)\n\n\ndef about_cmc(request):\n context = {\n 'title': _('About CMC'),\n 'page_title': _('About CMC'),\n }\n return render(request, _('predictMethods/en/about.html'), context)\n\ndef get_sent_data(request):\n context = {}\n if request.method == 'POST':\n form = RegisterData(request.POST, error_class=ParagraphErrorList)\n if form.is_valid():\n\n \"\"\" Load the prediction model \"\"\"\n rfcl_model = joblib.load('predictMethods/methodModels/finalized_model_rfcl.sav')\n\n user_age = int(form.cleaned_data['user_age'])\n pseudonym = form.cleaned_data['pseudonym']\n user_education = int(\"\".join(form.cleaned_data['user_education']))\n husband_education = int(\"\".join(form.cleaned_data['husband_education']))\n number_children_ever_born = int(form.cleaned_data['number_children_ever_born'])\n user_religion = int(\"\".join(form.cleaned_data['user_religion']))\n user_working = int(\"\".join(form.cleaned_data['user_working']))\n husband_occupation = int(\"\".join(form.cleaned_data['husband_occupation']))\n\n new_user_data = np.array(\n [[user_age, user_education, husband_education,\n number_children_ever_born, user_religion, user_working, husband_occupation]]\n )\n \"\"\" Make prediction on data \"\"\"\n context['predict_result'] = int(rfcl_model.predict(new_user_data))\n context['predict_proba'] = round( rfcl_model.predict_proba(new_user_data).max(), 3)\n context['page_title'] = _('Predicted method for ') + pseudonym\n context['pseudonym'] = 
pseudonym\n context['title'] = _(\"Predicted method for \") + pseudonym\n\n try:\n with transaction.atomic():\n user = User.objects.filter(pseudonym=pseudonym)\n if not user.exists():\n\n user = User.objects.create(\n pseudonym = pseudonym,\n predict_result = context['predict_result'],\n predict_proba = context['predict_proba'],\n )\n UserData.objects.create(\n user_age = user_age,\n \tuser_education = user_education,\n \thusband_education = husband_education,\n \tnumber_children_ever_born = number_children_ever_born,\n \tuser_religion = user_religion,\n \tuser_working = user_working,\n \thusband_occupation = husband_occupation,\n \tuser = user,\n )\n \"\"\"subject = 'A NEW PREDICTION HAS BEEN MADE !'\n message = \"\"\n This is to notify you that a new prediction has been made !\n The project is continuing receiving new users !\n \"\"\n email_from = settings.EMAIL_HOST_USER\n recipient_list = ['<EMAIL>',]\n if send_mail( subject, message, email_from, recipient_list ):\n return render(request, _('predictMethods/en/predict_result.html'), context)\n else:\"\"\"\n return render(request, _('predictMethods/en/predict_result.html'), context)\n else:\n \"\"\" The user exists in the database \"\"\"\n user = user.first()\n user_data = UserData.objects.filter(user_age=user_age).only('user')\n\n previous_predict = user.predict_result\n previous_proba = user.predict_proba\n\n if not user_data.exists():\n \"\"\" New user_age value \"\"\"\n\n if (context['predict_result']==previous_predict) and (previous_proba > context['predict_proba']):\n context['message'] = _(\"We're sorry, your data are still the same as the previous prediction you made on this website !\")\n context['predict_result'] = user.predict_result\n context['predict_proba'] = user.predict_proba\n context['page_title'] = _('Your new prediction is worse than your previous one !')\n context['title'] = _('Prediction not allowed')\n return render(request, _('predictMethods/en/predict_result.html'), context)\n\n elif (context['predict_result']==previous_predict) and (previous_proba < context['predict_proba']):\n \"\"\" Update proba and delete the previous data \"\"\"\n\n user.predict_proba = context['predict_proba']\n user_data = UserData.objects.get(user=user)\n user_data.user_age = user_age\n user_data.user_education = user_education\n user_data.husband_education = husband_education\n user_data.number_children_ever_born = number_children_ever_born\n user_data.user_religion = user_religion\n user_data.user_working = user_working\n user_data.husband_occupation = husband_occupation\n\n user.save()\n user_data.save()\n return render(request, _('predictMethods/en/predict_result.html'), context)\n else:\n if (context['predict_result'] !=previous_predict) and (previous_proba > context['predict_proba']):\n context['message'] = _(\"We're sorry, your data are still the same as the previous prediction you made on this website !\")\n context['predict_result'] = user.predict_result\n context['predict_proba'] = user.predict_proba\n context['page_title'] = _('Your new prediction is worse than your previous one !')\n context['title'] = _('Prediction not allowed')\n return render(request, _('predictMethods/en/predict_result.html'), context)\n\n elif (context['predict_result'] !=previous_predict) and (previous_proba < context['predict_proba']):\n \"\"\" Update proba and delete the previous data \"\"\"\n\n user.predict_proba = context['predict_proba']\n user.predict_result = context['predict_result']\n user_data = UserData.objects.get(user=user)\n 
user_data.user_age = user_age\n user_data.user_education = user_education\n user_data.husband_education = husband_education\n user_data.number_children_ever_born = number_children_ever_born\n user_data.user_religion = user_religion\n user_data.user_working = user_working\n user_data.husband_occupation = husband_occupation\n\n user.save()\n user_data.save()\n return render(request, _('predictMethods/en/predict_result.html'), context)\n else:\n context['message'] = _(\"We're sorry, your data are still the same as the previous prediction you made on this website.\")\n context['predict_result'] = user.predict_result\n context['predict_proba'] = user.predict_proba\n context['title'] = _('Prediction not allowed')\n context['page_title'] = _('Your data did not change since your last prediction !')\n return render(request, _('predictMethods/en/predict_result.html'), context)\n else:\n \"\"\" No new prediction allowed, age did not change \"\"\"\n\n context['message'] = _(\" We're sorry, your data are still the same as the previous prediction you made on this website.\")\n context['predict_result'] = user.predict_result\n context['predict_proba'] = user.predict_proba\n context['title'] = _('Prediction not allowed')\n context['page_title'] = _('Your data did not change since your last prediction !')\n return render(request, _('predictMethods/en/predict_result.html'), context)\n except IntegrityError:\n form.errors['internal'] = _(\" An error occurred ! Thank you to try again later ! \")\n else:\n context = {}\n context['title'] = _('Predict Contraceptive Method')\n context['page_title'] = _('Let\\'s predict your Contraceptive Method')\n return render(request, _('predictMethods/en/form.html'), context)\n else:\n pass\n return render(request, _('predictMethods/en/form.html'), context)\n", "id": "11182778", "language": "Python", "matching_score": 3.3011527061462402, "max_stars_count": 0, "path": "predictMethods/views.py" }, { "content": "from django import forms\nfrom django.forms.utils import ErrorList\n\nfrom .models import UserData\n\nUSER_EDUCATION = (\n\t(1, \"LOW\"),\n (2, \"MEDIUM\"),\n (3, \"HIGH\"),\n (4, \"VERY_HIGH\"),\n)\nUSER_OCCUPATION = (\n\t(1, \"LOW\"),\n (2, \"MEDIUM\"),\n (3, \"HIGH\"),\n (4, \"VERY_HIGH\"),\n)\nUSER_RELIGION = (\n\t(0, \"NONE_ISLAM\"),\n (1, \"ISLAM\"),\n)\nUSER_NOW_WORKING = (\n\t(0, \"YES\"),\n (1, \"NO\"),\n)\nclass RegisterData(forms.Form):\n\t\"\"\"docstring for RegisterData\"\"\"\n\n\tpseudonym = forms.CharField(required=True, max_length=20)\n\tuser_age = forms.IntegerField(required=True, min_value=16)\n\tuser_education = forms.MultipleChoiceField(required=True,choices=USER_EDUCATION)\n\thusband_education = forms.MultipleChoiceField(required=True, choices = USER_EDUCATION)\n\tnumber_children_ever_born = forms.IntegerField(required=True, min_value=0, max_value=36)\n\tuser_religion = forms.MultipleChoiceField(required=True, choices = USER_RELIGION)\n\tuser_working = forms.MultipleChoiceField(required=True, choices = USER_NOW_WORKING)\n\thusband_occupation = forms.MultipleChoiceField(required=True, choices = USER_OCCUPATION)\n\nclass UserLogin(forms.Form):\n\t\"\"\"docstring for UserLogin.\"\"\"\n\n\tpseudonym = forms.CharField(\n\t\trequired=True,\n\t\tmax_length=20,\n\t)\n\tuser_age = forms.IntegerField(\n\t\trequired=True,\n\t\tmin_value=16,\n\t\tmax_value=100,\n\t)\n\nclass ParagraphErrorList(ErrorList):\n\t\"\"\"docstring for ParagraphErrorList\"\"\"\n\tdef __str__(self):\n\t\treturn self.as_divs()\n\n\tdef as_divs(self):\n\t\tif not self: return 
''\n\t\treturn '<div class=\"errorlist\">%s</div>' % ''.join(['<p class=\"small error\">%s</p>' % e for e in self])\n", "id": "5423950", "language": "Python", "matching_score": 2.182556390762329, "max_stars_count": 0, "path": "predictMethods/forms.py" }, { "content": "from django import forms\n\nclass CustomerForm(forms.Form):\n name = forms.CharField(max_length=100)\n email = forms.EmailField()\n phone_number = forms.CharField(max_length=100)\n flower_id = forms.IntegerField()", "id": "476091", "language": "Python", "matching_score": 1.3643933534622192, "max_stars_count": 0, "path": "store/forms.py" }, { "content": "from django.shortcuts import render, get_object_or_404\nfrom django.db import transaction, IntegrityError\n\nfrom django.core.paginator import (\n\tPaginator, PageNotAnInteger, EmptyPage\n)\n\nfrom .models import (\n\tFlower, Booking, Customer\n)\t\n\nfrom .forms import CustomerForm\n\ndef index(request):\n\t\"\"\"\n\tThe home view that renders the main template\n\t\n\tArgs:\n\t\trequest (request): The request\n\n Returns:\n \t(HttpResponse): The status_code of the HttpResponse\n\t\"\"\"\n\tflowers = Flower.objects.filter(available=True).order_by('-created_at')[:6]\n\tcontext = {\n\t\t'page_title': 'La beauté dans les Fleurs',\n\t\t'title': 'Les Fleurs raconte une vie, Trouvez la vôtre',\n\t\t'flowers': flowers,\n\t}\n\treturn render(request, 'store/list_flower.html', context)\n\n\ndef detail(request, flower_id):\n\t\"\"\"\n\tThe home view that renders the main template\n\t\n\tArgs:\n\t\trequest (request): The request\n\t\tflower_id (integer): The flower id\n\n Returns:\n \t(HttpResponse): The status_code of the HttpResponse\n\t\"\"\"\n\tflower = get_object_or_404(Flower, pk=flower_id)\n\tcontext = {\n\t\t'page_title': flower.name,\n\t\t'title': flower.name,\n\t\t'flower': flower,\n\t}\n\t\"\"\" Handle booking step \"\"\"\n\tif request.method == 'POST':\n\t\tform = CustomerForm(request.POST)\n\n\t\tif form.is_valid():\n\t\t\temail = form.cleaned_data['email']\n\t\t\tname = form.cleaned_data['name']\n\t\t\tphone_number = form.cleaned_data['phone_number']\n\t\t\tflower_id = form.cleaned_data['flower_id']\n\n\t\t\ttry:\n\t\t\t\twith transaction.atomic():\n\t\t\t\t\tcustomer = Customer.objects.filter(email=email)\n\t\t\t\t\tif not customer.exists():\n\t\t\t\t\t\tcustomer = Customer.objects.create(\n\t\t\t\t\t\t\temail=email,\n\t\t\t\t\t\t\tname=name,\n\t\t\t\t\t\t\tphone_number=phone_number\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcustomer = customer.first()\n\t\t\t\t\tflower = get_object_or_404(Flower, pk=flower_id)\n\n\t\t\t\t\tbooking = Booking.objects.filter(flower=flower)\n\t\t\t\t\tif not booking.exists():\n\t\t\t\t\t\tbooking = Booking.objects.create(\n\t\t\t\t\t\t\tcustomer=customer,\n\t\t\t\t\t\t\tflower=flower\n\t\t\t\t\t\t)\n\t\t\t\t\t\tflower.available = False\n\t\t\t\t\t\tflower.save()\n\t\t\t\t\t\tcontext = {\n\t\t\t\t\t\t\t'page_title': flower.name,\n\t\t\t\t\t\t\t'title': \"Merci de nous faire confiance !\",\n\t\t\t\t\t\t}\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontext = {\n\t\t\t\t\t\t\t'page_title': flower.name,\n\t\t\t\t\t\t\t'title': \"Merci de nous faire confiance !\",\n\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcontext['message'] = \"\"\"\n\t\t\t\t\t\tCette fleur n'est plus disponible dans le magasin. 
Veuillez choisir une \n\t\t\t\t\t\tautre !\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\treturn render(request, 'store/thank.html', context)\n\t\t\t\t\t\t\n\t\t\t\t\treturn render(request, 'store/thank.html', context)\n\n\t\t\texcept IntegrityError:\n\t\t\t\tform.errors['internal'] = \"Une erreur interne est apparue. Merci de recommencer votre requête.\"\n\n\treturn render(request, 'store/detail.html', context)\n\n\ndef listing(request):\n\t\"\"\"\n\tThe home view that renders the main template\n\t\n\tArgs:\n\t\trequest (request): The request\n\n Returns:\n \t(HttpResponse): The status_code of the HttpResponse\n\t\"\"\"\n\tflower_list = Flower.objects.filter(available=True)\n\tpaginator = Paginator(flower_list, 6)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tflowers = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tflowers = paginator.page(1)\n\texcept EmptyPage:\n\t\tflowers = paginator.page(paginator.num_pages)\n\tcontext = {\n\t'page_title': 'La beauté dans les Fleurs',\n\t'title': 'Toutes nos fleurs',\n\t'flowers': flowers\n\t}\n\treturn render(request, 'store/listing.html', context)\n\n\ndef search(request):\n\t\"\"\"\n\tThe home view that renders the main template\n\t\n\tArgs:\n\t\trequest (request): The request\n\n Returns:\n \t(HttpResponse): The status_code of the HttpResponse\n\t\"\"\"\n\tif request.method == 'GET':\n\t\tquery = request.GET.get('query')\n\t\tcontext = {\n\t\t\t'page_title': 'La beauté dans les Fleurs',\n\t\t\t'query': query\n\t\t}\n\t\tif not query:\n\t\t\tflowers = Flower.objects.filter(available=True).order_by('-created_at')[:6]\n\t\telse:\n\t\t\tflowers = Flower.objects.filter(name__icontains=query, available=True)\n\t\tif not flowers.exists():\n\t\t\tflowers = Flower.objects.filter(description__icontains=query, available=True)\n\telse:\n\t\tflowers = Flower.objects.filter(available=True).order_by('-created_at')[:6]\n\t\t\n\ttitle = \"Résultats pour la requête %s\"%query\n\tcontext['flowers'] = flowers\n\tcontext['title'] = title\n\n\treturn render(request, 'store/search_result.html', context)\n\n\ndef about(request):\n\t\"\"\"\n\tThe home view that renders the main template\n\t\n\tArgs:\n\t\trequest (request): The request\n\n Returns:\n \t(HttpResponse): The status_code of the HttpResponse\n\t\"\"\"\n\tcontext = {\n\t\t'page_title': 'A propos de la La beauté dans les Fleurs',\n\t\t'title': 'A propos de nous !',\n\t}\n\treturn render(request, 'store/about.html', context)", "id": "8006847", "language": "Python", "matching_score": 2.498227834701538, "max_stars_count": 0, "path": "store/views.py" }, { "content": "from django.test import TestCase\nfrom django.urls import reverse\n\nfrom .models import Flower, Customer, Booking\n\n\nclass IndexPageTestCase(TestCase):\n\t\"\"\"docstring for IndexPageTestCase\n\n\tThe IndexPageTestCase object is to test page return status,\n\n\t\"\"\"\n\t\n\tdef test_inddex_page(self):\n\t\tresponse = self.client.get(reverse('home'))\n\t\tself.assertEqual(response.status_code, 200)\n\n\nclass DetailPageTestCase(TestCase):\n\n\tdef setUp(self):\n\t\tfleur = Flower.objects.create(\n\t\t\tname=\"Les fleurs\",\n\t\t\tdescription=\"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\",\n\t\t\tprice=45.52,\n\t\t\timage_url=\"https://www.cdiscount.com/pdt2/1/1/6/1/700x700/neu8435300764116/rw/24-tetes-marguerite-bouquet-de-fleur-artificielle.jpg\"\n\t\t)\n\t\tself.flower = Flower.objects.get(name=\"Les fleurs\")\n\n\n\t\"\"\" test that detail page returns a 200 if the item exists \"\"\"\n\tdef 
test_detail_page_returns_200(self):\n\n\t\tresponse = self.client.get(reverse('store:detail', args=(self.flower.id, )))\n\t\tself.assertEqual(response.status_code, 200)\n\n\n\tdef test_detail_page_returns_404(self):\n\n\t\tresponse = self.client.get(reverse('store:detail', args=(self.flower.id+1, )))\n\t\tself.assertEqual(response.status_code, 404)\n\n\nclass BookingPageTestCase(TestCase):\n\n\tdef setUp(self):\n\t\tfleur = Flower.objects.create(\n\t\t\tname=\"Les fleurs\",\n\t\t\tdescription=\"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\",\n\t\t\tprice=45.52,\n\t\t\timage_url=\"https://www.cdiscount.com/pdt2/1/1/6/1/700x700/neu8435300764116/rw/24-tetes-marguerite-bouquet-de-fleur-artificielle.jpg\"\n\t\t)\n\n\t\tcustomer = Customer.objects.create(\n\t\t\temail=\"<EMAIL>\",\n\t\t\tname=\"<NAME>\",\n\t\t\tphone_number=\"212650038174\"\n\t\t)\n\n\t\tself.flower = Flower.objects.get(name=\"Les fleurs\")\n\t\tself.customer = Customer.objects.get(email=\"<EMAIL>\")\n\n\t\"\"\" test that a new booking is made \"\"\"\n\tdef test_new_booking_is_registered(self):\n\t\tcustomer = self.customer\n\t\tflower_id = self.flower.id\n\t\told_bookings = Booking.objects.count()\n\n\t\tresponse = self.client.post(reverse('store:detail', args=(flower_id,)), {\n\t\t\t'email': customer.email,\n\t\t\t'name': customer.name,\n\t\t\t'phone_number': customer.phone_number,\n\t\t\t'flower_id': flower_id\n\t\t})\n\t\tnew_bookings = Booking.objects.count()\n\t\tself.assertEqual(new_bookings, old_bookings + 1)\n\n\t\"\"\" test that a booking belongs to a customer \"\"\"\n\n\tdef test_new_booking_belongs_to_a_contact(self):\n\t\tcustomer = self.customer\n\t\tflower_id = self.flower.id\n\n\t\tresponse = self.client.post(reverse('store:detail', args=(flower_id,)), {\n\t\t\t'email': customer.email,\n\t\t\t'name': customer.name,\n\t\t\t'phone_number': customer.phone_number,\n\t\t\t'flower_id': flower_id\n\t\t})\n\t\tbooking = Booking.objects.create(\n\t\t\tcustomer=self.customer,\n\t\t\tflower=self.flower\n\t\t)\n\t\tself.assertEqual(self.customer, booking.customer)\n\n\n\n\n\n\n", "id": "12220741", "language": "Python", "matching_score": 1.28839910030365, "max_stars_count": 0, "path": "store/tests.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"contraceptive_method_choice.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/14Wab6GFhA08hhVtMUbpyjZAWahDGHB8C\n\n# Dataset Description\n<p>This dataset is a subset of the 1987 National Indonesia Contraceptive Prevalence Survey. The samples are married women who were either not pregnant or do not know if they were at the time of interview. 
The problem is to predict the current contraceptive method choice<strong>(no use, long-term methods, or short-term methods)</strong> of a woman based on her demographic and socio-economic characteristics.\n<a href=\"https://archive.ics.uci.edu/ml/datasets/Contraceptive+Method+Choice\" target=\"_blank\" >cmc</a></p>\n\n#### Inclusion of needed packages and libraries\n\"\"\"\n\n# Commented out IPython magic to ensure Python compatibility.\n# %matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom pandas.plotting import scatter_matrix\nfrom scipy.stats import pearsonr\nfrom scipy.stats import shapiro, normaltest, chi2_contingency\nALPHA = 0.05\n\nsns.set()\nsns.set_theme()\nplt.style.use('classic')\nplt.rcParams.update({ \"font.family\": \"serif\",})\n\n\"\"\"#### <b> Note that LR is for LogisticRegression </b>\n\n### Dataset Uploading\n\"\"\"\n\nheaders = ['wife_age', 'wife_education', 'husband_education', 'number_children_ever_born',\n 'wife_religion', 'wife_working', 'husband_occupation', 'standard_living',\n 'media_exposure', 'contraceptive_method_used']\nfilename = \"../data/cmc.data\"\ndf = pd.read_csv(filename, names=headers)\n\n\"\"\"### Data Preprocessing\"\"\"\n\nheaders[:7]\n\nprint(f'Dataset info : \\n {df.info()} \\n Dataset Variables data types : \\n {df.dtypes}')\n\nprint(f'Contains no values : \\n {df.isnull().sum()} \\n Contains NaN : \\n {df.isna().sum()}')\n\ndf.shape\n\ndf.head(7)\n\ndf['contraceptive_method_used'].value_counts()\nsns.histplot(df['contraceptive_method_used'])\nplt.show()\n\n\"\"\"The dataset contains 10 variables with 1473 observations in which there 9 predictors and 1 target (contraceptive_method_choice). All observations are integers and the dataset contains no missing values. The target has two types of value 1 (no-use), 2 (short-term) and 3 (long-term).This is a prediction by classifying if the woman's contraceptive method choice doesn't exist(no-use) ,is short-term method usage and long-term method. The observations per class are 629 for no-use, 511 for short-term method and 333 for long-term method.<b> So, the contraceptive_method_choice class is unbalanced.</b>\n\n---\n\n#### Descriptive Statistics\n\"\"\"\n\ndf.describe()\n\n\"\"\"The wife age ranges between 16 and 49. The average is around 32 years old. The standard deviation is around 8, so the observations of the wife_age variable are not too scattered. 
<b>We'll need to standardize predictors(to put them in a same range of values)</b>\"\"\"\n\ncorrelations = df.corr(method='pearson')\ncorrelations.style.background_gradient()\n\n\"\"\" Correlation matrix plot \"\"\"\nplt.style.use('classic')\nplt.rcParams.update({ \"font.family\": \"serif\",})\nfig = plt.figure(figsize=(10, 5))\nax = plt.axes()\ncax = ax.matshow(correlations, vmin=-1, vmax=1)\ncbar = fig.colorbar(cax)\nticks = np.arange(0,10,1)\nax.set_xticks(ticks)\nax.set_yticks(ticks)\nax.set_yticklabels(headers)\nplt.show()\n\npearsonr(df['wife_education'], df['husband_education'])\n\nplt.figure(figsize=(10, 4))\nplt.rcParams.update({ \"font.family\": \"serif\", \"font.size\": 13})\nsns.scatterplot(\n data=df, x=\"wife_education\", y=\"husband_education\", hue=\"contraceptive_method_used\",\n palette = 'deep'\n)\nplt.legend()\nplt.show()\n\nstat, p = pearsonr(df['wife_education'], df['husband_education'])\nprint('stat=%.3f, p=%.3f' % (stat, p))\nif p > 0.05:\n\tprint('Probably independent')\nelse:\n\tprint('Probably dependent')\n\n\"\"\"This shows that the wife_education and the husband_eduction variables are moderately correlated. Others predictors than wife_education and husband_education are not correlated.<b>The correlation is given with a p-value under 0.001 though the scatter plot doesn't show that. The pearson test confirms that wife_education and husband_education are dependant</b> We can assume these other predictors are independant with the target.\"\"\"\n\ndf.hist(bins=20, layout=(5, 5), sharex=False, figsize=(17, 15))\nplt.show()\n\ndf.plot(kind='density', subplots=True, layout=(5, 5), sharex=False, figsize=(17, 15))\nplt.show()\n\n\"\"\"This above plots show that the predictors number children_ever_born and wife_age except the others seem to have a certain normal distribution.\"\"\"\n\ndf.plot(kind = 'box', subplots=True, layout=(5, 5), sharex=False, figsize=(17, 15))\nplt.show()\n\n\"\"\"The observations of wife_age predictors are well distributed, which is not the same case for others predictors.\n\n#### Hypotheses Tests (Gaussianity)\n\nH0: the sample has a Gaussian distribution.\n H1: the sample does not have a Gaussian distribution.\n\n##### The Shapiro Test\n\"\"\"\n\nfor predictor_name in headers:\n stat, p = shapiro(df[predictor_name])\n if p > ALPHA:\n print(f'{predictor_name} probably Gaussian')\n else:\n print(f'{predictor_name} probably not Gaussian')\n\n\"\"\"##### The Normaltest Test\"\"\"\n\nfor predictor_name in headers:\n stat, p = normaltest(df[predictor_name])\n if p > ALPHA:\n print(f'{predictor_name} probably Gaussian')\n else:\n print(f'{predictor_name} probably not Gaussian')\n\n\"\"\"<b>After these two hypotheses tests, we can confirm that no predictor is gaussian, has a normal distribution for a confidence interval of 95%.</b>\n\n#### Hypotheses Tests (Independance)\n\nH0: the two samples are independent.\n H1: there is a dependency between the samples.\n\n##### The Chi-Squared Test\n\"\"\"\n\nfor predictor_name in headers:\n table = pd.crosstab( df['contraceptive_method_used'], df[predictor_name] )\n stat, p, dof, expected = chi2_contingency(table)\n if p > ALPHA:\n pass\n print(f'contraceptive_method_used & {predictor_name} are probably independent')\n else:\n print(f'contraceptive_method_used & {predictor_name} are probably dependent')\n\nfor predictor_name1 in headers:\n for predictor_name2 in headers:\n table = pd.crosstab( df[predictor_name1], df[predictor_name2] )\n stat, p, dof, expected = chi2_contingency(table)\n # print('stat=%.3f, 
p=%.3f' % (stat, p))\n print(f'stat={np.round(stat, 3)}, p={np.round(p, 4)}')\n if p > ALPHA:\n pass\n print(f'{predictor_name1} & {predictor_name2} are probably independent')\n else:\n print(f'{predictor_name1} & {predictor_name2} are probably dependent')\n\n\"\"\"##### All predictors except wife_working and the target (contraceptive_method_used) are dependant. Then, we can assume that most accurate variables for predicting contraceptive_method_used target are all the predictors except <b>wife_working</b> with a confidence interval of 95%.\n\n#### Data Standardization\n\"\"\"\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import(\n train_test_split, KFold, cross_val_score, RepeatedStratifiedKFold, GridSearchCV\n)\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\n\nfrom sklearn.metrics import (\n classification_report, confusion_matrix, accuracy_score, mean_absolute_error\n)\n\nX = df.drop('contraceptive_method_used', axis=1)\ny = df[\"contraceptive_method_used\"]\nseed = 7\n\nscaler = StandardScaler()\nscaled_X = scaler.fit_transform(X)\n\n\"\"\"##### Data Splitting\"\"\"\n\nX_train, X_test, y_train, y_test = train_test_split(scaled_X, y, test_size=0.3, random_state=seed)\n\n\"\"\"Algorithms to test \"\"\"\n\nmodels = []\nmodels.append(('LR', LogisticRegression()))\nmodels.append(('LDA', LinearDiscriminantAnalysis()))\nmodels.append(('KNN', KNeighborsClassifier()))\nmodels.append(('CART', DecisionTreeClassifier()))\nmodels.append(('NB', GaussianNB()))\nmodels.append(('SVM', SVC()))\n\n\"\"\"Printing training accuracy\"\"\"\n\nresults = []\nnames = []\nfor name, model in models:\n kfold = KFold(n_splits=10, random_state=seed, shuffle=True)\n cv_results = cross_val_score(model, X_train, y_train, cv=kfold, scoring='accuracy')\n results.append(cv_results)\n names.append(name)\n print(f\"{name} accuracy ({cv_results.mean()}) std ({cv_results.std()})\")\n\n\"\"\" Plotting Comparison \"\"\" \nplt.style.use('classic')\nplt.rcParams.update({ \"font.family\": \"serif\",})\nfig = plt.figure(figsize=(10, 4))\nfig.suptitle('Algorithm Comparison')\nax = fig.add_subplot(111)\nplt.boxplot(results)\nax.set_xticklabels(names)\nplt.ylabel('Training Accuracy')\nplt.show()\n\n\"\"\"##### <b>The SVM (SVC) classifier has the largest training accuracy, but it's not good enough. 
Can we do better ?</b>\n\n#### Parameters Tuning\n\n###### Tuning LogisticRegression parameters (Regularization, max_iter)\n\n###### <b> For penalty = l1 </b>\n\"\"\"\n\nmax_iters = [1_000, 10_000, 100_000, 10000000]\n\nfor iter in max_iters:\n\n clf_l1_LR = LogisticRegression(penalty='l1', tol=0.01, solver='saga', max_iter=iter, multi_class='multinomial')\n kfold = KFold(n_splits=10, random_state=seed, shuffle=True)\n cv_results = cross_val_score(clf_l1_LR, X_train, y_train, cv=kfold, scoring='accuracy')\n print(f'Accuracy ({cv_results.mean()}), std ({cv_results.std()})')\n\n\"\"\"###### <b> For penalty = l2 </b>\"\"\"\n\nmax_iters = [1_000, 10_000, 100_000, 10000000]\n\nfor iter in max_iters:\n\n clf_l2_LR = LogisticRegression(penalty='l2', tol=0.01, solver='newton-cg', max_iter=iter, multi_class='multinomial')\n kfold = KFold(n_splits=10, random_state=seed, shuffle=True)\n cv_results = cross_val_score(clf_l2_LR, X_train, y_train, cv=kfold, scoring='accuracy')\n print(f'Accuracy ({cv_results.mean()}), std ({cv_results.std()})')\n\n\"\"\"###### <b> For penalty = elasticnet </b>\"\"\"\n\nmax_iters = [1_000, 10_000, 100_000, 10000000]\nl1_ratio = 0.5\n\nfor iter in max_iters:\n clf_en_LR = LogisticRegression(penalty='elasticnet', solver='saga',l1_ratio=0.5, tol=0.01, max_iter=iter, multi_class='multinomial')\n\n kfold = KFold(n_splits=10, random_state=seed, shuffle=True)\n cv_results = cross_val_score(clf_en_LR, X_train, y_train, cv=kfold, scoring='accuracy')\n print(f'Accuracy ({cv_results.mean()}), std ({cv_results.std()})')\n\n\"\"\"##### No improvement, the training and test accuracy remain the same as in the default configuration.\n\n##### ''' Prediction Accuracy, confusion_matrix in default configuration '''\n\"\"\"\n\n\"\"\" Prediction Accuracy, confusion_matrix in default configuration \"\"\"\nmodel = LogisticRegression()\nfitted = model.fit(X_train, y_train)\ny_hat = fitted.predict(X_test)\n\nprint(f\"LogisticRegression\\n Prediction Accuracy {accuracy_score(y_test, y_hat)} \\n {confusion_matrix(y_test, y_hat)} \\n {classification_report(y_test, y_hat)} \")\n\n\"\"\" Prediction Accuracy, confusion_matrix in tuned configuration \"\"\"\n\nmax_iters = [1_000, 10_000, 100_000, 10000000]\n\nfor iter in max_iters:\n model = LogisticRegression(penalty='l2', tol=0.01, solver='newton-cg', max_iter=iter, multi_class='multinomial')\n fitted = model.fit(X_train, y_train)\n y_hat = fitted.predict(X_test)\n print(f\"Max_iterations : {iter}\")\n print(f\"{accuracy_score(y_test, y_hat)} \\n {confusion_matrix(y_test, y_hat)} \\n {classification_report(y_test, y_hat)} \")\n\n\"\"\"##### No improvement, the training and test accuracy remain the same as in the default configuration.\n\n###### <b>Tuning LDA parameters</b>\n\"\"\"\n\nmodel = LinearDiscriminantAnalysis()\ncv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)\ngrid = {'solver': ['svd', 'lsqr', 'eigen']}\nsearch = GridSearchCV(model, grid, scoring='accuracy', cv=cv, n_jobs=-1)\nresults = search.fit(X_train, y_train)\nprint('Training Accuracy: %.3f' % results.best_score_)\nprint('Config: %s' % results.best_params_)\n\n\"\"\"##### No improvement, the training and test accuracy remain the same as in the default configuration\"\"\"\n\nmodel = LinearDiscriminantAnalysis(solver='lsqr')\nfit = model.fit(X_train, y_train)\ny_hat = fit.predict(X_test)\n\nprint(f\"LDA Prediction Accuracy {accuracy_score(y_test, y_hat)} \\n {confusion_matrix(y_test, y_hat)} \\n {classification_report(y_test, y_hat)} \")\n\n\"\"\"###### <b>The best 
LDA configuration is obtained with lsqr solver. The LDA does better than the LogisticRegression in both training and test accuracy</b>\n\n###### <b>Tuning KNN parameters</b>\n\"\"\"\n\ncv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)\ngrid_params = {\n 'n_neighbors': list(range(1, 10)),\n 'weights' : ['uniform', 'distance'],\n 'algorithm': ['ball_tree', 'kd_tree', 'brute', 'auto'],\n 'metric' : ['euclidean', 'manhattan', 'minkowski'],\n}\n\nsearch = GridSearchCV(KNeighborsClassifier(n_jobs=-1), grid_params, scoring='accuracy', cv=cv, n_jobs=-1)\nresults = search.fit(X_train, y_train)\nprint('Mean Accuracy: %.3f' % results.best_score_)\nprint('Config: %s' % results.best_params_)\n\n\"\"\"###### This is better than the default configuration of KNeighborsClassifier. The best configuration is <b>Config: {'algorithm': 'ball_tree', 'metric': 'manhattan', 'n_neighbors': 9, 'weights': 'uniform'}</b>\n---\n\n\n\"\"\"\n\nmodel = KNeighborsClassifier(n_neighbors=9, weights='uniform',algorithm='ball_tree', metric='manhattan')\nfit = model.fit(X_train, y_train)\ny_hat = fit.predict(X_test)\nprint(f\"KNN Prediction Accuracy {accuracy_score(y_test, y_hat)} \\n Confusion Matrix \\n {confusion_matrix(y_test, y_hat)} \\n {classification_report(y_test, y_hat)} \")\n\n\"\"\"##### Tuning CART DecisionTreeClassifier\"\"\"\n\ncv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)\ngrid_params = {\n 'criterion': ['entropy', 'gini'],\n 'splitter' : ['best', 'random'],\n}\n\nsearch = GridSearchCV(DecisionTreeClassifier(), grid_params, scoring='accuracy', cv=cv, n_jobs=-1)\nresults = search.fit(X_train, y_train)\nprint('Mean Accuracy: %.3f' % results.best_score_)\nprint('Config: %s' % results.best_params_)\n\n\"\"\"<b>The DecisionTreeClassifier does not do better than the default configuration</b>\n\n###### Tuning SVM (*SVC*) parameters\n\"\"\"\n\ncv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=seed)\nparam_grid = {'C': [0.1, 1, 10, 100, 1000, 10000, 100000],\n 'gamma': [1, 0.1, 0.01, 0.001, 0.0001],\n 'decision_function_shape': ['ovo', 'ovr'],\n 'kernel': ['linear', 'poly', 'rbf'] }\n \ngrid = GridSearchCV(SVC(shrinking=True), param_grid, scoring='accuracy', n_jobs=-1, refit=True,cv=cv, verbose=3) \nresults = grid.fit(X_train, y_train) \n\nprint('Mean Accuracy: %.3f' % results.best_score_)\nprint('Config: %s' % results.best_params_)\n\n\"\"\"This is better than the default configuration of SVM SVC classifier. 
The <b>training accuracy is 0.554 when the training accuracy of the default configuration is 0.5372.</b>\n\n###### SVM (SVC) Prediction Accuracy\n\"\"\"\n\nmodel = SVC(C=100, kernel='rbf', gamma=0.01, shrinking=True, probability=True)\nfitted = model.fit(X_train, y_train)\n\ny_hat = fitted.predict(X_test)\nprint(f\"SVM(SVC) Prediction Accuracy {accuracy_score(y_test, y_hat)} \\n Confusion Matrix \\n{confusion_matrix(y_test, y_hat)} \\n {classification_report(y_test, y_hat)} \")\n\n\"\"\"###### Prediction Accuracy of all algorithms\"\"\"\n\nmodels = []\nmodels.append(('LR', LogisticRegression()))\nmodels.append(('LDA', LinearDiscriminantAnalysis(solver='lsqr')))\nmodels.append(('KNN', KNeighborsClassifier(n_neighbors=9,weights='uniform', algorithm='ball_tree', metric='manhattan')))\nmodels.append(('CART', DecisionTreeClassifier()))\nmodels.append(('NB', GaussianNB()))\nmodels.append(('SVM', SVC(C=100, kernel='rbf', gamma=0.01, shrinking=True, probability=True)))\n\nresults = []\nnames = []\nfor name, model in models:\n kfold = KFold(n_splits=3)\n cv_results = cross_val_score(model, X_train, y_train, cv=kfold, scoring='accuracy')\n results.append(cv_results)\n names.append(name)\n\n fitted = model.fit(X_train, y_train)\n y_hat = fitted.predict(X_test)\n print(f\"{name} Training TAccuracy ({name, cv_results.mean()}) STD ({cv_results.std()})\")\n print(f\"{name} Prediction Accuracy {accuracy_score(y_test, y_hat)} \\n {confusion_matrix(y_test, y_hat)} \\n {classification_report(y_test, y_hat)} \")\n\n\"\"\" Plotting Comparison \"\"\" \nplt.style.use('classic')\nplt.rcParams.update({ \"font.family\": \"serif\",})\nfig = plt.figure(figsize=(10, 4))\nfig.suptitle('Algorithm Comparison')\nax = fig.add_subplot(111)\nplt.boxplot(results)\nplt.ylabel('Training Accuracy')\n\nax.set_xticklabels(names)\nplt.show()\n\n\"\"\"##### The SVM (SVC) does <b>much better than all other tested algorithms with a training accuracy of 0.545 and 0.5497 as testing accuracy</b>. 
Can we do better again ?\"\"\"\n\nfrom sklearn.neural_network import MLPClassifier\n\nfitted = model.fit(X_train, y_train)\n\ncv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=seed)\nkfold = KFold(n_splits=3)\ncv_results = cross_val_score(model, X_train, y_train, cv=cv, scoring='accuracy')\nprint(cv_results.mean(), cv_results.std())\n\nmodel = MLPClassifier(random_state=seed, hidden_layer_sizes=100, activation='tanh',\n solver='lbfgs', alpha=0.0001,max_iter = 10000,\n )\nfitted = model.fit(X_train, y_train)\n\ncv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=seed)\n# kfold = KFold(n_splits=3)\ncv_results = cross_val_score(model, X_train, y_train, cv=cv, scoring='accuracy')\nprint(cv_results.mean(), cv_results.std())\n\n\"\"\"##### The neuronal network MLPClassifier is worse for both training and prediction accuracy\"\"\"\n\nfrom sklearn.ensemble import(\n BaggingClassifier, RandomForestClassifier,ExtraTreesClassifier\n)\n\nensembles = []\nensembles.append(('BCL', BaggingClassifier()))\nensembles.append(('RFCL', RandomForestClassifier()))\nensembles.append(('ETCL', ExtraTreesClassifier()))\n\nresults = []\nnames = []\nfor name, ensemble in ensembles:\n kfold = KFold(n_splits=3)\n \n cv_results = cross_val_score(ensemble, X_train, y_train, cv=kfold, scoring=\"accuracy\")\n results.append(cv_results)\n names.append(name)\n print(f\"{name} Training accuracy ({cv_results.mean()}) SDT ({cv_results.std()})\")\n\n\"\"\" Plotting Comparison \"\"\" \nplt.style.use('seaborn-deep')\nplt.rcParams.update({ \"font.family\": \"serif\",})\nfig = plt.figure(figsize=(10, 4))\nfig.suptitle('Algorithm Comparison')\nax = fig.add_subplot(111)\nplt.boxplot(results)\nax.set_xticklabels(names)\nplt.show()\n\n\"\"\"###### <b> The RandomForestClassifier performs better as the SVM (SVC) classifier<b>.\n\n### <b>Feature Selection</b>\n\"\"\"\n\nX = df.drop('contraceptive_method_used', axis=1)\ny = df[\"contraceptive_method_used\"]\nfrom sklearn.feature_selection import SelectKBest, chi2\n\n\"\"\"###### Predictors Selection with <b>chi2</b>\"\"\"\n\nbest = SelectKBest(score_func=chi2, k=3)\nX_NEW = best.fit_transform(X, y)\n\nX_NEW_train, X_NEW_test, y_new_train, y_new_test = train_test_split(X_NEW, y, test_size=0.3,random_state=seed)\n\n\"\"\" Adding Algorithms \"\"\"\nmodels = []\nmodels.append(('LR', LogisticRegression(max_iter=1000)))\nmodels.append(('LDA', LinearDiscriminantAnalysis(solver='lsqr')))\nmodels.append(('KNN', KNeighborsClassifier(n_neighbors=9,weights='uniform', algorithm='ball_tree', metric='manhattan')))\nmodels.append(('SVM', SVC(C=100, kernel='rbf', gamma=0.01, shrinking=True, probability=True)))\nmodels.append(('RFCL', RandomForestClassifier(criterion='entropy')))\n\n\"\"\" Cross_validation \"\"\"\nresults = []\nnames = []\nfor name, model in models:\n kfold = KFold(n_splits=3)\n\n cv_results = cross_val_score(model, X_NEW_train, y_new_train, cv=kfold, scoring='accuracy')\n results.append(cv_results)\n names.append(name)\n\n fitted = model.fit(X_NEW_train, y_new_train)\n y_hat = fitted.predict(X_NEW_test)\n print(f\"{name} Training TAccuracy ({name, cv_results.mean()}) STD ({cv_results.std()})\")\n print(f\"{name} Prediction Accuracy {accuracy_score(y_new_test, y_hat)} \\n {confusion_matrix(y_new_test, y_hat)} \\n {classification_report(y_new_test, y_hat)} \")\n\n\"\"\" Cross_validation \"\"\"\nresults = []\nnames = []\nfor name, model in models:\n kfold = KFold(n_splits=3)\n\n cv_results = cross_val_score(model, X_NEW_train, y_new_train, cv=kfold, 
scoring='accuracy')\n results.append(cv_results)\n names.append(name)\n\n fitted = model.fit(X_NEW_train, y_new_train)\n y_hat = fitted.predict(X_NEW_test)\n print(f\"{name} Training TAccuracy ({name, cv_results.mean()}) STD ({cv_results.std()})\")\n print(f\"{name} Prediction Accuracy {accuracy_score(y_new_test, y_hat)} \\n {confusion_matrix(y_new_test, y_hat)} \\n {classification_report(y_new_test, y_hat)} \")\n\n\"\"\" Plot Algorithms Comparison \"\"\"\n\"\"\" Plotting Comparison \"\"\" \nplt.style.use('classic')\nplt.rcParams.update({ \"font.family\": \"serif\",})\nfig = plt.figure(figsize=(10, 4))\nfig.suptitle('Algorithm Comparison')\nax = fig.add_subplot(111)\nplt.boxplot(results)\nax.set_xticklabels(names)\nplt.show()\n\n\"\"\"<b>The SVM (SVC) remains much better than other algorithms for k predictors, k = 3.</b>\n\n<b> Seek the best k fold </b>\n\"\"\"\n\nlist_training_error = []\nlist_testing_error = []\ndata = df.values\nX = data[:, :9]\ny = data[:, 9]\n\nkf = KFold(n_splits=20)\n\nfor train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n model = SVC(C=100, kernel='rbf', gamma=0.01, shrinking=True, probability=True)\n \n model.fit(X_train, y_train)\n y_train_data_pred = model.predict(X_train)\n y_test_data_pred = model.predict(X_test)\n\n fold_training_error = mean_absolute_error(y_train, y_train_data_pred) \n fold_testing_error = mean_absolute_error(y_test, y_test_data_pred)\n\n list_training_error.append(fold_training_error)\n list_testing_error.append(fold_testing_error)\n\nplt.figure(figsize=(12, 5))\nplt.subplot(1,2,1)\nplt.plot(range(1, kf.get_n_splits() + 1), np.array(list_training_error).ravel(), 'o-')\nplt.xlabel('number of fold')\nplt.ylabel('training error')\nplt.title('Training error across folds')\nplt.tight_layout()\nplt.subplot(1,2,2)\nplt.plot(range(1, kf.get_n_splits() + 1), np.array(list_testing_error).ravel(), 'o-')\nplt.xlabel('number of fold')\nplt.ylabel('testing error')\nplt.title('Testing error across folds')\nplt.tight_layout()\nplt.show()\n\n\"\"\"##### <b>It's k = 3 as used in the cross_validation of SVC Classifier.</b> Can we improve with PCA\"\"\"\n\nfrom sklearn.decomposition import PCA\n\npca = PCA()\npca.fit(scaled_X)\n\nexp_variance = pca.explained_variance_ratio_\nfig, ax = plt.subplots()\nax.bar(range(pca.n_components_), exp_variance)\nax.set_xlabel('Principal Component #')\n\n\"\"\"It's not clear to see where the elbow appears, but take 6 components of 85%.\"\"\"\n\ncum_exp_variance = np.cumsum(exp_variance)\n\nfig, ax = plt.subplots()\nax.plot(cum_exp_variance)\nax.axhline(y=0.85, linestyle='--')\n\nn_components = 6\n\npca = PCA(n_components, random_state=seed)\npca.fit(scaled_X)\npca_projection = pca.transform(scaled_X)\n\nX_train_pca, X_test_pca, y_train_pca, y_test_pca = train_test_split(\n pca_projection, df['contraceptive_method_used'], random_state=seed,\n)\n\nmodel = SVC()\nfitted = model.fit(X_train_pca, y_train_pca)\n\ncross_val_results = cross_val_score(model, X_train_pca, y_train_pca, scoring='accuracy', cv=KFold(n_splits=3))\nprint(cross_val_results.mean(), cross_val_results.std())\n\ny_hat_pca = fitted.predict(X_test_pca)\nprint(f\"PCA SVM(SVC) Prediction Accuracy {accuracy_score(y_test_pca, y_hat_pca)} \\n Confusion Matrix \\n{confusion_matrix(y_test_pca, y_hat_pca)} \\n {classification_report(y_test_pca, y_hat_pca)} \")\n\n\"\"\"<b>This is not better than the chi2 feature selection.</b> Let's balance the classes ?\n\n###### <>With stratify 
arg<>\n\"\"\"\n\nX = df.drop('contraceptive_method_used', axis=1)\nscaled_X = scaler.fit_transform(X)\ny = df[\"contraceptive_method_used\"]\nX_bal_train, X_bal_test, y_bal_train, y_bal_test = train_test_split(scaled_X, y, test_size=0.2, random_state=seed,stratify=y)\n\nmodel = SVC(C=100, kernel='rbf', gamma=0.01, shrinking=True, probability=True)\nfitted = model.fit(X_bal_train, y_bal_train)\ncross_val_results = cross_val_score(model, X_bal_train, y_bal_train, scoring='accuracy', cv=KFold(n_splits=10))\nprint(f\"SVM(SVC) \\nTraining Accuracy ({cross_val_results.mean()}), STD ({cross_val_results.std()})\")\n\ny_hat_pca = fitted.predict(X_bal_test)\nprint(f\"Prediction Accuracy {accuracy_score(y_bal_test, y_hat_pca)} \\n Confusion Matrix \\n{confusion_matrix(y_bal_test, y_hat_pca)} \\n {classification_report(y_bal_test, y_hat_pca)} \")\n\n\"\"\"###### <b> Balancing the data by sampling </b>\"\"\"\n\ndf_class_1 = df[df['contraceptive_method_used'] == 1]\ndf_class_2 = df[df['contraceptive_method_used'] == 2]\ndf_class_3 = df[df['contraceptive_method_used'] == 3]\n\ndf_class_2 = df_class_2.sample(df_class_1.shape[0], replace=True, random_state=seed)\ndf_class_3 = df_class_3.sample(df_class_1.shape[0], replace=True, random_state=seed)\n\ndata = pd.concat([df_class_1, df_class_2, df_class_3])\nprint(data['contraceptive_method_used'].value_counts())\n\n\"\"\"<b>The classes are now well balanced</b>\"\"\"\n\nX_train, X_test, y_train, y_test = train_test_split(\n scaler.fit_transform(data.drop('contraceptive_method_used', axis=1)), data['contraceptive_method_used'],\n test_size=0.3, random_state=seed, stratify=data['contraceptive_method_used']\n)\n\nmodel = SVC(C=100, kernel='rbf', gamma=0.01, shrinking=True, probability=True)\n\nfitted = model.fit(X_train, y_train)\ncross_val_results = cross_val_score(model, X_train, y_train, scoring='accuracy', cv=KFold(n_splits=10))\nprint(f\"SVM(SVC) \\nTraining Accuracy ({cross_val_results.mean()}), STD ({cross_val_results.std()})\")\n\ny_hat = fitted.predict(X_test)\nprint(f\"Prediction Accuracy {accuracy_score(y_test, y_hat)} \\n Confusion Matrix \\n{confusion_matrix(y_test, y_hat)} \\n {classification_report(y_test, y_hat)} \")\n\n\"\"\"<b>The SVM (SVC) does much better than in the unbalanced case.</b> Can we do better?\n\n<b>Chi2 Feature Selection after balancing classes</b>\n\"\"\"\n\nbest = SelectKBest(score_func=chi2, k=7)\ny = data['contraceptive_method_used']\nX = data.drop('contraceptive_method_used', axis=1)\nX_NEW = best.fit_transform(X, y)\n\nX_NEW_train, X_NEW_test, y_new_train, y_new_test = train_test_split(\n X_NEW, y, test_size=0.3, random_state=seed, stratify=data['contraceptive_method_used'],\n )\n\n\"\"\" Adding Algorithms \"\"\"\nmodels = []\nmodels.append(('LR', LogisticRegression(max_iter=1000)))\nmodels.append(('LDA', LinearDiscriminantAnalysis(solver='lsqr')))\nmodels.append(('KNN', KNeighborsClassifier(n_neighbors=9,weights='uniform', algorithm='ball_tree', metric='manhattan')))\nmodels.append(('SVM', SVC(C=100, kernel='rbf', gamma=0.01, shrinking=True, probability=True)))\nmodels.append(('RFCL', RandomForestClassifier()))\n\n\"\"\" Cross_validation \"\"\"\nresults = []\nnames = []\nfor name, model in models:\n kfold = RepeatedStratifiedKFold(n_splits=13, n_repeats=3, random_state=seed)\n\n cv_results = cross_val_score(model, X_NEW_train, y_new_train, cv=kfold, scoring='accuracy')\n results.append(cv_results)\n names.append(name)\n\n fitted = model.fit(X_NEW_train, y_new_train)\n y_hat = fitted.predict(X_NEW_test)\n 
print(f\"{name} Training Accuracy ({name, cv_results.mean()}) STD ({cv_results.std()})\")\n print(f\"{name} Prediction Accuracy {accuracy_score(y_new_test, y_hat)} \\n {confusion_matrix(y_new_test, y_hat)} \\n {classification_report(y_new_test, y_hat)} \")\n\n\"\"\" Plot Algorithms Comparison \"\"\"\n\"\"\" Plotting Comparison \"\"\" \nplt.style.use('classic')\nplt.rcParams.update({ \"font.family\": \"serif\",})\nfig = plt.figure(figsize=(10, 4))\nfig.suptitle('Algorithm Comparison')\nax = fig.add_subplot(111)\nplt.boxplot(results)\nax.set_xticklabels(names)\nplt.show()\n\n\"\"\"###### <b> The RandomForestClassifier does much better than the SVC classifier as shown by the plot above</b>\n\n#### Let's check again PCA\n\"\"\"\n\npca = PCA()\npca.fit(scaler.fit_transform(data.drop('contraceptive_method_used', axis=1)))\n\nexp_variance = pca.explained_variance_ratio_\nfig, ax = plt.subplots()\nax.bar(range(pca.n_components_), exp_variance)\nax.set_xlabel('Principal Component #')\n\ncum_exp_variance = np.cumsum(exp_variance)\n\nfig, ax = plt.subplots()\nax.plot(cum_exp_variance)\nax.axhline(y=0.85, linestyle='--')\nn_components = 6\npca = PCA(n_components, random_state=seed)\npca.fit(scaled_X)\npca_projection = pca.transform(scaled_X)\n\nX_train_pca, X_test_pca, y_train_pca, y_test_pca = train_test_split(\n pca_projection, df['contraceptive_method_used'], random_state=seed,\n)\n\nmodel = SVC(C=100, kernel='rbf', gamma=0.01, shrinking=True, probability=True)\nfitted = model.fit(X_train_pca, y_train_pca)\n\ncross_val_results = cross_val_score(model, X_train_pca, y_train_pca, scoring='accuracy', cv=KFold(n_splits=16))\nprint(cross_val_results.mean(), cross_val_results.std())\n\ny_hat_pca = fitted.predict(X_test_pca)\nprint(f\"PCA SVM(SVC) Prediction Accuracy {accuracy_score(y_test_pca, y_hat_pca)} \\n Confusion Matrix \\n{confusion_matrix(y_test_pca, y_hat_pca)} \\n {classification_report(y_test_pca, y_hat_pca)} \")\n\n\"\"\"<b>This is worse according to the previous algorithms. 
Then, by conclusion, the better model is obtained with k = 7 features(chi2 selection) with RandomForestClassifier.</b>\"\"\"\n\nlist_training_error = []\nlist_testing_error = []\nvalues = data.values\nX = values[:, :7]\ny = values[:, 9]\n\nkf = KFold(n_splits=20)\n\nfor train_index, test_index in kf.split(X):\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n model = RandomForestClassifier()\n \n model.fit(X_train, y_train)\n y_train_data_pred = model.predict(X_train)\n y_test_data_pred = model.predict(X_test)\n\n fold_training_error = mean_absolute_error(y_train, y_train_data_pred) \n fold_testing_error = mean_absolute_error(y_test, y_test_data_pred)\n\n list_training_error.append(fold_training_error)\n list_testing_error.append(fold_testing_error)\n\nplt.figure(figsize=(12, 5))\nplt.subplot(1,2,1)\nplt.plot(range(1, kf.get_n_splits() + 1), np.array(list_training_error).ravel(), 'o-')\nplt.xlabel('number of fold')\nplt.ylabel('training error')\nplt.title('Training error across folds')\nplt.tight_layout()\nplt.subplot(1,2,2)\nplt.plot(range(1, kf.get_n_splits() + 1), np.array(list_testing_error).ravel(), 'o-')\nplt.xlabel('number of fold')\nplt.ylabel('testing error')\nplt.title('Testing error across folds')\nplt.tight_layout()\nplt.show()\n\n\"\"\"<b>The best kfold value is between 15 and 17 for cross validation</b>\"\"\"\n\nbest = SelectKBest(score_func=chi2, k=7)\ny = data['contraceptive_method_used']\nX = data.drop('contraceptive_method_used', axis=1)\nX_NEW = best.fit_transform(X, y)\n\nX_NEW_train, X_NEW_test, y_new_train, y_new_test = train_test_split(\n X_NEW, y, test_size=0.3, random_state=seed,\n )\n\nmodel = RandomForestClassifier()\n\"\"\" Cross_validation \"\"\"\n\ncv_results = cross_val_score(model, X_NEW_train, y_new_train, cv=KFold(n_splits=16), scoring='accuracy')\nfitted = model.fit(X_NEW_train, y_new_train)\ny_hat = fitted.predict(X_NEW_test)\n\nprint(f\"RFCL Training Accuracy ({cv_results.mean()}) STD ({cv_results.std()})\")\nprint(f\"RFCL Prediction Accuracy {accuracy_score(y_new_test, y_hat)} \\n {confusion_matrix(y_new_test, y_hat)} \\n {classification_report(y_new_test, y_hat)} \")\n\n\"\"\"<b>The RFCL wins and the needed predictors are the seven obtained with chi2 for feature selection for predicting with an accuracy of 0.714 and the confusion matrix is much better than the one of the SVM (SVC) and other algorithms</b>.\n\n#### Ensemble with balanced class ( Contraceptive_method_used ) and chi2 feature selection\n\"\"\"\n\nbest = SelectKBest(score_func=chi2, k=7)\ny = data['contraceptive_method_used']\nX = data.drop('contraceptive_method_used', axis=1)\nX_NEW = best.fit_transform(X, y)\n\nX_train, X_test, y_train, y_test = train_test_split(X_NEW, y, test_size=0.3, random_state=seed)\n\nresults = []\nnames = []\nfor name, ensemble in ensembles:\n kfold = KFold(n_splits=16)\n \n cv_results = cross_val_score(ensemble, X_train, y_train, cv=kfold, scoring=\"accuracy\")\n results.append(cv_results)\n names.append(name)\n\n fitted = ensemble.fit(X_train, y_train)\n y_hat = fitted.predict(X_test)\n\n print(f\"{name} Error Test Rate {((y_hat != y_test).sum())/data.shape[0]*100}\")\n print(f\"{name} Training accuracy ({cv_results.mean()}) SDT ({cv_results.std()})\")\n print(f\"{name} Prediction Accuracy {accuracy_score(y_test, y_hat)} \\n {confusion_matrix(y_test, y_hat)} \\n {classification_report(y_test, y_hat)} \")\n\n\"\"\" Plotting Comparison \"\"\" \nplt.style.use('seaborn-deep')\nplt.rcParams.update({ \"font.family\": 
\"serif\",})\nfig = plt.figure(figsize=(10, 4))\nfig.suptitle('Algorithm Comparison')\nax = fig.add_subplot(111)\nplt.boxplot(results)\nax.set_xticklabels(names)\nplt.show()\n\n\"\"\"#### <b>Conclusion : compare to other ensemble algorithms and SVM (SVC), RandomForestClassifier remains the best with 9% as test error rate, the best training and prediction accuracy and the confusion matrix.</b>\n\n#### <b> Test the first 7 predictors of the balanced dataframe </b>\n\"\"\"\n\ndf_balanced = data.values\nX_balanced = df_balanced[:,:7]\ny_balanced = df_balanced[:,9]\nimport pickle\n\nX_train, X_test, y_train, y_test = train_test_split(X_balanced, y_balanced, test_size=0.3, random_state=seed)\n\nmodel = RandomForestClassifier()\n\"\"\" Cross_validation \"\"\"\n\ncv_results = cross_val_score(model, X_train, y_train, cv=KFold(n_splits=16), scoring='accuracy')\nmodel_rfcl = model.fit(X_train, y_train)\ny_hat = model_rfcl.predict(X_test)\n\nprint(f\"RFCL Training Accuracy ({cv_results.mean()}) STD ({cv_results.std()})\")\nprint(f\"RFCL Prediction Accuracy {accuracy_score(y_test, y_hat)} \\n {confusion_matrix(y_test, y_hat)} \\n {classification_report(y_test, y_hat)} \")\n\nresults = []\nnames = []\nfor name, ensemble in ensembles:\n kfold = KFold(n_splits=16)\n \n cv_results = cross_val_score(ensemble, X_train, y_train, cv=kfold, scoring=\"accuracy\")\n results.append(cv_results)\n names.append(name)\n\n fitted = ensemble.fit(X_train, y_train)\n y_hat = fitted.predict(X_test)\n\n print(f\"{name} Error Test Rate {((y_hat != y_test).sum())/data.shape[0]*100}\")\n print(f\"{name} Training accuracy ({cv_results.mean()}) SDT ({cv_results.std()})\")\n print(f\"{name} Prediction Accuracy {accuracy_score(y_test, y_hat)} \\n {confusion_matrix(y_test, y_hat)} \\n {classification_report(y_test, y_hat)} \")\n\n\"\"\" Plotting Comparison \"\"\" \nplt.style.use('seaborn-deep')\nplt.rcParams.update({ \"font.family\": \"serif\",})\nfig = plt.figure(figsize=(10, 4))\nfig.suptitle('Algorithm Comparison')\nax = fig.add_subplot(111)\nplt.boxplot(results)\nax.set_xticklabels(names)\nplt.show()\n\n\"\"\"###### <b>Finally : compare to other ensemble algorithms and SVM (SVC), RandomForestClassifier remains the best test error rate value between 8% and 9%, the best training and prediction accuracy and the confusion matrix with the first seven predictors of the balanced. These are the only variables that impact the woman's contraceptive method that she uses or will use</b>\n\n#### <b>Save the model for reuse purposes</b>\n\"\"\"\n\nmodel_filename = 'finalized_model_rfcl.sav'\nimport joblib\njoblib.dump(model_rfcl, model_filename)\n\n\n\n", "id": "12138922", "language": "Python", "matching_score": 1.6840248107910156, "max_stars_count": 0, "path": "scripts/contraceptive_method_choice.py" }, { "content": "from django.apps import AppConfig\n#from suit.apps import DjangoSuitConfig\n\n\nclass PredictmethodsConfig(AppConfig):\n name = 'predictMethods'", "id": "10629151", "language": "Python", "matching_score": 0.8916986584663391, "max_stars_count": 0, "path": "predictMethods/apps.py" }, { "content": "from django.urls import re_path, path\nfrom . 
import views\n\napp_name = 'predictMethods'\n\nurlpatterns = [\n path('', views.index, name='home'),\n path('predict_methods/result/', views.get_sent_data, name='predict'),\n path('predict_methods/login/', views.user_login, name='login'),\n path('predict_methods/about_cmc', views.about_cmc, name='about'),\n]\n", "id": "10925503", "language": "Python", "matching_score": 2.0581626892089844, "max_stars_count": 0, "path": "predictMethods/urls.py" }, { "content": "from django.urls import path, re_path\nfrom . import views\n\n\napp_name = 'store'\n\nurlpatterns = [\n\tre_path(r'^search/', views.search, name='search'),\n\tre_path(r'^flower/(?P<flower_id>[0-9]+)/$', views.detail, name='detail'),\n\tre_path(r'^$', views.listing, name='listing'),\n\tre_path(r'^about/', views.about, name='about'),\n]", "id": "9463948", "language": "Python", "matching_score": 0.3271273374557495, "max_stars_count": 0, "path": "store/urls.py" }, { "content": "# Generated by Django 3.1.3 on 2021-04-01 20:35\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=30, verbose_name='Nom')),\n ('email', models.EmailField(max_length=254, unique=True, verbose_name='Courrier électronique')),\n ('phone_number', models.CharField(max_length=20, unique=True, verbose_name='Téléphone')),\n ],\n options={\n 'verbose_name': 'Client',\n 'verbose_name_plural': 'Clients',\n },\n ),\n migrations.CreateModel(\n name='Flower',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=30, verbose_name='Nom')),\n ('description', models.TextField(verbose_name='Description')),\n ('price', models.FloatField(verbose_name='Prix')),\n ('available', models.BooleanField(default=True)),\n ('image_url', models.URLField(max_length=3000, verbose_name='Image')),\n ('created_at', models.DateField(auto_now_add=True, verbose_name=\"Date d'ajout \")),\n ],\n options={\n 'verbose_name': 'Fleur',\n 'verbose_name_plural': 'Fleurs',\n },\n ),\n migrations.CreateModel(\n name='Booking',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('booked_date', models.DateField(auto_now_add=True, verbose_name='Date de réservation')),\n ('delivered', models.BooleanField(default=False, verbose_name='Livraison terminée')),\n ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.customer')),\n ('flower', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.flower')),\n ],\n options={\n 'verbose_name': 'Réservation',\n 'verbose_name_plural': 'Réservations',\n },\n ),\n ]\n", "id": "7800163", "language": "Python", "matching_score": 6.294925212860107, "max_stars_count": 0, "path": "store/migrations/0001_initial.py" }, { "content": "from django.db import models\n\n\"\"\" ALL MODELS \"\"\"\n\nclass Customer(models.Model):\n\t\"\"\"docstring for Customer\n\tThis object represents the customer\n\n\tAttributes:\n\t\tname (str): The customer name,\n\t\temail(str) : The customer email,\n\t\tphone_number(str): The customer phone number,\n\t\"\"\"\n\tname = models.CharField(\"Nom\", max_length=30)\n\temail = models.EmailField(\"Courrier électronique\", 
unique=True)\n\tphone_number = models.CharField(\"Téléphone\", max_length=20, unique=True)\n\n\tdef __str__(self):\n\t\treturn self.name\n\n\tclass Meta:\n\t\tverbose_name = \"Client\"\n\t\tverbose_name_plural = \"Clients\"\n\nclass Flower(models.Model):\n\t\"\"\"docstring for Flower\n\tThis object reprents the flower that wil be booked\n\n\tAttributes:\n\t\tname (str): The name of the flower,\n\t\tdescription (str): The flower description,\n\t\tprice (integer): The flower price,\n\t\tavailable (bool): Is the flower available ?,\n\t\timage_url(str): The flower image,\n\t\tbooking (Booking): The Booking that contains the booked flower,\n\t\"\"\"\n\tname = models.CharField(\"Nom\", max_length=30)\n\tdescription = models.TextField(\"Description\")\n\tprice = models.FloatField(\"Prix\", null=False)\n\tavailable = models.BooleanField(default=True)\n\timage_url = models.URLField(\"Image\", max_length=3000)\n\tcreated_at = models.DateField(\"Date d'ajout \", auto_now_add=True)\n\n\tclass Meta:\n\t\tverbose_name = \"Fleur\"\n\t\tverbose_name_plural = \"Fleurs\"\n\n\tdef __str__(self):\n\t\treturn self.name\n\nclass Booking(models.Model):\n\t\"\"\"docstring for Booking\n\tThis object represents the booking\n\n\tAttributes:\n\t\tbooked_date (Date): The booked date,\n\t\tdelivery_date (Date): The delivery date,\n\t\tdelivered (bool): Is the delivery done ?,\n\t\tcustomer (Customer): The customer who made the booking,\n\n\t\"\"\"\n\tbooked_date = models.DateField(\"Date de réservation\", auto_now_add=True)\n\tdelivered = models.BooleanField(\"Livraison terminée\", default=False)\n\tcustomer = models.ForeignKey(Customer, on_delete=models.CASCADE)\n\tflower = models.ForeignKey(Flower, on_delete=models.CASCADE)\n\t\n\tclass Meta:\n\t\tverbose_name = \"Réservation\"\n\t\tverbose_name_plural = \"Réservations\"\n\n\n\tdef __str__(self):\n\t\treturn self.customer\n\n", "id": "11526203", "language": "Python", "matching_score": 2.37392520904541, "max_stars_count": 0, "path": "store/models.py" }, { "content": "# Generated by Django 3.1.6 on 2021-02-14 13:49\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('pseudonym', models.CharField(max_length=15)),\n ('predictResult', models.PositiveSmallIntegerField(choices=[(1, 'No Use'), (2, 'Long Term'), (3, 'Short Term')])),\n ('predictDate', models.DateField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='UserData',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('user_age', models.PositiveSmallIntegerField()),\n ('user_education', models.PositiveSmallIntegerField(choices=[(1, 'Low'), (2, 'Medium'), (3, 'High'), (4, 'Very High')])),\n ('husband_education', models.PositiveSmallIntegerField(choices=[(1, 'Low'), (2, 'Medium'), (3, 'High'), (4, 'Very High')])),\n ('number_children_ever_born', models.PositiveSmallIntegerField()),\n ('user_religion', models.PositiveSmallIntegerField(choices=[(0, 'Noneislam'), (1, 'Islam')])),\n ('user_working', models.PositiveSmallIntegerField(choices=[(0, 'Yes'), (1, 'No')])),\n ('husband_occupation', models.PositiveSmallIntegerField(choices=[(1, 'Low'), (2, 'Medium'), (3, 'High'), (4, 'Very High')])),\n ('user', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='predictMethods.user')),\n ],\n ),\n ]\n", "id": "4095298", "language": "Python", "matching_score": 5.344038963317871, "max_stars_count": 0, "path": "predictMethods/migrations/0001_initial.py" }, { "content": "from django.db import models\n\nclass User(models.Model):\n\t\"\"\"docstring for User\"\"\"\n\n\tclass UserContraceptiveMethod(models.IntegerChoices):\n\t\t\"\"\"docstring for UserContraceptiveMethod\"\"\"\n\t\tNO_USE = 1\n\t\tLONG_TERM = 2\n\t\tSHORT_TERM = 3\n\n\t\"\"\" unique pseudonym \"\"\"\n\tpseudonym = models.CharField(max_length = 20, unique=True)\n\tpredict_result = models.PositiveSmallIntegerField(choices = UserContraceptiveMethod.choices)\n\tpredict_date = models.DateField(auto_now_add = True)\n\tpredict_proba = models.FloatField(default=0)\n\n\tdef __str__(self):\n\t\treturn self.pseudonym\n\nclass UserData(models.Model):\n\t\"\"\"docstring for UserData\n\t\"\"\"\n\tclass UserEducation(models.IntegerChoices):\n\t\t\"\"\"docstring for UserEducation\"\"\"\n\t\tLOW = 1\n\t\tMEDIUM = 2\n\t\tHIGH = 3\n\t\tVERY_HIGH = 4\n\n\tclass UserReligion(models.IntegerChoices):\n\t\t\"\"\"docstring for UserReligion\"\"\"\n\t\tNONE_ISLAM = 0\n\t\tISLAM = 1\n\n\tclass UserNowWorking(models.IntegerChoices):\n\t\t\"\"\"docstring for UserNowWorking\"\"\"\n\t\tYES = 0\n\t\tNO = 1\n\n\tuser_age = models.PositiveSmallIntegerField()\n\tuser_education = models.PositiveSmallIntegerField(choices = UserEducation.choices)\n\thusband_education = models.PositiveSmallIntegerField(choices = UserEducation.choices)\n\tnumber_children_ever_born = models.PositiveSmallIntegerField()\n\tuser_religion = models.PositiveSmallIntegerField(choices = UserReligion.choices)\n\tuser_working = models.PositiveSmallIntegerField(choices = UserNowWorking.choices)\n\thusband_occupation = models.PositiveSmallIntegerField(choices = UserEducation.choices)\n\tuser = models.OneToOneField(User, on_delete=models.CASCADE)\n", "id": "12466428", "language": "Python", "matching_score": 2.7058258056640625, "max_stars_count": 0, "path": "predictMethods/models.py" }, { "content": "# Generated by Django 3.1.6 on 2021-02-16 01:42\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('predictMethods', '0001_initial'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='user',\n old_name='predictDate',\n new_name='predict_date',\n ),\n migrations.RenameField(\n model_name='user',\n old_name='predictResult',\n new_name='predict_result',\n ),\n migrations.AlterField(\n model_name='user',\n name='pseudonym',\n field=models.CharField(max_length=20),\n ),\n migrations.AlterField(\n model_name='userdata',\n name='user_religion',\n field=models.PositiveSmallIntegerField(choices=[(0, 'None Islam'), (1, 'Islam')]),\n ),\n ]\n", "id": "10995742", "language": "Python", "matching_score": 2.6634044647216797, "max_stars_count": 0, "path": "predictMethods/migrations/0002_auto_20210216_0242.py" }, { "content": "# Generated by Django 3.1.6 on 2021-02-16 02:05\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('predictMethods', '0002_auto_20210216_0242'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='user',\n name='predict_proba',\n field=models.FloatField(default=0),\n ),\n ]\n", "id": "9326565", "language": "Python", "matching_score": 2.432096481323242, "max_stars_count": 0, "path": "predictMethods/migrations/0003_user_predict_proba.py" } ]
2.410074
cwusinich
[ { "content": "#CW 5.8.2020\n#makes swarm file in swarm folder for copying OG MID dataset into a new dataset; also makes SAM command swarm files (sam_cov, sam_wts, and sam_3d)\n#if you run this twice on the same date with the same arguments, it will overwrite the old files (on purpose!)\n\nimport os\nfrom datetime import datetime\n\n#set current date for use in filenames\ndate_today=datetime.now().strftime('%m%d%Y')\n\n\n### MAKE NEWDS SWARM FILE\ndef make_swarm_newDs(subjectlist,newds,marker,timewindow,swarmdir,subdir, origds='_MID-f.ds'):\n\t'''Makes swarm file for creating new MEG datasets based on input parameters.'''\n\t#create newDs swarm file and add info about how to run it\n\tswarmfile_name=f'{swarmdir}/newDs_{date_today}.swarm'\n\tswarmfile=open(swarmfile_name,'w+')\t\n\tswarmcommand=f'Run newDs swarm using this command: swarm -f {swarmfile_name} -g 15 -t auto --module ctf --logdir {swarmdir}/swarm_logs'\n\tswarmfile.write(f'#{swarmcommand}\\n')\n\tswarmfile.close()\n\n\t#read list of subjects\t\n\tsublist=open(subjectlist, 'r')\n\tproclist=sublist.readlines()\n\tproclist=map(lambda x: x.strip(), proclist)\n\n\t#append line to swarm file for each subject for newDs command\n\tfor sub in proclist:\n\t\tswarmfile=open(swarmfile_name,'a')\n\t\tswarmfile.write(f'set -e ; cd {subdir}sub-{sub}/meg ; newDs -marker {marker} -time {timewindow} {sub}{origds} {sub}{newds}\\n')\n\t\tswarmfile.close()\n\n\t#print message that it is complete, and include command for running swarm file\n\tprint('Swarm file has been added! \\n')\n\tprint(swarmcommand)\n\n\n### MAKE SAM SWARM FILES\ndef make_swarm_sam(subjectlist,ds,marker,freqband,swarmdir,subdir):\n\t#read list of subjects\t\n\tsublist=open(subjectlist, 'r')\n\tproclist=sublist.read().splitlines()\n\tsublist.close()\n\n\t#set paramfile name for use in sam command\n\tparamfile_name=f'{freqband}_{marker}.param'\n\n\t#create sam_cov swarm file and add info about how to run it\n\tswarmfile_name=f'{swarmdir}/sam_cov_{freqband}_{marker}_{date_today}.swarm'\n\tswarmfile=open(swarmfile_name,'w+')\t\n\tswarmcommand_cov=f'Run sam_cov swarm using this command: swarm -f {swarmfile_name} -g 15 -t auto --module samsrcv3 --logdir {swarmdir}/swarm_logs'\n\tswarmfile.write('#' + swarmcommand_cov + '\\n')\n\tswarmfile.close()\n\n\t#append line to swarm file for each subject for sam_cov command\n\tfor sub in proclist:\n\t\tswarmfile=open(swarmfile_name,'a')\n\t\tswarmfile.write(f'set -e ; cd {subdir}sub-{sub}/meg ; sam_cov -r {sub}{ds} -m {paramfile_name} -v \\n')\n\t\tswarmfile.close()\n\n\n\t#now to make a swarm file for sam_wts\n\tswarmfile_name=f'{swarmdir}/sam_wts_{freqband}_{marker}_{date_today}.swarm'\n\tswarmfile=open(swarmfile_name,'w+')\t\n\tswarmcommand_wts=f'Run sam_wts swarm using this command: swarm -f {swarmfile_name} -g 15 -t auto --module samsrcv3 --logdir {swarmdir}/swarm_logs'\n\tswarmfile.write(f'#{swarmcommand_wts}\\n')\n\tswarmfile.close()\n\tfor sub in proclist:\n\t\tswarmfile=open(swarmfile_name,'a')\n\t\tswarmfile.write(f'set -e ; cd {subdir}sub-{sub}/meg ; sam_wts -r {sub}{ds} -m {paramfile_name} -v --MRIPattern %M/%s -H hull.shape \\n')\n\t\tswarmfile.close()\n\n\n\t#and finally make sam_3d swarm file\n\tswarmfile_name=f'{swarmdir}/sam_3d_{freqband}_{marker}_{date_today}.swarm'\n\tswarmfile=open(swarmfile_name,'w+')\t\n\tswarmcommand_3d=f'Run sam_3d swarm using this command: swarm -f {swarmfile_name} -g 15 -t auto --module samsrcv3 --logdir {swarmdir}/swarm_logs'\n\tswarmfile.write(f'#{swarmcommand_3d}\\n')\n\tswarmfile.close()\n\tfor 
sub in proclist:\n\t\tswarmfile=open(swarmfile_name,'a')\n\t\tswarmfile.write(f'set -e ; cd {subdir}sub-{sub}/meg ; sam_3d -r {sub}{ds} -m {paramfile_name} -v \\n')\n\t\tswarmfile.close()\n\n\n\t#print message that it is complete, and include commands for running each swarm file\n\tprint('Swarm files have been added! \\n')\n\tprint(f'1.) {swarmcommand_cov}\\n\\n2.) {swarmcommand_wts}\\n\\n3.) {swarmcommand_3d}')\n\n\nif __name__=='__main__':\n\tmake_swarm_newDs()\n\tmake_swarm_sam()\n", "id": "12474022", "language": "Python", "matching_score": 2.241502285003662, "max_stars_count": 0, "path": "MID_proc/make_swarms.py" }, { "content": "#CW 5.8.2020\n#makes .param files and puts them in each subject's meg folder\n\nimport os,sys\n\n#make this dictionary\nalpha=['alpha','8 14']\nbeta=['beta','15 29']\ngamma=['gamma','30 60']\nhighgamma=['highgamma','62 118']\n\n#set default variables here:\ndefault_Marker1='respwin'\ndefault_marker1window='0.5 2'\ndefault_freq=highgamma\n\n#setting more variables\nXBounds='-10 10'\nYBounds='-9 9'\nZBounds='0 15'\nImageStep='.5'\nImageMetric='Power'\nModel='Nolte'\nCovType='SUM'\nImageFormat='TLRC 5'\n\ndef make_param(freq,rootdir='/data/MoodGroup/07M0021_meg_analysis/MID_data/subjects', NumMarkers='1', Marker1='respwin', marker1window='0.5 2'):\n\t'''Makes param files for each subject in their meg folder.'''\n\t#define subject list and some other things for the file\n\troot, dirs, files = os.walk(rootdir).__next__()\n\tsublist=list(dirs)\n \n\t#provide dict of frequency band options and their Hz range, so you just need to enter the name string of the band as an argument\n\tfreq_dict={'alpha':'8 14','beta':'15 29','gamma':'30 60','highgamma':'62 118'}\n\tfreqband=freq_dict[freq]\n\tOrientBand=freqband\n\tNoiseBand=freqband\n\tCovBand=freqband\n\tImageBand=freqband\n\tDataSegment=marker1window\n\n\t#bestow an appropriate name upon the new param file\t\n\tparamfile_name=f'{freq}_{Marker1}.param'\n\t\n\t#make param file for each subject and drop it in their meg folder\n\tfor sub in sublist:\n\t\tnew_paramfile=open(f'{rootdir}/{sub}/meg/{paramfile_name}','w+')\t\n\t\tnew_paramfile.write(f'NumMarkers {NumMarkers}\\nMarker1 {Marker1} {marker1window} TRUE\\nOrientBand {OrientBand}\\nNoiseBand {NoiseBand}\\nCovBand {CovBand}\\nImageBand {ImageBand}\\nDataSegment {DataSegment}\\nXBounds {XBounds}\\nYBounds {YBounds}\\nZBounds {ZBounds}\\nImageStep {ImageStep}\\nImageMetric {ImageMetric}\\nPrefixLength {str(len(sub))}\\nMRIDirectory {rootdir}/{sub}/mri\\nModel {Model}\\nCovType {CovType}\\nImageFormat {ImageFormat}')\n\t\tnew_paramfile.close()\n\n\tprint('Param files have been added!')\n\nif __name__=='__main__':\n\tmake_param()\n", "id": "1998309", "language": "Python", "matching_score": 0.998640775680542, "max_stars_count": 0, "path": "MID_proc/make_paramfiles.py" }, { "content": "import MID_proc as mid\nimport os\nimport shutil\nfrom datetime import datetime\nfrom pathlib import Path\nimport numpy as np\nimport pandas as pd\nimport random\n\n#set current date for use in filenames\nDATE_TODAY = datetime.now().strftime('%m%d%Y')\nFAKE_SUBS = ['7','13','101','102','99','990','9999']\n\ndef make_test_data(basedir=None):\n\t#set up some example directories and files for use with tests\n\tif not basedir:\n\t\tbasedir= Path(os.getcwd())\n\n\tfake_subdir= basedir / 'MID_test_data' / 'subjects'\n\tfake_scriptsdir = fake_subdir.with_name('scripts')\n\tfake_swarmdir= fake_scriptsdir / 'swarm'\n\tfake_group_beh=basedir / 'MID_test_data' / 
'group_behavior_data'\n\n\tos.makedirs(fake_swarmdir,exist_ok=True)\n\tos.makedirs(fake_group_beh,exist_ok=True)\n \n\t#set fake sub ID numbers; these were picked to test that prefix length calculation in make_param() works with a variety of ID lengths\n\tfake_sub_file = fake_scriptsdir / 'fake_sub_file.txt'\n\tfake_sub_file.write_text('\\n'.join(FAKE_SUBS))\n\n\tfor sub in FAKE_SUBS:\n\t\tos.makedirs(fake_subdir / f'sub-{sub}'/ 'meg' , exist_ok=True)\n\t\tos.makedirs(fake_subdir / f'sub-{sub}' / 'behavior', exist_ok=True)\n\n\t#make fake cue files for use in behavior data processing\n\tfor sub in FAKE_SUBS:\n\t\ta=np.array([0]*78)\n\t\tb=np.array(np.linspace(10, 727, 78, False))\n\t\tcuemarks=np.column_stack((a,b))\n\t\tdf = pd.DataFrame(cuemarks)\n\t\tdf.to_csv(fake_subdir / f'sub-{sub}' / 'meg' / 'cue_marks', index=False, header=False, sep=' ')\n \n\t#make fake behavior data files (length based on maximum length of MID behavior file)\n\tfor sub in FAKE_SUBS:\n\t\tbeh_name=f'MID1-{sub}-1_behavior.txt'\n\t\t#create the three columns we're going to use with data those columns have IRL; I know this is REALLY clunky...but it works...\n\t\twin=['Win2']*26\n\t\tlose=['Lose2']*26\n\t\tcont=['Control']*26\n\t\twin.extend(lose)\n\t\twin.extend(cont)\n\t\trandom.shuffle(win)\n\t\tcuewords=win\n\t\tcol1=np.array(cuewords)\n\t\tresp=random.choices(['GoodResponse','NoResponse'], k=78)\n\t\tcol2=np.array(resp)\n\t\trt=np.random.randint(150,550,size=78)\n\t\tcol3=rt\n\t\tdf2=np.column_stack([col1,col2,col3])\n\t\t#add in top line that comes with actual e-prime data files and headers beneath it; top line will be removed so we need a throw-away top line in the fake data file too\n\t\tcola=np.array(['Cue'])\n\t\tcolb=np.array(['ResponseType'])\n\t\tcolc=np.array(['Target.RT'])\n\t\tdf3=np.column_stack([cola,colb,colc])\n\t\tdf4=pd.DataFrame(df2)\n\t\tdf5=pd.DataFrame(df3)\n\t\tfake_beh=pd.concat([df5,df4],ignore_index=True)\n\t\tfake_beh.to_csv(fake_subdir / f'sub-{sub}' / 'behavior' / beh_name, sep='\\t', mode='a', index=False)\n\n\treturn fake_subdir,fake_sub_file,fake_group_beh\n\n\n#first test is for the param file maker function\ndef test_make_param():\n\t'''This tests if make_param() can make parameter files with a few non-default input arguments and put them in fake subjects' meg folders.'''\n\tfake_subdir, fake_sub_file,fake_group_beh = make_test_data()\n\tmid.make_param(freq='alpha',rootdir=fake_subdir,Marker1='cue',marker1window='0 4')\n\tparamfiles_exist=[os.path.exists(fake_subdir / f'sub-{sub}' / 'meg' / 'alpha_cue.param') for sub in FAKE_SUBS]\n\tassert paramfiles_exist==[1]*len(FAKE_SUBS),'Param files were not created properly--oh no!'\n\n\t#remove the evidence\n\tshutil.rmtree(fake_subdir.parent, ignore_errors=False, onerror=None)\n\n \n#second test will test the swarm making functions\ndef test_make_swarms():\n\t'''This tests if make_swarm_newDs() and make_swarm_sam() make their respective swarm files with a fake subject list and some non-default args.'''\n\tfake_subdir, fake_sub_file,fake_group_beh = make_test_data()\n\tfake_swarmdir = fake_subdir.parent / 'scripts' / 'swarm'\n\tmid.make_swarm_newDs(subjectlist=fake_sub_file,newds='_MID_cue-f',marker='cue',timewindow='0 
4',swarmdir=fake_swarmdir,subdir=fake_subdir)\n\tmid.make_swarm_sam(subjectlist=fake_sub_file,ds='_MID_cue-f.ds',marker='cue',freqband='highgamma',swarmdir=fake_swarmdir,subdir=fake_subdir)\n\tswarmfiles_exist=[os.path.exists(f'{fake_swarmdir}/newDs_{DATE_TODAY}.swarm'),os.path.exists(f'{fake_swarmdir}/sam_cov_highgamma_cue_{DATE_TODAY}.swarm'),os.path.exists(f'{fake_swarmdir}/sam_wts_highgamma_cue_{DATE_TODAY}.swarm'),os.path.exists(f'{fake_swarmdir}/sam_3d_highgamma_cue_{DATE_TODAY}.swarm')]\n\n\tassert swarmfiles_exist==[1,1,1,1],'Swarm files not created. UGH!'\n\n\t#remove the evidence\n\tshutil.rmtree(fake_subdir.parent, ignore_errors=False, onerror=None)\n\n \n#third, fourth, and fifth tests will check the behavior data functions\ndef test_make_markerfiles():\n\t'''This tests if make_markerfiles_MID() makes the 3 win, loss, and control marker files in each subjects' behavior directories.'''\n\tfake_subdir, fake_sub_file,fake_group_beh = make_test_data()\n\tmid.make_markerfiles_MID(fake_sub_file,fake_subdir)\n\tmarkerfiles_exist=[os.path.exists(fake_subdir / f'sub-{sub}' / 'meg' / f'{sub}_win.txt') for sub in FAKE_SUBS]+ [os.path.exists(fake_subdir / f'sub-{sub}' / 'meg' / f'{sub}_lose.txt') for sub in FAKE_SUBS]+[os.path.exists(fake_subdir / f'sub-{sub}' / 'meg' / f'{sub}_cont.txt') for sub in FAKE_SUBS]\n\n\tassert markerfiles_exist==[1]*(len(FAKE_SUBS)*3),'Marker files not found!'\n\n\t#remove the evidence\n\tshutil.rmtree(fake_subdir.parent, ignore_errors=False, onerror=None)\n\ndef test_clean_beh():\n\t'''This tests if clean_beh_MID() makes and distributes cleaned behavior data csvs to each subject's behavior directories'''\n\tfake_subdir, fake_sub_file,fake_group_beh = make_test_data()\n\tmid.clean_beh_MID(fake_sub_file,fake_subdir,fake_group_beh)\n\tbehfiles_exist=[os.path.exists(fake_subdir / f'sub-{sub}' / 'behavior' / f'{sub}_behavior_MID_bytrial.csv') for sub in FAKE_SUBS]\n \n\tassert behfiles_exist==[1]*len(FAKE_SUBS),'Behavior files are MIA!'\n \n\t#remove the evidence\n\tshutil.rmtree(fake_subdir.parent, ignore_errors=False, onerror=None)\n\ndef test_masterbeh():\n\t'''This tests if clean_beh_MID() makes master descriptives file and puts it in output folder (generally some kind of group analysis folder)'''\n\tfake_subdir, fake_sub_file,fake_group_beh = make_test_data()\n\tmid.clean_beh_MID(fake_sub_file,fake_subdir,fake_group_beh)\n\tmasterbeh_exists=os.path.exists(fake_group_beh / 'all_behavior_MID_bytrial.csv')\n \n\tassert masterbeh_exists==1, 'Master descriptives file is missing!'\n \n\t#remove the evidence\n\tshutil.rmtree(fake_subdir.parent, ignore_errors=False, onerror=None)\n\n\nif __name__=='__main__':\n\ttest_make_param()\n\ttest_make_swarms()\n\ttest_make_markerfiles()\n\ttest_clean_beh()\n\ttest_masterbeh()\n\n\n#celebrate\nprint('All tests passed! 
Woo!!')\n", "id": "6874405", "language": "Python", "matching_score": 3.463259220123291, "max_stars_count": 0, "path": "tests/test_MID_proc_.py" }, { "content": "#CW 5.8.2020\n#output of these functions include marker files (for marker placement in MEG file processing) and behavioral data results (adds them to master spreadsheet)\n\nimport sys,os\nimport csv\nimport numpy as np\nimport pandas as pd\nimport statistics\n\n\n##############################\n#STEP 1: MAKE MARKER TXT FILES\n#MEG preprocessing step 1 must be run first so that the cue files exist\ndef make_markerfiles_MID(subjectlist,subdir_base):\n\t'''Makes marker text files for each subject in input list and puts them in subject's meg folder.'''\t\n\tsublist=open(subjectlist, 'r')\n\tproclist=sublist.readlines()\n\tproclist=map(lambda x: x.strip(), proclist)\n\t#loop through all subjects in input list begins here!\n\tfor subject in proclist:\n\t\tsubdir=f'{subdir_base}/sub-{subject}'\n\t\t#pull data from cue_marks file\n\t\tstimtime=pd.read_csv(subdir + '/meg/cue_marks',delimiter=' ',names=['Onset','Time'])\n\n\t\t#read MID behavior file\n\t\tdata_read=pd.read_csv(subdir + '/behavior/MID1-' + subject + '-1_behavior.txt',delimiter='\\t',skiprows=1)\n\n\t\t#pull data from cue file and behavior file to make a stimtimes variables (includes all trial types)\n\t\ttrial=data_read[['Cue']]\n\t\tframes=[trial,stimtime]\n\t\tstimtimes_full=pd.concat(frames,axis=1)\n\n\t\t#from stimtimes_full, pulls out win stim times and adds to a matrix (df_#_win)\n\t\tstimtimes_win=stimtimes_full[stimtimes_full.Cue=='Win2']\n\t\tdf_win=stimtimes_win[stimtimes_win.columns[1:3]]\n\n\t\t#repeats the above for lose stims\n\t\tstimtimes_lose=stimtimes_full[stimtimes_full.Cue=='Lose2']\n\t\tdf_lose=stimtimes_lose[stimtimes_lose.columns[1:3]]\n\n\t\t#repeats the above for control stims\n\t\tstimtimes_cont=stimtimes_full[stimtimes_full.Cue=='Control']\n\t\tdf_cont=stimtimes_cont[stimtimes_cont.columns[1:3]]\n\n\t\t#adds win, lose, and control stim time matrices each to its own text file in each subjects meg folder; these text files are used in MEG preprocessing step 2 to add trial type stim markers\n\t\tdf_win.to_csv(subdir + '/meg/' + subject + '_win.txt',index=False,header=False,sep=' ')\n\t\tdf_lose.to_csv(subdir + '/meg/' + subject + '_lose.txt',index=False,header=False,sep=' ')\n\t\tdf_cont.to_csv(subdir + '/meg/' + subject + '_cont.txt',index=False,header=False,sep=' ')\n\n\n#####################################################\n#STEP 2: CLEAN MID BEHAVIORAL DATA AND OUTPUT DESCRIPTIVES\ndef clean_beh_MID(subjectlist,subdir_base,outputdir):\n\t'''Makes cleaned behavior file and descriptives file (mean reaction time and accuracy by win/lose/control trial type) for each subject and puts it in their beh folder and a group behavior files folder'''\n\tsublist=open(subjectlist, 'r')\n\tprocessinglist=sublist.readlines()\n\tprocessinglist=map(lambda x: x.strip(), processinglist)\n\t#initiate aggregate df for later\n\tdf_all=[]\n\t#loop through all subjects in input list begins here!\n\tfor subject in processinglist:\n\t\tsubdir=f'{subdir_base}/sub-{subject}'\n\t\t#read in subject's behavior data file\n\t\tdata_read=pd.read_csv(subdir + '/behavior/MID1-' + subject + '-1_behavior.txt',delimiter='\\t',skiprows=1)\n\t\tbehavior=data_read[['Cue','ResponseType','Target.RT']]\n\n\t\t#assign win/loss/control hits and misses to variables 
respectively\n\t\tbehavior_win=behavior[behavior.Cue=='Win2']\n\t\tbehavior_win_hit=behavior_win[behavior_win.ResponseType=='GoodResponse']\n\t\tbehavior_win_miss=behavior_win[behavior_win.ResponseType=='NoResponse']\n\n\t\tbehavior_lose=behavior[behavior.Cue=='Lose2']\n\t\tbehavior_lose_hit=behavior_lose[behavior_lose.ResponseType=='GoodResponse']\n\t\tbehavior_lose_miss=behavior_lose[behavior_lose.ResponseType=='NoResponse']\n\n\t\tbehavior_cont=behavior[behavior.Cue=='Control']\n\t\tbehavior_cont_hit=behavior_cont[behavior_cont.ResponseType=='GoodResponse']\n\t\tbehavior_cont_miss=behavior_cont[behavior_cont.ResponseType=='NoResponse']\n\n\t\t#calculate reaction times and accuracy for each trial type and assign to variables respectively\n\t\twin_hit_RT=behavior_win_hit['Target.RT'][behavior_win_hit['Target.RT']!=0].mean()\n\t\twin_hit_acc=behavior_win_hit.count()\n\t\twin_miss_RT=behavior_win_miss['Target.RT'][behavior_win_miss['Target.RT']!=0].mean()\n\t\twin_miss_acc=behavior_win_miss.count()\n\t\tlose_hit_RT=behavior_lose_hit['Target.RT'][behavior_lose_hit['Target.RT']!=0].mean()\n\t\tlose_hit_acc=behavior_lose_hit.count()\n\t\tlose_miss_RT=behavior_lose_miss['Target.RT'][behavior_lose_miss['Target.RT']!=0].mean()\n\t\tlose_miss_acc=behavior_lose_miss.count()\n\t\tcont_hit_RT=behavior_cont_hit['Target.RT'][behavior_cont_hit['Target.RT']!=0].mean()\n\t\tcont_hit_acc=behavior_cont_hit.count()\n\t\tcont_miss_RT=behavior_cont_miss['Target.RT'][behavior_cont_miss['Target.RT']!=0].mean()\n\t\tcont_miss_acc=behavior_cont_miss.count()\n\n\t\t#create dataframe for holding these variables; Type = type of trial (win, lose, control), Outcome = outcome of trial (hit, miss), RT = mean reaction times for each trial type/outcome, Acc = mean accuracy for each trial type/outcome\n\t\td={'Type':['Win','Win','Lose','Lose','Cont','Cont'],\n\t\t 'Outcome':['Hit','Miss','Hit','Miss','Hit','Miss'],\n\t\t 'RT':[win_hit_RT,win_miss_RT,lose_hit_RT,lose_miss_RT,cont_hit_RT,cont_miss_RT],\n\t\t 'Acc':[win_hit_acc.Cue,win_miss_acc.Cue,lose_hit_acc.Cue,lose_miss_acc.Cue,cont_hit_acc.Cue,cont_miss_acc.Cue]}\n\t\tdf=pd.DataFrame(d)\n\n\t\t#put dataframe from above into a csv (in case we want this data at some point)\n\t\tdf.to_csv(subdir + '/behavior/' + subject + '_behavior_MID.csv',index=False,sep=' ')\n\n\t\t#calculate mean RTs for each trial type (collapsing outcome)\n\t\twin_RT=behavior_win['Target.RT'][behavior_win['Target.RT']!=0].mean()\n\t\tlose_RT=behavior_lose['Target.RT'][behavior_lose['Target.RT']!=0].mean()\n\t\tcont_RT=behavior_cont['Target.RT'][behavior_cont['Target.RT']!=0].mean()\n\n\t\t#calculate average accuracy for each trial type (collapsing outcome)\n\t\twin_acc=win_hit_acc.Cue/26\n\t\tlose_acc=lose_hit_acc.Cue/26\n\t\tcont_acc=cont_hit_acc.Cue/26\n\n\t\t#make new dataframe for collapsed data\n\t\td_2={'Type':['Win','Lose','Cont'],\n\t\t 'RT':[win_RT,lose_RT,cont_RT],'Acc':[win_acc,lose_acc,cont_acc]}\n\t\tdf_2=pd.DataFrame(d_2)\n\n\t\t#put dataframe from above into a csv in subject's behavior directory (this is the data we want to analyze for now)\n\t\tdf_2.to_csv(subdir + '/behavior/' + subject + '_behavior_MID_bytrial.csv',index=False,sep=' ')\n\n\t\t#add each subject's collapsed data into one csv with their subject number\n\t\tdf_2['Subject']=subject\n\t\tdf_all.append(df_2)\n\tdf_all=pd.concat(df_all)\n\tdf_all.to_csv(f'{outputdir}/all_behavior_MID_bytrial.csv',index=False,sep=' ')\n\nif __name__=='__main__':\n\tmake_markerfiles_MID()\t\n\tclean_beh_MID()\n", "id": "1082650", "language": 
"Python", "matching_score": 1.7032057046890259, "max_stars_count": 0, "path": "MID_proc/MID_beh.py" }, { "content": "'''Tools for processing MID MEG and behavioral data!'''\n\nfrom .MID_beh import make_markerfiles_MID,clean_beh_MID\nfrom .make_paramfiles import make_param\nfrom .make_swarms import make_swarm_newDs,make_swarm_sam\n", "id": "1307185", "language": "Python", "matching_score": 0.9702586531639099, "max_stars_count": 0, "path": "MID_proc/__init__.py" }, { "content": "from setuptools import setup, find_packages\n \nwith open('README.md','r') as yeet:\n\tlong_description=yeet.read()\n\nsetup(\n name = 'MID_proc',\n author = '<NAME>',\n version = '0.2.0',\n\tauthor_email='<EMAIL>',\n description = 'Some tools for processing MID behavioral and MEG data',\n\tlong_description=long_description,\n\tlong_description_content_type='text/markdown',\n\turl='https://github.com/cwusinich/project_spring_2020',\n license = 'Apache',\n packages = find_packages(),\n\tpython_requires='>=3.6'\n )\n\n\n", "id": "8290368", "language": "Python", "matching_score": 0.3783840537071228, "max_stars_count": 0, "path": "setup.py" } ]
1.350923
Serene-Arc
[ { "content": "BAK_VERSION = \"0.1.1a1\"\n", "id": "7533664", "language": "Python", "matching_score": 0.8911349177360535, "max_stars_count": 0, "path": "bak/__init__.py" }, { "content": "import os\nimport sqlite3\nfrom datetime import datetime\nfrom pathlib import Path\nfrom shutil import copy2\nfrom subprocess import call\nfrom sys import stderr, stdout\nfrom typing import List, Optional\nfrom warnings import warn\n\nimport click\nfrom config import Config\nfrom rich import box\nfrom rich.color import Color\nfrom rich.console import Console\nfrom rich.style import Style\nfrom rich.table import Table\n\nfrom bak.data import bak_db, bakfile\n\n# TODO: customizable file extension\n\ntry:\n data_dir = Path(os.environ[\"XDG_DATA_HOME\"]).expanduser().resolve()\nexcept KeyError:\n data_dir = Path(\"~/.local/share\").expanduser().resolve()\ntry:\n config_dir = Path(os.environ[\"XDG_CONFIG_HOME\"]).expanduser().resolve()\nexcept KeyError:\n config_dir = Path(\"~/.config\").expanduser().resolve()\n\nconfig_file = config_dir / 'bak.cfg'\ncfg = Config(str(config_file))\n\nbak_dir = cfg['bakfile_location'] or data_dir / 'bak' / 'bakfiles'\nbak_db_loc = cfg['bak_database_location'] or data_dir / 'bak' / 'bak.db'\n\nbak_list_relpaths = cfg['bak_list_relative_paths']\n\nif not bak_dir.exists():\n bak_dir.mkdir(parents=True)\n\ndb_handler = bak_db.BakDBHandler(bak_db_loc)\n\n\ndef _assemble_bakfile(filename: Path):\n time_now = datetime.now()\n bakfile_name = \"\".join(\n [\"-\".join(i for i in filename.parent.parts[1:])\n + '-' + filename.name, \".\", '-'.join(str(time_now.timestamp()).split('.')), \".bak\"]).replace(\" \", \"-\")\n bakfile_path = bak_dir / bakfile_name\n\n new_bak_entry = bakfile.BakFile(filename.name,\n filename,\n bakfile_path,\n time_now,\n time_now)\n return new_bak_entry\n\n\ndefault_select_prompt = (\"Enter a number, or: (V)iew (D)iff (C)ancel\", 'C')\n\n\ndef _get_bakfile_entry(filename: Path,\n select_prompt=default_select_prompt,\n err=True):\n entries = db_handler.get_bakfile_entries(filename)\n if not entries:\n return None\n # If there's only one bakfile corresponding to filename, return that.\n # If there's more than one, disambiguate.\n return entries[0] if len(entries) == 1 else \\\n _do_select_bakfile(entries, select_prompt, err)\n\n\ndef _do_select_bakfile(bakfiles: List[bakfile.BakFile],\n select_prompt=default_select_prompt,\n err=True):\n console = Console(file=stderr if err else stdout)\n console.print(\n f\"Found {len(bakfiles)} bakfiles for file: {bakfiles[0].orig_abspath}\")\n console.print(\"Please select from the following: \")\n _range = range(len(bakfiles))\n for i in _range:\n console.print(\n f\"{i + 1}: .bakfile last modified at {bakfiles[i].date_modified.split('.')[0]}\")\n\n def get_choice():\n return click.prompt(*select_prompt, err=err).lower()\n\n choice = get_choice()\n\n while True:\n if choice == \"c\":\n console.print(\"Cancelled.\")\n return False\n else:\n view = False\n try:\n if choice == \"v\":\n idx = int(click.prompt(\n \"View which .bakfile?\", err=err)) - 1\n view = True\n elif choice == \"d\":\n idx = int(click.prompt(\n \"Diff which .bakfile?\", err=err)) - 1\n bak_diff_cmd(bakfiles[idx])\n choice = get_choice()\n continue\n elif choice == \"l\":\n show_bak_list(bakfiles[0].orig_abspath)\n choice = get_choice()\n continue\n else:\n idx = int(choice) - 1\n if idx not in _range:\n console.print(\"Invalid selection. 
Aborting.\")\n return False\n elif view:\n bak_print_cmd(bakfiles[idx])\n choice = get_choice()\n continue\n else:\n return bakfiles[idx]\n except (ValueError, TypeError) as error:\n warn(error)\n console.print(\"Invalid input. Aborting.\")\n return False\n get_choice()\n\n\ndef show_bak_list(filename: Optional[Path] = None,\n relative_paths: bool = False):\n \"\"\" Prints list of .bakfiles with metadata\n\n Arguments:\n filename (str|os.path, optional):\n List only `filename`'s .bakfiles\n \"\"\"\n # pass\n bakfiles: List[bakfile.BakFile]\n bakfiles = db_handler.get_bakfile_entries(filename) if filename else \\\n db_handler.get_all_entries()\n\n console = Console()\n if not bakfiles:\n console.print(f\"No .bakfiles found for \"\n f\"{filename}\" if\n filename else \"No .bakfiles found\")\n return\n\n _title = f\".bakfiles of {filename}\" if \\\n filename else \".bakfiles\"\n\n table = Table(title=_title,\n show_lines=True, box=box.HEAVY_EDGE)\n\n table.add_column(\"\")\n table.add_column(\"Original File\")\n table.add_column(\"Date Created\")\n table.add_column(\"Last Modified\")\n\n i = 1\n for _bakfile in bakfiles:\n table.add_row(str(i),\n filename.relative_to(Path.cwd()) if\n relative_paths else\n _bakfile.orig_abspath,\n _bakfile.date_created.split('.')[0],\n _bakfile.date_modified.split('.')[0])\n i += 1\n\n console.print(table)\n\n\ndef create_bakfile(filename: Path):\n \"\"\" Default command. Roughly equivalent to\n cp filename $XDG_DATA_DIR/.bakfiles/filename.bak\n but inserts relevant metadata into the database.\n\n Arguments:\n filename: (str|os.path)\n \"\"\"\n if not filename.exists():\n # TODO descriptive failure\n return False\n new_bakfile = _assemble_bakfile(filename)\n copy2(new_bakfile.orig_abspath, new_bakfile.bakfile_loc)\n db_handler.create_bakfile_entry(new_bakfile)\n\n\ndef bak_up_cmd(filename: Path):\n \"\"\" Overwrite an existing .bakfile with the file's current contents\n\n Args:\n filename (str|os.path)\n \"\"\"\n # Return Truthy things for failures that echo their own output,\n # false for nonspecific or generic failures.\n # Put differently, False is for complete failures. If this function\n # handles a failure gracefully, it should return True.\n\n console = Console()\n\n old_bakfile = db_handler.get_bakfile_entries(filename)\n if old_bakfile is None:\n console.print(f\"No bakfile found for {filename}\")\n console.print(f\"Creating {filename}.bak\")\n return create_bakfile(filename)\n\n # Disambiguate\n old_bakfile = old_bakfile[0] if len(old_bakfile) == 1 else \\\n _do_select_bakfile(old_bakfile,\n select_prompt=(\n (\"Enter a number to overwrite a .bakfile, or:\\n(V)iew (L)ist (C)ancel\", \"C\")))\n\n if old_bakfile is None:\n console.print(\"Cancelled.\")\n return True\n elif not isinstance(old_bakfile, bakfile.BakFile):\n return False\n\n old_bakfile.date_modified = datetime.now()\n copy2(old_bakfile.original_file, old_bakfile.bakfile_loc)\n db_handler.update_bakfile_entry(old_bakfile)\n return True\n\ndef _sudo_bak_down_helper(src, dest):\n # TODO spin this off into a separate exec for sanity\n click.echo(f\"The destination {dest} is privileged. Falling back on 'sudo cp'\")\n call(f\"sudo cp {src} {dest}\".split(\" \"))\n\ndef bak_down_cmd(filename: Path,\n destination: Optional[Path],\n keep_bakfile: bool = False,\n quiet: bool = False):\n \"\"\" Restore `filename` from .bakfile. 
Prompts if ambiguous (such as\n when there are multiple .bakfiles of `filename`)\n\n Args:\n filename (str|Path)\n keep_bakfile (bool): If False, .bakfile is deleted (default: False)\n quiet (bool): If True, does not ask user to confirm\n destination (None|Path): destination path to restore to\n \"\"\"\n console = Console()\n bakfile_entries = db_handler.get_bakfile_entries(filename)\n if not bakfile_entries:\n console.print(f\"No bakfiles found for {filename}\")\n return\n\n bakfile_entry = _do_select_bakfile(bakfile_entries) if len(\n bakfile_entries) > 1 else bakfile_entries[0]\n\n if bakfile_entry is None:\n console.print(f\"No bakfiles found for {filename}\")\n return\n elif not bakfile_entry:\n return\n if not destination:\n destination = Path(bakfile_entry.orig_abspath).expanduser()\n\n if quiet:\n confirm = 'y'\n else:\n if destination != bakfile_entry.orig_abspath:\n if destination.exists():\n confirm = click.confirm(f\"Overwrite {destination}?\")\n \n confirm_prompt = f\"Confirm: Restore {filename} to {destination} and erase bakfiles?\" \\\n if not keep_bakfile else \\\n f\"Confirm: Restore {filename} to {destination} and keep bakfiles?\"\n confirm = click.confirm(confirm_prompt, default=False)\n if not confirm:\n console.print(\"Cancelled.\")\n return\n \n try:\n copy2(bakfile_entry.bakfile_loc, destination)\n except PermissionError:\n _sudo_bak_down_helper(bakfile_entry.bakfile_loc, destination)\n if not keep_bakfile:\n for entry in bakfile_entries:\n Path(entry.bakfile_loc).unlink(missing_ok=True)\n db_handler.del_bakfile_entry(entry)\n\n\ndef __remove_bakfiles(bakfile_entries):\n for entry in bakfile_entries:\n Path(entry.bakfile_loc).unlink()\n db_handler.del_bakfile_entry(entry)\n\n\ndef bak_off_cmd(filename: Optional[Path],\n quietly=False):\n \"\"\" Used when finished. Deletes `filename.bak`. Prompts if ambiguous:\n 3 .bakfiles detected:\n 1. filename.bak | <metadata>\n 2. filename.bak.1 | <metadata>\n 3. filename.bak.2 | <metadata>\n Delete:\n (A)ll, (1,2,3), (N)one, (C)ancel\n NOTE: that output isn't implemented yet, but it does offer decent\n options when disambiguation is required\n Args:\n filename ([type], optional): [description]. Defaults to None.\n \"\"\"\n console = Console()\n bakfiles = db_handler.get_bakfile_entries(filename)\n if not bakfiles:\n console.print(f\"No bakfiles found for {filename}\")\n return False\n confirm = input(\n f\"Confirming: Remove {len(bakfiles)} .bakfiles for {filename}? 
\"\n f\"(y/N) \").lower() == 'y' if not quietly else True\n if confirm:\n __remove_bakfiles(db_handler.get_bakfile_entries(filename))\n return True\n else:\n return False\n\n\ndef bak_print_cmd(bak_to_print: (str, bakfile.BakFile),\n using: (str, None) = None):\n # if this thing is given a string, it needs to go find\n # a corresponding bakfile\n console = Console()\n\n if not isinstance(bak_to_print, bakfile.BakFile):\n _bak_to_print = _get_bakfile_entry(bak_to_print,\n select_prompt=(\n \"Enter a number to select a .bakfile, or:\\n(D)iff (L)ist (C)ancel\",\n \"C\"))\n if _bak_to_print is None:\n console.print(\n f\"No bakfiles found for {Path(bak_to_print).resolve()}\")\n else:\n bak_to_print = _bak_to_print\n if not isinstance(bak_to_print, bakfile.BakFile):\n return # _get_bakfile_entry() handles failures, so just exit here\n pager = using if using else \\\n (cfg['bak_open_exec'] or os.environ['PAGER']) or 'less'\n pager = pager.strip('\"').strip(\"'\").split(\" \")\n call(pager + [bak_to_print.bakfile_loc])\n\n\ndef bak_getfile_cmd(bak_to_get: (str, bakfile.BakFile)):\n console = Console(file=stderr)\n\n if not isinstance(bak_to_get, bakfile.BakFile):\n filename = bak_to_get\n bak_to_get = _get_bakfile_entry(bak_to_get, err=True)\n if bak_to_get is None:\n console.print(f\"No bakfiles found for {Path(filename).resolve()}\")\n return # _get_bakfile_entry() handles failures, so just exit\n print(bak_to_get.bakfile_loc)\n\n\ndef bak_diff_cmd(filename: (bakfile.BakFile, Path), command='diff'):\n # TODO write tests for this (mildly tricky)\n console = Console()\n\n bak_to_diff = filename if isinstance(filename, bakfile.BakFile) else \\\n _get_bakfile_entry(filename,\n select_prompt=(\n (\"Enter a number to diff a .bakfile, or:\\n(V)iew (L)ist (C)ancel\", \"C\")))\n if not command:\n command = cfg['bak_diff_exec'] or 'diff'\n if bak_to_diff is None:\n console.print(f\"No bakfiles found for {filename}\")\n return\n if not bak_to_diff:\n return\n command = command.strip('\"').strip(\"'\").split(\" \")\n call(command +\n [bak_to_diff.bakfile_loc, bak_to_diff.orig_abspath])\n", "id": "3414847", "language": "Python", "matching_score": 2.23848557472229, "max_stars_count": 0, "path": "bak/commands/__init__.py" }, { "content": "#!/usr/bin/env python3\n\nimport argparse\nimport configparser\nimport logging\nimport multiprocessing\nimport os\nimport pathlib\nimport random\nimport sys\nimport xml.etree.ElementTree as et\n\nfrom tqdm import tqdm\n\nimport podcastdownloader.episode as episode\nimport podcastdownloader.writer as writer\nfrom podcastdownloader.exceptions import EpisodeException, FeedException\nfrom podcastdownloader.feed import Feed\nfrom podcastdownloader.tagengine import writeTags\n\nparser = argparse.ArgumentParser()\n\n\nif __name__ == \"__main__\":\n\n parser.add_argument('destination', help='directory to store downloads')\n parser.add_argument('-f', '--feed', action='append', help='feed to download')\n parser.add_argument('--file', action='append', help='location of a file of feeds')\n parser.add_argument('-o', '--opml', action='append', help='location of an OPML file to load')\n parser.add_argument('-t', '--threads', type=int, default=10, help='number of concurrent downloads')\n parser.add_argument('-l', '--limit', type=int, default=-1, help='number of episodes to download from each feed')\n parser.add_argument(\n '-w', '--write-list',\n choices=['none', 'audacious', 'text', 'm3u'],\n default='none',\n help='flag to write episode list')\n parser.add_argument('-s', 
'--suppress-progress', action='store_true')\n parser.add_argument('-v', '--verbose', action='count', default=0, help='increase the verbosity')\n parser.add_argument('--max-attempts', type=int, help='maximum nuimber of attempts to download file')\n download_alternates = parser.add_mutually_exclusive_group()\n download_alternates.add_argument('--skip-download', action='store_true', help='skips the download of episodes')\n download_alternates.add_argument('--verify', action='store_true', help='verify all downloaded files')\n download_alternates.add_argument(\n '--update-tags',\n action='store_true',\n help='download and apply tags to existing files only')\n parser.add_argument('-m', '--max-downloads', type=int, default=0,\n help='maximum number of total episodes to download')\n parser.add_argument('--log', help='log to specified file')\n\n args = parser.parse_args()\n\n if args.file:\n args.file = [pathlib.Path(file).resolve() for file in args.file]\n if args.opml:\n args.opml = [pathlib.Path(file).resolve() for file in args.opml]\n args.destination = pathlib.Path(args.destination).resolve()\n\n logger = logging.getLogger()\n logger.setLevel(1)\n stream_handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('[%(asctime)s - %(name)s - %(levelname)s] - %(message)s')\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n logging.getLogger('urllib3').setLevel(logging.CRITICAL)\n\n if args.log:\n file_handler = logging.FileHandler(args.log)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.DEBUG)\n logger.addHandler(file_handler)\n\n if args.verbose == 0:\n stream_handler.setLevel(logging.INFO)\n elif args.verbose == 1:\n stream_handler.setLevel(logging.DEBUG)\n elif args.verbose >= 2:\n stream_handler.setLevel(9)\n\n if args.max_attempts:\n episode.max_attempts = args.max_attempts\n\n subscribedFeeds = []\n\n if args.opml:\n for opml_loc in args.opml:\n opml_tree = et.parse(pathlib.Path(opml_loc))\n for opml_feed in opml_tree.getroot().iter('outline'):\n subscribedFeeds.append(Feed(opml_feed.attrib['xmlUrl']))\n logger.debug('Feed {} added'.format(opml_feed.attrib['xmlUrl']))\n\n if args.feed:\n for arg_feed in args.feed:\n subscribedFeeds.append(Feed(arg_feed))\n logger.debug('Feed {} added'.format(arg_feed))\n\n if args.file:\n for feed_file in args.file:\n with open(pathlib.Path(feed_file), 'r') as file:\n for line in file.readlines():\n if line != '\\n' and not line.strip().startswith('#'):\n parsed_line = line.split(' #')[0].strip()\n subscribedFeeds.append(Feed(parsed_line))\n logger.debug('Feed {} added'.format(line.strip()))\n\n episode_queue = []\n existingFiles = []\n\n logger.info('{} feeds to be downloaded'.format(len(subscribedFeeds)))\n\n logger.info('Scanning existing files...')\n for (dirpath, dirnames, filenames) in os.walk(args.destination):\n existingFiles.extend([str(pathlib.PurePath(dirpath, filename)) for filename in filenames])\n\n def readyFeed(in_feed: Feed) -> Feed:\n try:\n logger.debug('Attempting to download feed {}'.format(in_feed.url))\n in_feed.fetchRSS()\n in_feed.extractEpisodes(args.limit)\n logger.debug('Feed {} downloaded'.format(in_feed.title))\n in_feed.feed = None\n\n except (FeedException, KeyError) as e:\n logger.error('Feed {} could not be parsed: {}'.format(in_feed.url, e))\n return None\n\n return in_feed\n\n def fillEpisode(ep: episode.Episode) -> episode.Episode:\n try:\n ep.parseRSSEntry()\n ep.calcPath(args.destination)\n logger.log(9, 'Episode {} parsed'.format(ep.title))\n\n if 
str(ep.path) in existingFiles:\n ep.status = episode.Status.downloaded\n\n except EpisodeException as e:\n logger.error('{} in podcast {} failed: {}'.format(ep.title, ep.podcast, e))\n return ep\n\n def downloadEpisode(ep: episode.Episode):\n try:\n ep.downloadContent()\n logger.debug('Episode {} downloaded from podcast {}'.format(ep.title, ep.podcast))\n try:\n writeTags(ep)\n except episode.EpisodeException as e:\n logger.warning('Tags could not be written to {} in podcast {}: {}'.format(ep.title, ep.podcast, e))\n except episode.EpisodeException as e:\n logger.error('{} failed to download: {}'.format(ep.title, e))\n\n def check_episode(ep: episode.Episode) -> episode.Episode:\n try:\n ep.verifyDownload()\n except KeyError:\n logger.error('Episode {} in podcast {} could not be checked'.format(ep.title, ep.podcast))\n return ep\n\n pool = multiprocessing.Pool(args.threads)\n\n # randomise the feed list, just so there's less chance of a slow group\n random.shuffle(subscribedFeeds)\n\n logger.info('Updating feeds...')\n\n subscribedFeeds = list(\n tqdm(pool.imap_unordered(\n readyFeed,\n subscribedFeeds),\n total=len(subscribedFeeds),\n disable=args.suppress_progress))\n subscribedFeeds = list(filter(None, subscribedFeeds))\n\n logger.info('Parsing episodes...')\n\n for feed in tqdm(subscribedFeeds, disable=args.suppress_progress):\n feed.makeDirectory(args.destination)\n feed.feed_episodes = list(pool.imap(fillEpisode, feed.feed_episodes))\n writer.writeEpisode(feed, args.write_list)\n episode_queue.extend([ep for ep in feed.feed_episodes])\n\n logger.info('{} episodes missing from archive'.format(\n len(list(filter(lambda e: e.status == episode.Status.pending, episode_queue)))))\n if args.verify:\n episode_queue = list(filter(lambda e: e.status == episode.Status.downloaded, episode_queue))\n logger.info('Commencing offline cache verification')\n random.shuffle(episode_queue)\n\n checked_episodes = list(\n tqdm(pool.imap_unordered(\n check_episode,\n episode_queue),\n total=len(episode_queue),\n disable=args.suppress_progress))\n\n with open('output.txt', 'w') as file:\n for ep in filter(lambda e: e.status == episode.Status.corrupted, checked_episodes):\n logger.error(\n 'Episode {} in podcast {} has a mismatched filesize, presumed corrupted'.format(\n ep.title, ep.podcast))\n file.write(str(ep.path) + '\\n')\n\n elif args.skip_download:\n episode_queue = list(filter(lambda e: e.status == episode.Status.pending, episode_queue))\n for ep in episode_queue:\n logger.info('Skipping download for episode {} in podcast {}'.format(ep.title, ep.podcast))\n\n elif args.update_tags:\n episode_queue = list(filter(lambda e: e.status == episode.Status.downloaded, episode_queue))\n logger.info('Writing tags to {} files'.format(len(episode_queue)))\n\n checked_episodes = list(\n tqdm(pool.imap_unordered(\n writeTags,\n episode_queue),\n total=len(episode_queue),\n disable=args.suppress_progress))\n\n else:\n episode_queue = list(filter(lambda e: e.status == episode.Status.pending, episode_queue))\n if args.max_downloads > 0:\n logger.info('Reducing number of downloads to a maximum of {}'.format(args.max_downloads))\n episode_queue = episode_queue[:args.max_downloads]\n\n # randomise the list, if all the episodes from one server are close\n # together, then the server will start cutting off downloads. 
this should\n # limit/prevent that as much as possible to keep the average speed high\n random.shuffle(episode_queue)\n\n list(tqdm(pool.imap_unordered(\n downloadEpisode,\n episode_queue),\n total=len(episode_queue),\n disable=args.suppress_progress))\n\n pool.close()\n pool.join()\n", "id": "6015046", "language": "Python", "matching_score": 4.852226257324219, "max_stars_count": 10, "path": "podcastdownloader/__main__.py" }, { "content": "#!/usr/bin/env python3\n\n'''Class for feeds'''\n\nimport os\nimport pathlib\nimport ssl\nimport time\n\nimport feedparser\nimport requests\nimport requests.exceptions\n\nfrom podcastdownloader.episode import Episode, Status, max_attempts\nfrom podcastdownloader.exceptions import FeedException\n\n\ndef _rate_limited_request(url: str) -> requests.Response:\n url = url.strip()\n attempts = 1\n while True:\n try:\n response = requests.get(url, timeout=180, allow_redirects=True)\n return response\n except (requests.exceptions.RequestException, ssl.SSLError) as e:\n # 3 is a magic number\n if attempts > 3:\n raise FeedException('Failed to get feed {}; connection was limited/refused: {}'.format(url, e))\n time.sleep(30 * attempts)\n attempts += 1\n\n\nclass Feed:\n def __init__(self, url: str):\n self.url = url\n self.feed_episodes = []\n self.downloaded_episodes = []\n\n def __download_rss(self):\n response = _rate_limited_request(self.url)\n self.feed = response.content\n\n def fetchRSS(self):\n self.__download_rss()\n self.feed = feedparser.parse(self.feed)\n self.title = self.feed['feed']['title'].encode('utf-8').decode('ascii', 'ignore')\n\n def extractEpisodes(self, episode_limit: int):\n if episode_limit == -1:\n episode_limit = len(self.feed['entries'])\n for entry in self.feed['entries'][:episode_limit]:\n self.feed_episodes.append(Episode(entry, self.title))\n\n def makeDirectory(self, destination: pathlib.Path):\n self.directory = pathlib.Path(destination, self.title)\n if not self.directory.exists():\n os.mkdir(self.directory)\n\n\nif __name__ == \"__main__\":\n import os\n import pathlib\n\n from podcastdownloader.tagengine import writeTags\n\n feed = Feed(input('Enter a feed URL: '))\n destination = input('Enter a destination location: ')\n\n print('Getting feed...')\n feed.fetchRSS()\n feed.makeDirectory(destination)\n feed.extractEpisodes(-1)\n\n existingFiles = []\n print('Scanning existing files...')\n for (dirpath, dirnames, filenames) in os.walk(destination):\n existingFiles.extend([str(pathlib.PurePath(dirpath, filename)) for filename in filenames])\n\n for ep in feed.feed_episodes:\n print('Parsing episode...')\n ep.parseRSSEntry()\n ep.calcPath(destination)\n if str(ep.path) in existingFiles:\n ep.status = Status.downloaded\n ep.verifyDownload()\n if ep.status == Status.pending:\n ep.downloadContent()\n writeTags(ep)\n", "id": "6803142", "language": "Python", "matching_score": 2.8755037784576416, "max_stars_count": 10, "path": "podcastdownloader/feed.py" }, { "content": "#!/usr/bin/env python3\n\nimport pytest\nfrom podcastdownloader.feed import Feed\n\n\[email protected]\ndef feed():\n return Feed('https://rss.art19.com/wecrashed')\n\n\ndef test_fetchRSS(feed):\n feed.fetchRSS()\n assert feed.title == 'WeCrashed: The Rise and Fall of WeWork'\n\n\ndef test_extractEpisodes(feed):\n feed.fetchRSS()\n feed.extractEpisodes(-1)\n assert len(feed.feed_episodes) == 9\n", "id": "11339921", "language": "Python", "matching_score": 0.1547200232744217, "max_stars_count": 10, "path": "podcastdownloader/tests/test_feed.py" }, { "content": 
"#!/usr/bin/env python3\n\n\nclass PodcastException(Exception):\n pass\n\n\nclass FeedException(PodcastException):\n pass\n\n\nclass EpisodeException(PodcastException):\n pass\n", "id": "9296757", "language": "Python", "matching_score": 0.19788676500320435, "max_stars_count": 10, "path": "podcastdownloader/exceptions.py" }, { "content": "#!/usr/bin/env python3\n\nimport os\nimport pathlib\nimport re\nimport ssl\nimport time\nfrom enum import Enum\nfrom typing import Dict, Optional\n\nimport mutagen\nimport mutagen.easyid3\nimport requests\nimport requests.exceptions\n\nfrom podcastdownloader.exceptions import EpisodeException\n\n\nclass Status(Enum):\n blank = 0\n pending = 1\n downloaded = 2\n corrupted = 3\n\n\nmax_attempts = 10\n\n\ndef _rate_limited_request(url: str, head_only: bool) -> requests.Response:\n url = url.strip()\n attempts = 1\n global max_attempts\n while True:\n try:\n if head_only:\n response = requests.head(url, timeout=180, allow_redirects=True)\n else:\n response = requests.get(url, timeout=180, allow_redirects=True)\n return response\n\n except (requests.exceptions.RequestException, ssl.SSLError) as e:\n if attempts > max_attempts:\n raise EpisodeException('Connection was limited/refused: {}'.format(e))\n time.sleep(30 * attempts)\n attempts += 1\n\n\nclass Episode:\n def __init__(self, feed_dict: Dict, podcast: str):\n self.feed_entry = feed_dict\n self.podcast = podcast\n self.status = Status.blank\n self.download_link = None\n self.size = None\n\n def parseRSSEntry(self):\n self.title = re.sub(r'(/|\\0)', '', self.feed_entry['title'])\n\n if 'links' in self.feed_entry:\n for link in self.feed_entry['links']:\n if 'type' in link and re.match('audio*', link['type']):\n self.download_link = link['href']\n self.file_type = link['type']\n break\n\n elif 'link' in self.feed_entry:\n self.download_link = self.feed_entry['link']\n self.file_type = None\n\n else:\n self.download_link = None\n\n if not self.download_link:\n raise EpisodeException(\n 'No download link found for episode {} in podcast {}'.format(\n self.title, self.podcast))\n\n if not self.file_type:\n r = _rate_limited_request(self.download_link, True)\n self.file_type = r.headers['content-type']\n r.close()\n\n self.status = Status.pending\n\n def calcPath(self, dest_folder: pathlib.Path):\n intended_path = pathlib.Path(dest_folder, self.podcast)\n self.path = None\n\n if self.file_type == 'audio/mp4' or self.file_type == 'audio/x-m4a':\n self.path = pathlib.Path(intended_path, self.title + '.m4a')\n elif self.file_type == 'audio/mpeg' or self.file_type == 'audio/mp3':\n self.path = pathlib.Path(intended_path, self.title + '.mp3')\n\n if self.path is None:\n raise EpisodeException('Cannot determine filename with codec {}'.format(self.file_type))\n\n def _get_download_size(self):\n r = _rate_limited_request(self.download_link, True)\n self.size = int(r.headers['content-length'])\n\n def verifyDownload(self):\n self._get_download_size()\n if self.path.exists():\n found_size = self.path.stat().st_size\n # set the tolerance as a percent of the filesize\n if abs(found_size - self.size) >= (self.size * 0.02):\n self.status = Status.corrupted\n\n def checkExistence(self):\n if os.path.exists(self.path) is True:\n self.status = Status.downloaded\n\n def downloadContent(self):\n content = _rate_limited_request(self.download_link, False).content\n\n with open(self.path, 'wb') as episode_file:\n episode_file.write(content)\n self.status = Status.downloaded\n", "id": "2162139", "language": "Python", "matching_score": 
3.709940195083618, "max_stars_count": 10, "path": "podcastdownloader/episode.py" }, { "content": "#!/usr/bin/env python3\n\nimport pytest\nfrom podcastdownloader.episode import Episode, Status\n\n\[email protected]\ndef episode():\n feed_dict = {\n 'title': 'Bad /Test\\0 Title',\n 'links': [\n {'href': 'https://dts.podtrac.com/redirect.mp3/chtbl.com/track/9EE2G/pdst.fm/e/rss.art19.com/episodes/b58e6644-0b5d-492b-ad4a-a773bc701b81.mp3',\n 'length': '4690337',\n 'rel': 'enclosure',\n 'type': 'audio/mpeg'}]}\n return Episode(feed_dict, 'test podcast')\n\n\ndef test_parseRSS(episode):\n episode.parseRSSEntry()\n assert episode.title == 'Bad Test Title'\n assert episode.download_link == 'https://dts.podtrac.com/redirect.mp3/chtbl.com/track/9EE2G/pdst.fm/e/rss.art19.com/episodes/b58e6644-0b5d-492b-ad4a-a773bc701b81.mp3'\n assert episode.file_type == 'audio/mpeg'\n assert episode.status == Status.pending\n\n\ndef test_pathCalc(episode):\n episode.title = 'test title'\n episode.file_type = 'audio/mpeg'\n episode.calcPath('testdirectory')\n assert str(episode.path) == 'testdirectory/test podcast/test title.mp3'\n", "id": "12124473", "language": "Python", "matching_score": 1.4044159650802612, "max_stars_count": 10, "path": "podcastdownloader/tests/test_episode.py" }, { "content": "#!/usr/bin/env python3\n\nimport logging\nimport pathlib\nfrom datetime import datetime\nfrom time import mktime\n\nimport mutagen\nimport mutagen.id3\nimport mutagen.mp3\nimport mutagen.mp4\n\nfrom podcastdownloader.episode import Episode\n\nlogger = logging.getLogger(__name__)\n\n\ndef writeTags(episode: Episode):\n\n try:\n if episode.file_type is None:\n episode = _guessFileType(episode)\n except AttributeError:\n episode = _guessFileType(episode)\n\n if episode.file_type in ('audio/mpeg', 'audio/mp3'):\n _writeID3Tags(episode)\n elif episode.file_type in ('audio/mp4', 'audio/x-m4a'):\n _writeMP4Tags(episode)\n\n\ndef _guessFileType(episode: Episode):\n if str(episode.path).endswith('mp3'):\n episode.file_type = 'audio/mp3'\n return episode\n\n\ndef _writeID3Tags(episode: Episode):\n episode_tags = mutagen.id3.ID3FileType(episode.path, ID3=mutagen.id3.ID3)\n\n try:\n episode_tags.add_tags()\n except mutagen.MutagenError:\n pass\n\n episode_tags.tags.update_to_v24()\n tags = [\n ('TIT2', mutagen.id3.TIT2(encoding=3, text=episode.title)),\n ('TALB', mutagen.id3.TALB(encoding=3, text=episode.podcast)),\n ('TDOR', mutagen.id3.TDOR(encoding=3, text=datetime.fromtimestamp(\n mktime(episode.feed_entry['published_parsed'])).isoformat()))]\n\n if 'summary' in episode.feed_entry:\n tags.append(('TDES', mutagen.id3.TDES(encoding=3, text=episode.feed_entry['summary'])))\n elif 'subtitle' in episode.feed_entry:\n tags.append(('TDES', mutagen.id3.TDES(encoding=3, text=episode.feed_entry['subtitle'])))\n else:\n logger.debug(\n 'Could not add description tag for episode {} in podcast {}'.format(\n episode.title, episode.podcast))\n\n if 'itunes_episode' in episode.feed_entry:\n tags.append(('TRCK', mutagen.id3.TRCK(encoding=3, text=episode.feed_entry['itunes_episode'])))\n else:\n logger.debug(\n 'Could not add track number tag to episode {} in podcast {}'.format(\n episode.title, episode.podcast))\n\n for (tag, content) in tags:\n try:\n episode_tags[tag] = content\n logger.debug('Wrote tag {}'.format(tag))\n except Exception as e:\n logger.error('Mutagen had an error writing ID3 tag {}: {}'.format(tag, e))\n\n episode_tags.save(episode.path)\n\n\ndef _writeMP4Tags(episode: Episode):\n try:\n episode_tags = 
mutagen.mp4.MP4(episode.path)\n except mutagen.mp4.MP4StreamInfoError:\n logger.error('Thought {} was an MP4 file but it was not'.format(episode.path.name))\n return\n\n try:\n episode_tags.add_tags()\n except mutagen.MutagenError:\n pass\n\n tags = [(r'\\xa9nam', episode.title),\n (r'\\xa9alb', episode.podcast),\n (r'\\xa9day', datetime.fromtimestamp(\n mktime(episode.feed_entry['published_parsed'])).isoformat())]\n\n if 'summary' in episode.feed_entry:\n tags.append((r'desc', episode.feed_entry['summary']))\n elif 'subtitle' in episode.feed_entry:\n tags.append((r'desc', episode.feed_entry['subtitle']))\n else:\n logger.debug(\n 'Could not add description tag for episode {} in podcast {}'.format(\n episode.title, episode.podcast))\n\n if 'itunes_episode' in episode.feed_entry:\n tags.append((r'trkn', (int(episode.feed_entry['itunes_episode']), 0)))\n else:\n logger.debug(\n 'Could not add track number tag to episode {} in podcast {}'.format(\n episode.title, episode.podcast))\n\n for (tag, content) in tags:\n try:\n episode_tags[tag] = content\n except Exception as e:\n logger.error(\n 'Could not write tag {} with value {} to episode {} in podcast {}'.format(\n tag, content, episode.title, episode.podcast))\n\n episode_tags.save(episode.path)\n", "id": "76795", "language": "Python", "matching_score": 2.3431174755096436, "max_stars_count": 10, "path": "podcastdownloader/tagengine.py" }, { "content": "#!/usr/bin/env python3\n\nimport logging\nimport pathlib\n\nimport podcastdownloader.feed as feed\n\nlogger = logging.getLogger(__name__)\n\n\ndef __writeAudacious(feed: feed.Feed):\n with open(pathlib.Path(feed.directory, 'episode_playlist.audpl'), 'w') as file:\n file.write('title={}\\n'.format(feed.title).replace(' ', '%20'))\n for episode in reversed(feed.feed_episodes):\n try:\n file.write('uri=file://{}\\n'.format(episode.path).replace(' ', '%20'))\n file.write('title={}\\n'.format(episode.title).replace(' ', '%20'))\n except AttributeError as e:\n logger.warning('Could not write {} to playlist'.format(episode.title))\n\n\ndef __writeText(feed: feed.Feed):\n with open(pathlib.Path(feed.directory, 'episode_list.txt'), 'w') as file:\n for entry in reversed(feed.feed_episodes):\n file.write(entry.title + '\\n')\n\n\ndef __writeM3u(feed: feed.Feed):\n with open(pathlib.Path(feed.directory, 'episode_playlist.m3u'), 'w') as file:\n file.write('#EXTM3U\\n')\n for episode in reversed(feed.feed_episodes):\n try:\n file.write('./' + episode.path.name + '\\n')\n except AttributeError:\n logger.warning('Could not write {} to playlist'.format(episode.title))\n\n\ndef writeEpisode(feed: feed.Feed, write_choice: str):\n if write_choice == 'audacious':\n __writeAudacious(feed)\n elif write_choice == 'text':\n __writeText(feed)\n elif write_choice == 'm3u':\n __writeM3u(feed)\n", "id": "8028695", "language": "Python", "matching_score": 0.9010488986968994, "max_stars_count": 10, "path": "podcastdownloader/writer.py" } ]
1.821451
dorianignee
[ { "content": "import aoc\n\nclass Chiton:\n def __init__(self, risk):\n self.risk = int(risk)\n self.total_risk = 10000\n self.neighbors = []\n\n def find_way(self):\n global recalls\n for cell in self.neighbors:\n if cell.total_risk > (self.total_risk + cell.risk):\n cell.total_risk = self.total_risk + cell.risk\n recalls.add(cell)\n\n def __repr__(self):\n return str(self.risk)\n\n# initialize grid\ninput_grid = [[Chiton(digit) for digit in line] for line in aoc.lines(\"15_in.txt\")]\ngrid = [[None]*(len(input_grid[0])*5) for _ in range(len(input_grid)*5)]\nfor row_id in range(len(input_grid)):\n grid[row_id][0:len(input_grid[row_id])] = input_grid[row_id]\n\n# extend grid\n# - first column\nfor row_id in range(len(input_grid), len(grid)):\n for col_id in range(0, len(input_grid[0])):\n grid[row_id][col_id] = Chiton(grid[row_id - len(input_grid)][col_id].risk % 9 + 1)\n\n# - other columns\nfor row_id in range(0, len(grid)):\n for col_id in range(len(input_grid[0]), len(grid[row_id])):\n grid[row_id][col_id] = Chiton(grid[row_id][col_id - len(input_grid[0])].risk % 9 + 1)\n\naoc.neighbors(grid, with_diagonals = False)\n\n# special values for first cell\nfirst_cell = grid[0][0]\nfirst_cell.risk = 0\nfirst_cell.total_risk = 0\n\n# find a way\nrecalls = {first_cell}\nwhile len(recalls) > 0:\n current_queue = recalls.copy()\n recalls = set()\n for cell in current_queue:\n cell.find_way()\n\n# print result\nprint(grid[-1][-1].total_risk)\n", "id": "8769277", "language": "Python", "matching_score": 3.479276418685913, "max_stars_count": 0, "path": "2021/152.py" }, { "content": "import aoc\n\nclass Chiton:\n def __init__(self, risk):\n self.risk = int(risk)\n self.total_risk = 10000\n self.neighbors = []\n\n def find_way(self):\n global recalls\n for cell in self.neighbors:\n if cell.total_risk > (self.total_risk + cell.risk):\n cell.total_risk = self.total_risk + cell.risk\n recalls.add(cell)\n\n# initialize grid\ngrid = [[Chiton(digit) for digit in line] for line in aoc.lines(\"15_in.txt\")]\naoc.neighbors(grid, with_diagonals = False)\n\n# special values for first cell\nfirst_cell = grid[0][0]\nfirst_cell.risk = 0\nfirst_cell.total_risk = 0\n\n# find a way\nrecalls = {first_cell}\nwhile len(recalls) > 0:\n current_queue = recalls.copy()\n recalls = set()\n for cell in current_queue:\n cell.find_way()\n\n# print result\nprint(grid[-1][-1].total_risk)\n", "id": "521337", "language": "Python", "matching_score": 1.588470697402954, "max_stars_count": 0, "path": "2021/151.py" }, { "content": "import aoc\n\nclass Octo:\n def __init__(self, power):\n self.power = power\n self.flashed = False\n\n def flash(self):\n global flash_count\n if self.power > 9:\n self.power = 0\n self.flashed = True\n flash_count += 1\n for octo in [octo for octo in self.neighbors if octo.flashed == False]:\n octo.power += 1\n \ndef turn():\n global flash_count\n global total_flash_count\n \n for octo in aoc.flat_grid(grid):\n octo.flashed = False\n octo.power += 1\n \n flash_count = 1 \n while flash_count > 0:\n flash_count = 0\n for octo in aoc.flat_grid(grid):\n octo.flash()\n total_flash_count += flash_count\n \nlines = aoc.lines(\"11_in.txt\")\ngrid = [[Octo(int(power)) for power in line] for line in lines]\naoc.neighbors(grid)\ntotal_flash_count = 0\n\nfor _ in range(100):\n turn()\n\nprint(total_flash_count)\n", "id": "11854962", "language": "Python", "matching_score": 1.253087043762207, "max_stars_count": 0, "path": "2021/111.py" }, { "content": "import re\r\n\r\ndef read_raw(path):\r\n return 
\"\".join(open(path).readlines())\r\n\r\ndef ints(input):\r\n # return a list of ints being separated by non-numerical characters\r\n if input.endswith(\".txt\"):\r\n return ints(read_raw(input))\r\n else:\r\n return [int(num) for num in re.split(\"\\D+\", input) if num != '']\r\n\r\ndef lines(input):\r\n # return lines separated by \\n.\r\n if input.endswith(\".txt\"):\r\n return lines(read_raw(input))\r\n else:\r\n return input.split(\"\\n\")\r\n\r\ndef blocks(input):\r\n # return blocks separated by \\n\\n\r\n if input.endswith(\".txt\"):\r\n return blocks(read_raw(input))\r\n else:\r\n return input.split(\"\\n\\n\")\r\n\r\ndef neighbors(grid, with_diagonals = True):\r\n # assign neighbors to each element in the grid\r\n for y in range(len(grid)):\r\n for x in range(len(grid[y])):\r\n neighbors = []\r\n for ny in range(max(0, y-1), y+2):\r\n for nx in range(max(0, x-1), x+2):\r\n if not (x == nx and y == ny):\r\n if with_diagonals or x == nx or y == ny:\r\n try:\r\n neighbors.append(grid[ny][nx])\r\n except IndexError:\r\n pass\r\n grid[y][x].neighbors = neighbors\r\n\r\ndef flat_grid(grid):\r\n # return each element in 2-D grid\r\n return [item for row in grid for item in row]\r\n", "id": "62212", "language": "Python", "matching_score": 1.0602717399597168, "max_stars_count": 0, "path": "2021/aoc.py" }, { "content": "import aoc\n\n# read file\nraw_coords, raw_instr = aoc.blocks(\"13_in.txt\")\n\n# determine max x and y values\ncoords = [(aoc.ints(coord)) for coord in aoc.lines(raw_coords)]\nmax_x = max([x for x, y in coords])\nmax_y = max([y for x, y in coords])\n\n# setup grid\ngrid = [[False]*(max_x+1) for y in range(max_y+1)]\n\n# read True cells\nfor x, y in coords:\n grid[y][x] = True\n\n# read first instruction\ninstr = raw_instr.split(\"\\n\")[0].split(\" \")[2]\ndimension, value = instr.split(\"=\")\nvalue = int(value)\n\n# execute instruction\nif dimension == \"x\":\n # new width\n width = max(len(grid[0])-value, value)\n new_grid = [[False]*width for y in range(len(grid))]\n\n # go through all rows\n for row_id in range(len(grid)):\n # generate overlapping subrows\n sub1 = grid[row_id][:value]\n sub2 = grid[row_id][:value:-1]\n\n # swap if list 2 is larger than list 1\n if len(sub2) > len(sub1):\n temp = sub1\n sub1 = sub2\n sub2 = temp\n\n # OR together the contents of both lists\n for cell_id in range(-len(sub2),0):\n sub1[cell_id] = sub1[cell_id] or sub2[cell_id]\n\n # save new row in new_grid\n new_grid[row_id] = sub1\nelse:\n #new height\n height = max(len(grid)-value, value)\n new_grid = [[False]*len(grid[0]) for y in range(height)]\n\n # generate overlapping subgrids\n sub1 = grid[:value]\n sub2 = grid[:value:-1]\n\n # swap if list 2 is larger than list 1\n if len(sub2) > len(sub1):\n temp = sub1\n sub1 = sub2\n sub2 = temp\n\n new_grid = sub1[:]\n\n # OR together the contents of both lists\n for row_id in range(-len(sub2),0):\n new_grid[row_id] = [a or b for a, b in zip(sub1[row_id], sub2[row_id])]\n\ngrid = new_grid\n\n# print count of True cells\nprint(sum([len([cell for cell in row if cell]) for row in grid]))\n", "id": "11798911", "language": "Python", "matching_score": 1.6334329843521118, "max_stars_count": 0, "path": "2021/131.py" }, { "content": "import aoc\r\n\r\nclass Coords:\r\n def __init__(self, line):\r\n self.x1, self.y1, self.x2, self.y2 = aoc.ints(line)\r\n if self.x1 > self.x2:\r\n self.x1,self.x2=self.x2,self.x1\r\n if self.y1 > self.y2:\r\n self.y1,self.y2=self.y2,self.y1\r\n\r\n def __repr__(self):\r\n return f\"Coords 
[x1={self.x1},y1={self.y1},x2={self.x2},y2={self.y2}]\"\r\n\r\ndef main():\r\n global fields\r\n coords = [Coords(line) for line in aoc.lines(\"05_in.txt\")]\r\n coords = [c for c in coords if c.x1 == c.x2 or c.y1 == c.y2]\r\n\r\n fields = [[0]*1000 for _ in range(1000)]\r\n\r\n for c in coords:\r\n if c.x1 != c.x2:\r\n for x in range(c.x1, c.x2+1):\r\n fields[c.y1][x] += 1\r\n else:\r\n for y in range(c.y1, c.y2+1):\r\n fields[y][c.x1] += 1\r\n\r\n print(sum([1 for row in fields for cell in row if cell > 1]))\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "id": "228487", "language": "Python", "matching_score": 0.015046507120132446, "max_stars_count": 0, "path": "2021/051.py" }, { "content": "import aoc\n\nclass Bracket:\n def __init__(self, bracket):\n if bracket == \"(\":\n self.expected = \")\"\n elif bracket == \"[\":\n self.expected = \"]\"\n elif bracket == \"{\":\n self.expected = \"}\"\n elif bracket == \"<\":\n self.expected = \">\"\n else:\n print(f\"Bracket '{bracket}' unknown.\")\n\n def check(self, bracket):\n if self.expected == bracket:\n return 0\n elif bracket == \")\":\n return 3\n elif bracket == \"]\":\n return 57\n elif bracket == \"}\":\n return 1197\n elif bracket == \">\":\n return 25137\n else:\n print(f\"Bracket '{bracket}' unknown.\")\n\ndef check_line(line):\n stack = []\n for bracket in line:\n if bracket in \"([{<\":\n stack.append(Bracket(bracket))\n else:\n ans = stack.pop().check(bracket)\n if ans > 0:\n return ans\n return 0\n\nlines = aoc.lines(\"10_in.txt\")\nresult = 0\n\nfor line in lines:\n result += check_line(line)\n\nprint(result)\n", "id": "1055600", "language": "Python", "matching_score": 2.074465274810791, "max_stars_count": 0, "path": "2021/101.py" }, { "content": "import aoc\n\nclass Bracket:\n def __init__(self, bracket):\n if bracket == \"(\":\n self.expected = \")\"\n elif bracket == \"[\":\n self.expected = \"]\"\n elif bracket == \"{\":\n self.expected = \"}\"\n elif bracket == \"<\":\n self.expected = \">\"\n else:\n print(f\"Bracket '{bracket}' unknown.\")\n\n def check(self, bracket):\n if self.expected == bracket:\n return True\n return False\n\ndef check_line(line):\n stack = []\n result = 0\n for bracket in line:\n if bracket in \"([{<\":\n stack.append(Bracket(bracket))\n else:\n ans = stack.pop().check(bracket)\n if not ans:\n return 0\n while len(stack) > 0:\n result *= 5\n result += \")]}>\".index(stack.pop().expected)+1\n return result\n\nlines = aoc.lines(\"10_in.txt\")\nresults = []\n\nfor line in lines:\n line_res = check_line(line)\n if line_res > 0:\n results.append(line_res)\n\nresults.sort()\nprint(results[len(results)//2])\n", "id": "10476825", "language": "Python", "matching_score": 0.11815693974494934, "max_stars_count": 0, "path": "2021/102.py" }, { "content": "import aoc\n\nclass Cave:\n def __init__(self, name):\n self.name = name\n self.multi_visit = name.isupper()\n self.connections = set()\n self.active_connections = []\n self.is_start = name == \"start\"\n self.is_end = name == \"end\"\n\n def count_ways(self):\n global paths\n global cur_path\n global small_twice\n\n me_set_small_twice = False\n \n # add a path if this is the end cave\n if self.is_end:\n paths += 1\n return\n\n # don't go further if this is a small cave that is already visited\n # added rule that one small cave can be visited twice\n if not self.multi_visit:\n if self.is_start and len(cur_path) > 1:\n return\n if len(self.active_connections) > 0:\n if small_twice == \"\":\n me_set_small_twice = True\n small_twice = self.name\n else:\n 
return\n\n # visit all caves that are not visited from this cave yet\n for cave in self.connections:\n cur_path.append(self)\n self.active_connections.append(cave)\n cave.count_ways()\n self.active_connections.pop()\n cur_path.pop()\n\n if me_set_small_twice:\n small_twice = \"\"\n\ncaves = {}\ncur_path = []\npaths = 0\nsmall_twice = \"\"\n\n# build up all connections\nfor line in aoc.lines(\"12_in.txt\"):\n a, b = line.split(\"-\")\n\n if not a in caves:\n caves[a] = Cave(a)\n if not b in caves:\n caves[b] = Cave(b)\n \n caves[a].connections.add(caves[b])\n caves[b].connections.add(caves[a])\n\ncaves[\"start\"].count_ways()\nprint(paths)\n", "id": "420203", "language": "Python", "matching_score": 3.3513336181640625, "max_stars_count": 0, "path": "2021/122.py" }, { "content": "import aoc\n\nclass Cave:\n def __init__(self, name):\n self.multi_visit = name.isupper()\n self.connections = set()\n self.active_connections = set()\n self.is_end = name == \"end\"\n\n def count_ways(self, from_cave):\n # add a path if this is the end cave\n if self.is_end:\n global paths\n paths += 1\n return\n\n # don't go further if this is a small cave that is already connected\n if not self.multi_visit and len(self.active_connections) > 0:\n return\n\n # visit all caves that are not visited from this cave yet\n for cave in self.connections^self.active_connections:\n self.active_connections.add(cave)\n cave.count_ways(self)\n self.active_connections.remove(cave)\n\ncaves = {}\npaths = 0\n\n# build up all connections\nfor line in aoc.lines(\"12_in.txt\"):\n a, b = line.split(\"-\")\n\n if not a in caves:\n caves[a] = Cave(a)\n if not b in caves:\n caves[b] = Cave(b)\n \n caves[a].connections.add(caves[b])\n caves[b].connections.add(caves[a])\n\nstart = caves[\"start\"]\nstart.count_ways(None)\nprint(paths)\n", "id": "2558199", "language": "Python", "matching_score": 0.9244367480278015, "max_stars_count": 0, "path": "2021/121.py" }, { "content": "import aoc\nimport re\n\ninp = aoc.lines(\"17_in.txt\")[0]\nmin_x, max_x, min_y, max_y = re.match('.+x=(-?\\d+)\\.+(-?\\d+), y=(-?\\d+)\\.+(-?\\d+)', inp).groups()\nmin_x, max_x, min_y, max_y = int(min_x), int(max_x), int(min_y), int(max_y)\n\n# find possible step counts of x\nx_step_counts = []\nfor vx_start in range(0, max_x+1):\n steps = 0\n pos_x = 0\n for vx in range(vx_start, -1, -1):\n steps += 1\n pos_x += vx\n if pos_x in range(min_x, max_x+1):\n x_step_counts.append((steps, vx_start, vx))\n\n# x_zero_velocities contains the step count after which a certain\n# start velocity leads to a horizontal stop inside the target area\n# these are valid for all step counts equal or larger the value in the list\nx_zero_velocities = [step_count[0:2] for step_count in x_step_counts if step_count[2] == 0]\n\n# the remaining values in x_step_counts are only valid on\n# the exact step they appear\nx_step_counts = [step_count[0:2] for step_count in x_step_counts if step_count[2] > 0]\n\n# find possible step counts of y\nvalues = []\nfor vy_start in range(min_y, 1000):\n steps = 0\n pos_y = 0\n for vy in range(vy_start, -1002, -1):\n steps += 1\n pos_y += vy\n if pos_y < min_y:\n break\n if pos_y in range(min_y, max_y+1):\n # add single step counts\n values += [(step_count[1], vy_start) for step_count in x_step_counts if step_count[0] == steps]\n # add multiple step counts\n values += [(step_count[1], vy_start) for step_count in x_zero_velocities if step_count[0] <= steps]\n\n# print result\nprint(len(set(values)))\n", "id": "2936264", "language": "Python", "matching_score": 
2.827080488204956, "max_stars_count": 0, "path": "2021/172.py" }, { "content": "import aoc\nimport re\n\ninp = aoc.lines(\"17_in.txt\")[0]\nmin_x, max_x, min_y, max_y = re.match('.+x=(-?\\d+)\\.+(-?\\d+), y=(-?\\d+)\\.+(-?\\d+)', inp).groups()\nmin_x, max_x, min_y, max_y = int(min_x), int(max_x), int(min_y), int(max_y)\n\n# find max y\nmax_y_values = []\nfor vy_start in range(min_y, 1000):\n pos_y = 0\n cur_max_y = 0\n for vy in range(vy_start, -1002, -1):\n pos_y += vy\n cur_max_y = max(cur_max_y, pos_y)\n if pos_y < min_y:\n break\n if pos_y in range(min_y, max_y+1):\n max_y_values.append(cur_max_y)\n\n# print result\nprint(max(max_y_values))\n", "id": "300770", "language": "Python", "matching_score": 0.004576260689646006, "max_stars_count": 0, "path": "2021/171.py" }, { "content": "import aoc\r\n\r\nfishes = aoc.ints(\"06_in.txt\")\r\n\r\nfor day in range(80):\r\n for fish_id in range(len(fishes)):\r\n if fishes[fish_id] == 0:\r\n fishes[fish_id] = 6\r\n fishes.append(8)\r\n else:\r\n fishes[fish_id] -= 1\r\n\r\nprint(len(fishes))\r\n", "id": "4113722", "language": "Python", "matching_score": 1.0598561763763428, "max_stars_count": 0, "path": "2021/061.py" }, { "content": "import aoc\r\n\r\nfishes = aoc.ints(\"06_in.txt\")\r\ntimer_count = []\r\n\r\nfor days in range(9):\r\n timer_count.append(sum([1 for fish in fishes if fish == days]))\r\n\r\nfor day in range(256):\r\n new_fishes = timer_count[0]\r\n timer_count = timer_count[1:]\r\n timer_count.append(new_fishes)\r\n timer_count[6] += new_fishes\r\n\r\nprint(sum(timer_count))\r\n", "id": "8550679", "language": "Python", "matching_score": 0.20217880606651306, "max_stars_count": 0, "path": "2021/062.py" }, { "content": "import aoc\r\ndepths = aoc.ints(\"01_in.txt\")\r\n\r\nprev = depths[0]\r\ncountDeeper = 0\r\nfor depth in depths:\r\n if depth > prev:\r\n countDeeper += 1\r\n prev = depth\r\n\r\nprint(countDeeper)\r\n", "id": "12335747", "language": "Python", "matching_score": 0.912263810634613, "max_stars_count": 0, "path": "2021/011.py" }, { "content": "import aoc\n\ndepths = [[int(z) for z in line] for line in aoc.lines(\"09_in.txt\")]\nlow_points = []\n\nfor y in range(len(depths)):\n for x in range(len(depths[0])):\n adjacent = []\n if x > 0:\n adjacent.append(depths[y][x-1])\n if y > 0:\n adjacent.append(depths[y-1][x])\n if x < len(depths[0])-1:\n adjacent.append(depths[y][x+1])\n if y < len(depths)-1:\n adjacent.append(depths[y+1][x])\n if all([depths[y][x] < neighbor for neighbor in adjacent]):\n low_points.append(depths[y][x])\n\nprint(sum(low_points)+len(low_points))\n \n", "id": "7254847", "language": "Python", "matching_score": 1.2641959190368652, "max_stars_count": 0, "path": "2021/091.py" }, { "content": "import aoc\n\nclass Point:\n def __init__(self, value, x, y):\n self.value = value\n self.x = x\n self.y = y\n self.neighbors = []\n self.basin_id = 0\n \n def get_neighbors(self):\n if self.x > 0:\n self.neighbors.append(points[self.y][self.x-1])\n if self.y > 0:\n self.neighbors.append(points[self.y-1][self.x])\n if self.x < len(points[self.y])-1:\n self.neighbors.append(points[self.y][self.x+1])\n if self.y < len(points)-1:\n self.neighbors.append(points[self.y+1][self.x])\n\n def is_low_point(self):\n return all([self.value < neighbor.value for neighbor in self.neighbors])\n\n def set_basin(self, basin_id):\n if self.value < 9 and self.basin_id == 0:\n self.basin_id = basin_id\n for point in self.neighbors:\n if point.value >= self.value:\n point.set_basin(basin_id)\n\n\n\nlines = aoc.lines(\"09_in.txt\")\npoints = 
[]\nfor y in range(len(lines)):\n lpoints = []\n for x in range(len(lines[y])):\n lpoints.append(Point(int(lines[y][x]),x,y))\n points.append(lpoints)\n\nall_points = [point for line in points for point in line]\n\nfor point in all_points:\n point.get_neighbors()\n\nlow_points = [point for point in all_points if point.is_low_point()]\n\nbasin_id = 1\nfor point in low_points:\n point.set_basin(basin_id)\n basin_id += 1\n\nbasin_sizes = [0]*len(low_points)\n\nfor point in all_points:\n if point.basin_id > 0:\n basin_sizes[point.basin_id - 1] += 1\nbasin_sizes.sort(reverse=True)\n\nprint(basin_sizes[0] * basin_sizes[1] * basin_sizes[2])\n\n", "id": "11179176", "language": "Python", "matching_score": 0.1454155147075653, "max_stars_count": 0, "path": "2021/092.py" }, { "content": "import aoc\n\nclass BitStream:\n def __init__(self, data, read_raw = False):\n self.index = 0\n if read_raw:\n self.bin_data = data\n else:\n raw_data = aoc.lines(data)[0] \n self.bin_data = \"\".join([f\"{int(digit,16):04b}\" for digit in raw_data])\n\n def next(self, bits = 1):\n data = self.bin_data[self.index:self.index+bits]\n self.index += bits\n return data\n\n def next_int(self, bits = 1):\n return int(self.next(bits),2)\n\n def has_next(self):\n return self.index < len(self.bin_data)\n\nclass Packet:\n def __init__(self, bit_stream):\n global all_packets\n all_packets.append(self)\n self.version = bit_stream.next_int(3)\n self.type = bit_stream.next_int(3)\n self.value = 0\n self.subpackets = []\n\n if self.type == 4:\n read_another = '1'\n while read_another == '1':\n read_another = bit_stream.next()\n self.value = self.value * 16 + bit_stream.next_int(4)\n else:\n length_type = bit_stream.next()\n if length_type == '0':\n length = bit_stream.next_int(15)\n sub_stream = BitStream(bit_stream.next(length), read_raw = True)\n while sub_stream.has_next():\n self.subpackets.append(Packet(sub_stream))\n else:\n length = bit_stream.next_int(11)\n for _ in range(length):\n self.subpackets.append(Packet(bit_stream))\n\n def __repr__(self):\n return f\"V:{self.version} T:{self.type} val:{self.value} sub:{self.subpackets}\"\n\nall_packets = []\nPacket(BitStream(\"16_in.txt\"))\nprint(sum([packet.version for packet in all_packets]))\n", "id": "2974288", "language": "Python", "matching_score": 3.630817174911499, "max_stars_count": 0, "path": "2021/161.py" }, { "content": "import aoc\n\nclass BitStream:\n def __init__(self, data, read_raw = False):\n self.index = 0\n if read_raw:\n self.bin_data = data\n else: \n self.bin_data = \"\".join([f\"{int(digit,16):04b}\" for digit in aoc.lines(data)[0]])\n\n def next(self, bits = 1):\n data = self.bin_data[self.index:self.index+bits]\n self.index += bits\n return data\n\n def next_int(self, bits = 1):\n return int(self.next(bits),2)\n\n def has_next(self):\n return self.index < len(self.bin_data)\n\nclass Packet:\n def __init__(self, bit_stream):\n self.version = bit_stream.next_int(3)\n self.type = bit_stream.next_int(3)\n self.value = 0\n self.subpackets = []\n\n if self.type == 4:\n read_another = '1'\n while read_another == '1':\n read_another = bit_stream.next()\n self.value = self.value * 16 + bit_stream.next_int(4)\n else:\n length_type = bit_stream.next()\n if length_type == '0':\n length = bit_stream.next_int(15)\n sub_stream = BitStream(bit_stream.next(length), read_raw = True)\n while sub_stream.has_next():\n self.subpackets.append(Packet(sub_stream))\n else:\n length = bit_stream.next_int(11)\n for _ in range(length):\n self.subpackets.append(Packet(bit_stream))\n if 
self.type == 0:\n self.value = sum([packet.value for packet in self.subpackets])\n elif self.type == 1:\n product = 1\n for packet in self.subpackets:\n product *= packet.value\n self.value = product\n elif self.type == 2:\n self.value = min([packet.value for packet in self.subpackets])\n elif self.type == 3:\n self.value = max([packet.value for packet in self.subpackets])\n elif self.type == 5:\n self.value = 1 if self.subpackets[0].value > self.subpackets[1].value else 0\n elif self.type == 6:\n self.value = 1 if self.subpackets[0].value < self.subpackets[1].value else 0\n elif self.type == 7:\n self.value = 1 if self.subpackets[0].value == self.subpackets[1].value else 0\n\n def __repr__(self):\n return f\"V:{self.version} T:{self.type} val:{self.value} sub:{self.subpackets}\"\n\nprint(Packet(BitStream(\"16_in.txt\")).value)\n", "id": "11290565", "language": "Python", "matching_score": 0.7163097262382507, "max_stars_count": 0, "path": "2021/162.py" }, { "content": "import aoc\n\nclass Pair:\n def __init__(self, value):\n input_pair = eval(value) if type(value) is str else value\n\n if type(input_pair[0]) is list:\n self.left = Pair(input_pair[0])\n elif type(input_pair[0]) is int:\n self.left = BoxedInt(input_pair[0])\n else:\n self.left = input_pair[0]\n\n if type(input_pair[1]) is list:\n self.right = Pair(input_pair[1])\n elif type(input_pair[1]) is int:\n self.right = BoxedInt(input_pair[1])\n else:\n self.right = input_pair[1]\n \n\n def __repr__(self):\n return f\"[{self.left},{self.right}]\"\n\n def __add__(self, other):\n return Pair([self, other])\n\n def __radd__(self, other):\n if other == None:\n return self\n return self + other\n\n def chain(self):\n # store all ints in a list, so you can easily decide, which is previous\n # and which is next\n global chain\n if type(self.left) is BoxedInt:\n chain.append(self.left)\n else:\n self.left.chain()\n \n if type(self.right) is BoxedInt:\n chain.append(self.right)\n else:\n self.right.chain()\n \n def reduce(self, nesting_level = 0):\n global was_reduced, chain\n\n if type(self.left) is BoxedInt:\n if int(self.left) > 9: # split left element\n self.left = split(self.left)\n return\n elif nesting_level == 3: # explode left element\n chain_index = chain.index(self.left.left)\n if \n \n \n# With a BoxedInt, we are able to save an int as reference,\n# so a Pair can change a value of a different Pair\nclass BoxedInt:\n def __init__(self, value):\n self.value = value\n\n def __add__(self, other):\n return BoxedInt(int(self) + int(other))\n\n def __mul__(self, other):\n return BoxedInt(int(self) * int(other))\n\n def __int__(self):\n return self.value\n\n def __repr__(self):\n return str(int(self))\n\ndef split(boxed_int):\n global was_reduced\n was_reduced = True\n return Pair(int(boxed_int) // 2, round(int(boxed_int)/2))\n\ndef explode(pair):\n global chain, was_reduced\n pos = chain.index(pair.left)\n if pos > 0:\n chain[pos-1] += pair.left\n if pos < len(chain)-2:\n chain[pos+2] += pair.right\n return BoxedInt(0)\n \nresult = None\nfor line in aoc.lines(\"18_in.txt\"):\n result += Pair(line)\n was_reduced = True\n while was_reduced:\n # Build int chain\n chain = list()\n result.chain()\n chain[0].prev = None\n chain[-1].next = None\n for index in range(len(chain)-1):\n chain[index].next = chain[index+1]\n chain[index+1].prev = chain[index]\n\n # reduce\n was_reduced = False\n result.reduce()\n", "id": "9027482", "language": "Python", "matching_score": 0.9907578229904175, "max_stars_count": 0, "path": "2021/181.py" }, { "content": 
"import aoc\n\nclass Pair:\n def __init__(self, pair, insert):\n self.pair = pair\n self.insert = insert\n self.count = 0\n self.new_count = 0\n\n def add_count(self):\n to_pair1 = self.pair[0] + self.insert\n to_pair2 = self.insert + self.pair[1]\n \n pairs[to_pair1].new_count += self.count\n pairs[to_pair2].new_count += self.count\n\n def apply_new_count(self):\n self.count = self.new_count\n self.new_count = 0\n\n def __repr__(self):\n return str(self.count)\n\n# read polymer and templates\npairs = dict()\npolymer, raw_templates = aoc.blocks(\"14_in.txt\")\nfor pair, insert in [line.split(\" -> \") for line in aoc.lines(raw_templates)]:\n pairs[pair] = Pair(pair, insert)\nfor pair_id in range(len(polymer) - 1):\n pair = polymer[pair_id:pair_id+2]\n pairs[pair].count = pairs[pair].count + 1\n\n# do 40 iterations of inserting\nfor _ in range(40):\n for pair in pairs.values():\n pair.add_count()\n for pair in pairs.values():\n pair.apply_new_count()\n\n# count single letters\ncounts = dict()\nfor pair in pairs:\n counts[pair[0]] = 0\n counts[pair[1]] = 0\nfor pair in pairs.values():\n counts[pair.pair[0]] = counts[pair.pair[0]] + pair.count\n counts[pair.pair[1]] = counts[pair.pair[1]] + pair.count\n\n# print result\nprint((max(counts.values()) - min(counts.values()) + 1) // 2)\n", "id": "6819250", "language": "Python", "matching_score": 2.3199868202209473, "max_stars_count": 0, "path": "2021/142.py" }, { "content": "import aoc\n\n# read polymer and templates\npolymer, raw_templates = aoc.blocks(\"14_in.txt\")\ntemplates = dict(line.split(\" -> \") for line in aoc.lines(raw_templates))\n\n# do 10 iterations of inserting\nfor _ in range(10):\n new_polymer = \"\"\n for index in range(len(polymer)-1):\n pair = polymer[index:index+2]\n new_polymer += polymer[index] + templates[pair]\n new_polymer += polymer[-1]\n polymer = new_polymer\n\n# count letters\ncounts = dict([(char, 0) for char in polymer])\nfor char in polymer:\n counts[char] = counts[char] + 1\n\n# print result\nprint(max(counts.values()) - min(counts.values()))\n", "id": "10007157", "language": "Python", "matching_score": 0.13219137489795685, "max_stars_count": 0, "path": "2021/141.py" }, { "content": "import aoc\r\nlines = aoc.lines(\"03_in.txt\")\r\n\r\nsums = [0] * len(lines[0])\r\n\r\nfor line in lines:\r\n for i in range(len(line)):\r\n sums[i] += int(line[i])\r\n\r\ngamma = int(\"\".join([str(int(bit_sum >= len(lines)//2)) for bit_sum in sums]),2)\r\nepsilon = int(\"\".join([str(int(bit_sum < len(lines)//2)) for bit_sum in sums]),2)\r\n\r\nprint(gamma*epsilon)\r\n", "id": "2668250", "language": "Python", "matching_score": 1.281648874282837, "max_stars_count": 0, "path": "2021/031.py" }, { "content": "import aoc\r\nall_lines = aoc.lines(\"03_in.txt\")\r\n\r\nlines = all_lines[:]\r\nfor i in range(len(lines[0])):\r\n col_sum = 0\r\n for line in lines:\r\n col_sum += int(line[i])\r\n most_common = str(int(col_sum >= len(lines)/2))\r\n lines = [line for line in lines if line[i] == most_common]\r\n if len(lines) == 1:\r\n break\r\noxygen = int(lines[0], 2)\r\n\r\nlines = all_lines[:]\r\nfor i in range(len(lines[0])):\r\n col_sum = 0\r\n for line in lines:\r\n col_sum += int(line[i])\r\n most_common = str(int(col_sum < len(lines)/2))\r\n lines = [line for line in lines if line[i] == most_common]\r\n if len(lines) == 1:\r\n break\r\nco2 = int(lines[0], 2)\r\n\r\n\r\nprint(oxygen*co2)\r\n", "id": "8727897", "language": "Python", "matching_score": 0.6665381193161011, "max_stars_count": 0, "path": "2021/032.py" }, { "content": "import 
aoc\r\n\r\nclass Table:\r\n def __init__(self, deftxt):\r\n self.lines = [aoc.ints(line) for line in aoc.lines(deftxt)];\r\n\r\n def check(self, num):\r\n for line in self.lines:\r\n if find(line,num) >= 0:\r\n line[find(line,num)] = 0\r\n\r\n for line in self.lines:\r\n if line == [0]*len(line):\r\n return sum([sum(line) for line in self.lines])\r\n\r\n zero_cols = [True]*len(self.lines[0])\r\n for line in self.lines:\r\n for col in range(len(line)):\r\n if line[col] != 0:\r\n zero_cols[col] = False\r\n\r\n if any(zero_cols):\r\n return sum([sum(line) for line in self.lines])\r\n\r\n return 0\r\n\r\ndef find(lst, num):\r\n try:\r\n i = lst.index(num)\r\n return i\r\n except Exception:\r\n return -1\r\n\r\ndef main():\r\n blocks = aoc.blocks(\"04_in.txt\")\r\n\r\n drawn_numbers = aoc.ints(blocks[0])\r\n\r\n tables = [Table(block) for block in blocks[1:]]\r\n\r\n for num in drawn_numbers:\r\n for table in tables:\r\n res = table.check(num)\r\n if res > 0:\r\n print(num * res)\r\n return\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "id": "9739666", "language": "Python", "matching_score": 0.9999639987945557, "max_stars_count": 0, "path": "2021/041.py" }, { "content": "import aoc\r\n\r\ndef get_num(line):\r\n seg_num = [set(num) for num in line[:58].split(\" \")]\r\n displays = [set(num) for num in line[61:].split(\" \")]\r\n\r\n numbers = [None]*10\r\n\r\n # allocate obvious numbers\r\n for num in seg_num:\r\n if len(num) == 2:\r\n numbers[1] = num\r\n elif len(num) == 3:\r\n numbers[7] = num\r\n elif len(num) == 4:\r\n numbers[4] = num\r\n elif len(num) == 7:\r\n numbers[8] = num\r\n\r\n seg_num = [num for num in seg_num if len(num) in (5,6)]\r\n\r\n # find 6: 6 segments where one segment of number 7 isn't present\r\n candidates = [num for num in seg_num if len(num) == 6]\r\n for candidate in candidates:\r\n if len(numbers[7] & candidate) == 2:\r\n numbers[6] = candidate\r\n break\r\n candidates.remove(candidate)\r\n \r\n # find 9: 6 segments containing all segments of number 4\r\n for candidate in candidates:\r\n if len(numbers[4] & candidate) == 4:\r\n numbers[9] = candidate\r\n break\r\n candidates.remove(candidate)\r\n \r\n # save 0: remaining number with 6 segments\r\n numbers[0] = candidates[0]\r\n\r\n # find 3: 5 segments containing both segments of number 1\r\n candidates = [num for num in seg_num if len(num) == 5]\r\n for candidate in candidates:\r\n if len(numbers[1] & candidate) == 2:\r\n numbers[3] = candidate\r\n break\r\n candidates.remove(candidate)\r\n\r\n # find 2: 5 segments with two overlapping with number 4\r\n for candidate in candidates:\r\n if len(numbers[4] & candidate) == 2:\r\n numbers[2] = candidate\r\n break\r\n candidates.remove(candidate)\r\n\r\n # save 5: remaining number\r\n numbers[5] = candidates[0]\r\n\r\n # build number\r\n result = 0\r\n for digit in range(4):\r\n result *= 10\r\n result += numbers.index(displays[digit])\r\n return result\r\n\r\nprint(sum([get_num(line) for line in aoc.lines(\"08_in.txt\")]))\r\n\r\n\r\n \r\n", "id": "4703850", "language": "Python", "matching_score": 1.0545190572738647, "max_stars_count": 0, "path": "2021/082.py" }, { "content": "import aoc\r\n\r\nlines = aoc.lines(\"08_in.txt\")\r\n\r\ndisplays = \"\"\r\n\r\nfor line in lines:\r\n displays += line[60:]\r\n\r\nprint(sum([1 for display in displays.split(\" \") if len(display) in (2,3,4,7)]))\r\n", "id": "6274645", "language": "Python", "matching_score": 1.4559530019760132, "max_stars_count": 0, "path": "2021/081.py" }, { "content": "import aoc\r\ncommands = 
aoc.lines(\"02_in.txt\")\r\nx = 0\r\ny = 0\r\naim = 0\r\n\r\nfor command in commands:\r\n direction, distance = command.split(\" \")\r\n distance = int(distance)\r\n \r\n if direction == \"down\":\r\n aim += distance\r\n elif direction == \"up\":\r\n aim -= distance\r\n else:\r\n x += distance\r\n y += aim * distance\r\n\r\nprint(x*y)\r\n", "id": "1767102", "language": "Python", "matching_score": 0.016232332214713097, "max_stars_count": 0, "path": "2021/022.py" }, { "content": "import aoc\r\n\r\ncrabs = aoc.ints(\"07_in.txt\")\r\nfuel_usages = []\r\n\r\nfor position in range(min(crabs), max(crabs)):\r\n fuel_usages.append(sum([abs(crab-position) for crab in crabs]))\r\n\r\nprint(\"min fuel:\", min(fuel_usages))\r\n", "id": "10511828", "language": "Python", "matching_score": 2.1021203994750977, "max_stars_count": 0, "path": "2021/071.py" }, { "content": "import aoc\r\n\r\ndef triangle_value(num):\r\n return (num*(num+1))//2\r\n\r\ncrabs = aoc.ints(\"07_in.txt\")\r\nfuel_usages = []\r\n\r\nfor position in range(min(crabs), max(crabs)):\r\n fuel_usages.append(sum([triangle_value(abs(crab-position)) for crab in crabs]))\r\n\r\nprint(\"min fuel:\", min(fuel_usages))\r\n", "id": "10775763", "language": "Python", "matching_score": 2.0904524326324463, "max_stars_count": 0, "path": "2021/072.py" } ]
1.060064
XPXPv2
[ { "content": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys as keys\nimport time\n\nclass connection:\n\n def __helper_load_config(self,name):\n #loads file and returns a list but if the file dose not exist it returns None\n try:\n FP = open(name,'r')\n rawString = FP.read()\n listed = rawString.split(\"\\n\")[:-1]\n return listed\n except:\n return None\n\n def __init__(self,health_fail = 0.0 , tool_fail = ['','','',''], ammo_fail = {'pass':False}, heal_fail = {'pass':False}, armour_fail = {'pass':False}):\n #define varables\n self.driver = None\n self.FAILED_HEALTH = health_fail\n self.FAILED_TOOLS = tool_fail\n self.FAILED_AMMO = ammo_fail\n self.DEFAULT_AMMO = {'pass':True}\n self.FAILED_HEALING = heal_fail\n self.DEFAULT_HEALING = {'pass':True}\n self.FAILED_ARMOUR = armour_fail\n\n def set_driver(self,driver = 'firefox'):\n #loads the driver\n\n #firefox\n if driver == 'firefox':\n config = self.__helper_load_config(\"firefox_profile\")\n if config == None:\n self.driver = webdriver.Firefox()\n else:\n profile = webdriver.FirefoxProfile(config[0])\n self.driver = webdriver.Firefox(profile)\n\n if driver == 'chrome':\n config = self.__helper_load_config(\"chrome_profile\")\n if config == None:\n self.driver = webdriver.Chrome()\n else:\n options = webdriver.ChromeOptions()\n directory = 'user-data-dir=' + config[0]\n profile = 'profile-directory=' + config[1]\n options.add_argument(directory)\n options.add_argument(profile)\n self.driver = webdriver.Chrome(options=options)\n\n\n def close(self):\n self.driver.close()\n\n def load_page(self):\n #opens webpage\n\n self.driver.get(\"http://surviv.io\")\n\n def login(self,name):\n #logs in/joins the game\n\n #finds the join buttons\n join_solo = self.driver.find_element_by_id(\"btn-start-mode-0\")\n\n #finds name input\n name_input = self.driver.find_element_by_id('player-name-input-solo')\n\n #clears the input\n name_input.clear()\n\n #inputs the name\n name_input.send_keys(name)\n\n #clicks join game\n join_solo.click()\n\n def __get_health(self):\n #gets health of player\n\n health_bar = self.driver.find_element_by_id(\"ui-health-actual\")\n health_str = health_bar.get_property('attributes')['2']['value'].split(';')[1].split(\":\")[1]\n health_float = float(health_str[:-1])\n return health_float\n\n def __get_tools(self):\n #gets equipt tools\n\n tools = self.driver.find_elements_by_class_name(\"ui-weapon-name\")\n\n toolList = []\n\n for tool in tools:\n toolList += [tool.text]\n\n return toolList\n\n def __get_ammo(self):\n #gets ammo listing\n\n ammoList = self.driver.find_element_by_id(\"ui-ammo-interactive\")\n ammoList = ammoList.find_elements_by_css_selector(\"*\")\n ammoDic = self.DEFAULT_AMMO\n\n for ammo in ammoList:\n if ammo.get_attribute(\"id\") == \"\":\n continue\n\n ammoName = str(ammo.get_attribute(\"id\").split(\"-\")[2])\n ammoData = str(ammo.find_element_by_class_name(\"ui-loot-count\").text)\n\n ammoDic.update({ammoName:ammoData})\n\n return ammoDic\n\n\n def __get_healing(self):\n #gets medic listing\n\n medicList = self.driver.find_element_by_id(\"ui-medical-interactive\")\n medicList = medicList.find_elements_by_css_selector(\"*\")\n medicDic = self.DEFAULT_HEALING\n\n for medic in medicList:\n if medic.get_attribute(\"id\") == \"\":\n continue\n\n medicName = str(medic.get_attribute(\"id\").split(\"-\")[2])\n medicData = str(medic.find_element_by_class_name(\"ui-loot-count\").text)\n\n medicDic.update({medicName:medicData})\n\n return medicDic\n\n def __get_armour(self):\n 
equipedArmor = {'pass':True}\n equipment = self.driver.find_elements_by_class_name('ui-armor-counter')\n for equiped in equipment:\n Eid = equiped.get_attribute(\"id\")\n subElement = equiped.find_elements_by_class_name(\"ui-armor-level\")\n if len(subElement) < 1:\n continue\n name = Eid.split(\"-\")[2]\n equipedArmor.update({name:str(subElement[0].text)})\n\n return equipedArmor\n\n def get_health(self):\n try:\n return self.__get_health()\n except:\n return self.FAILED_HEALTH\n\n def get_tools(self):\n try:\n return self.__get_tools()\n except:\n return self.FAILED_TOOLS\n\n def get_ammo(self):\n try:\n return self.__get_ammo()\n except:\n return self.FAILED_AMMO\n\n def get_healing(self):\n try:\n return self.__get_healing()\n except:\n return self.FAILED_HEALING\n\n def get_armour(self):\n try:\n return self.__get_armour()\n except:\n return self.FAILED_ARMOUR\n\nif __name__ == '__main__':\n a = connection()\n a.FAILED_HEALTH = 100.0\n a.set_driver(driver='firefox')\n a.load_page()\n a.login(\"bot\")\n data = None\n run = True\n while run:\n ndata = {'health':a.get_health(),\"tool\":a.get_tools(),'ammo':a.get_ammo(),'healing':a.get_healing(),'armor':a.get_armour()}\n if ndata != data:\n data = ndata\n print(data)\n\n if data[\"health\"] == 0.0:\n if input(\"contine?[y/n]:\") == \"y\":\n continue\n a.close()\n run = False\n", "id": "5387313", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "connection.py" } ]
0
GeeWee
[ { "content": "from django.apps import AppConfig\n\n\nclass OrderedModelConfig(AppConfig):\n name = \"ordered_model\"\n label = \"ordered_model\"\n\n def ready(self):\n # This import has side effects\n # noinspection PyUnresolvedReferences\n from .signals import on_ordered_model_delete\n", "id": "5854267", "language": "Python", "matching_score": 3.1440577507019043, "max_stars_count": 1, "path": "ordered_model/apps.py" }, { "content": "default_app_config = \"ordered_model.apps.OrderedModelConfig\"\n", "id": "10081174", "language": "Python", "matching_score": 0.7141788601875305, "max_stars_count": 1, "path": "ordered_model/__init__.py" }, { "content": "from django.db.models.signals import post_delete\nfrom django.dispatch import receiver\nfrom ordered_model.models import OrderedModelBase\nfrom django.db.models import F\n\n\n@receiver(post_delete, dispatch_uid=\"on_ordered_model_delete\")\ndef on_ordered_model_delete(sender, instance, **kwargs):\n \"\"\"\n This signal makes sure that when an OrderedModelBase is deleted via cascade database deletes.\n \"\"\"\n\n \"\"\"\n We're only interested in subclasses of OrderedModelBase.\n We want to be able to support 'extra_kwargs' on the delete()\n method, which we can't do if we do all our work in the signal. We add a property to signal whether or not\n the model's .delete() method was called, because if so - we don't need to do any more work.\n \"\"\"\n if not issubclass(sender, OrderedModelBase) or getattr(instance, '_was_deleted_via_delete_method', None):\n return\n\n qs = instance.get_ordering_queryset()\n update_kwargs = {instance.order_field_name: F(instance.order_field_name) - 1}\n # Here we don't use a subQuery to get the value of the model, as it's already been deleted at this point\n # in the process, so we're actually unable to. 
We'll just have to pray that no other object has taken its\n # place from here until it got deleted.\n qs.filter(\n **{instance.order_field_name + \"__gt\": getattr(instance, instance.order_field_name)}\n ).update(**update_kwargs)\n", "id": "12565012", "language": "Python", "matching_score": 1.4015477895736694, "max_stars_count": 1, "path": "ordered_model/signals.py" }, { "content": "try:\n from django.db.models.fields.subclassing import Creator\nexcept ImportError:\n # This class was removed in Django 1.10, so I've pulled it into\n # django-recurrence.\n\n class Creator(object):\n \"\"\"\n A placeholder class that provides a way to set the attribute\n on the model.\n \"\"\"\n def __init__(self, field):\n self.field = field\n\n def __get__(self, obj, type=None):\n if obj is None:\n return self\n return obj.__dict__[self.field.name]\n\n def __set__(self, obj, value):\n obj.__dict__[self.field.name] = self.field.to_python(value)\n", "id": "10954727", "language": "Python", "matching_score": 1.6694467067718506, "max_stars_count": 3, "path": "recurrence/compat.py" }, { "content": "from docutils import nodes, utils\nfrom docutils.parsers.rst.roles import set_classes\n\n\n# With thanks to <NAME> for writing\n# https://doughellmann.com/blog/2010/05/09/defining-custom-roles-in-sphinx/\n# - this code is derived from an example BitBucket configuration.\n\n\ndef make_issue_node(rawtext, app, slug, options):\n \"\"\"Create a link to a GitHub issue.\n\n :param rawtext: Text being replaced with link node.\n :param app: Sphinx application context\n :param slug: ID of the thing to link to\n :param options: Options dictionary passed to role func.\n \"\"\"\n #\n try:\n base = app.config.github_project_url\n if not base:\n raise AttributeError\n except AttributeError as err:\n raise ValueError('github_project_url configuration value is not set (%s)' % str(err))\n\n slash = '/' if base[-1] != '/' else ''\n ref = base + slash + 'issues/' + slug + '/'\n set_classes(options)\n node = nodes.reference(rawtext, '#' + utils.unescape(slug), refuri=ref,\n **options)\n return node\n\n\ndef ghissue_role(name, rawtext, text, lineno, inliner, options={}, content=[]):\n \"\"\"Link to a GitHub issue.\n\n Returns 2 part tuple containing list of nodes to insert into the\n document and a list of system messages. Both are allowed to be\n empty.\n\n :param name: The role name used in the document.\n :param rawtext: The entire markup snippet, with role.\n :param text: The text marked with the role.\n :param lineno: The line number where rawtext appears in the input.\n :param inliner: The inliner instance that called us.\n :param options: Directive options for customization.\n :param content: The directive content for customization.\n \"\"\"\n try:\n issue_num = int(text)\n if issue_num <= 0:\n raise ValueError\n except ValueError:\n msg = inliner.reporter.error(\n 'GitHub issue number must be a number greater than or equal to 1; '\n '\"%s\" is invalid.' 
% text, line=lineno)\n prb = inliner.problematic(rawtext, rawtext, msg)\n return [prb], [msg]\n app = inliner.document.settings.env.app\n node = make_issue_node(rawtext, app, str(issue_num), options)\n return [node], []\n\n\ndef setup(app):\n \"\"\"Install the plugin.\n\n :param app: Sphinx application context.\n \"\"\"\n app.add_role('issue', ghissue_role)\n app.add_config_value('github_project_url', None, 'env')\n return\n", "id": "8428811", "language": "Python", "matching_score": 0.9473077654838562, "max_stars_count": 251, "path": "docs/github.py" }, { "content": "from recurrence import Rule\nimport recurrence\n\n\ndef test_rule_to_text_simple():\n assert Rule(\n recurrence.WEEKLY\n ).to_text() == 'weekly'\n\n\ndef test_rule_to_text_interval():\n assert Rule(\n recurrence.WEEKLY,\n interval=3\n ).to_text() == 'every 3 weeks'\n\n\ndef test_rule_to_text_oneoff():\n assert Rule(\n recurrence.WEEKLY,\n count=1\n ).to_text() == 'weekly, occuring once'\n\n\ndef test_rule_to_text_multiple():\n assert Rule(\n recurrence.WEEKLY,\n count=5\n ).to_text() == 'weekly, occuring 5 times'\n\n\ndef test_rule_to_text_yearly_bymonth():\n assert Rule(\n recurrence.YEARLY,\n bymonth=[1, 3],\n ).to_text() == 'annually, each January, March'\n\n assert Rule(\n recurrence.YEARLY,\n bymonth=[1, 3],\n ).to_text(True) == 'annually, each Jan, Mar'\n\n\ndef test_rule_to_text_yearly_byday():\n assert Rule(\n recurrence.YEARLY,\n byday=[1, 3],\n ).to_text() == 'annually, on the Tuesday, Thursday'\n\n assert Rule(\n recurrence.YEARLY,\n byday=[1, 3],\n ).to_text(True) == 'annually, on the Tue, Thu'\n", "id": "3119579", "language": "Python", "matching_score": 1.002427577972412, "max_stars_count": 251, "path": "tests/test_to_text.py" }, { "content": "from datetime import datetime\nfrom recurrence import Recurrence, Rule\nimport recurrence\n\n\ndef test_exclusion_date():\n rule = Rule(\n recurrence.DAILY\n )\n\n pattern = Recurrence(\n dtstart=datetime(2014, 1, 2, 0, 0, 0),\n dtend=datetime(2014, 1, 4, 0, 0, 0),\n rrules=[rule],\n exdates=[\n datetime(2014, 1, 3, 0, 0, 0)\n ]\n )\n\n occurrences = [\n instance for instance in\n pattern.occurrences()\n ]\n\n assert occurrences == [\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0),\n ]\n\n assert 2 == pattern.count()\n\n\ndef test_exclusion_date_no_limits():\n pattern = Recurrence(\n rdates=[\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 2, 0, 0, 0),\n ],\n exdates=[\n datetime(2014, 1, 2, 0, 0, 0)\n ]\n )\n\n occurrences = [\n instance for instance in\n pattern.occurrences()\n ]\n\n assert occurrences == [\n datetime(2014, 1, 1, 0, 0, 0),\n ]\n\n assert 1 == pattern.count()\n\n\ndef test_exclusion_rule():\n inclusion_rule = Rule(\n recurrence.DAILY\n )\n\n exclusion_rule = Rule(\n recurrence.WEEKLY,\n byday=recurrence.THURSDAY\n )\n\n pattern = Recurrence(\n dtstart=datetime(2014, 1, 2, 0, 0, 0),\n dtend=datetime(2014, 1, 4, 0, 0, 0),\n rrules=[inclusion_rule],\n exrules=[exclusion_rule]\n )\n\n occurrences = [\n instance for instance in\n pattern.occurrences()\n ]\n\n assert occurrences == [\n datetime(2014, 1, 3, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0),\n ]\n\n assert 2 == pattern.count()\n", "id": "1468928", "language": "Python", "matching_score": 2.9352633953094482, "max_stars_count": 251, "path": "tests/test_exclusions.py" }, { "content": "from datetime import datetime\nfrom recurrence import Recurrence, Rule\nimport recurrence\n\n\ndef test_truthiness_with_single_rrule():\n rule = Rule(\n recurrence.DAILY\n )\n\n object = Recurrence(\n 
rrules=[rule]\n )\n\n assert bool(object)\n\n\ndef test_truthiness_with_single_exrule():\n rule = Rule(\n recurrence.DAILY\n )\n\n object = Recurrence(\n exrules=[rule]\n )\n\n assert bool(object)\n\n\ndef test_truthiness_with_single_rdate():\n object = Recurrence(\n rdates=[datetime(2014, 12, 31, 0, 0, 0)]\n )\n\n assert bool(object)\n\n\ndef test_truthiness_with_single_exdate():\n object = Recurrence(\n exdates=[datetime(2014, 12, 31, 0, 0, 0)]\n )\n\n assert bool(object)\n\n\ndef test_truthiness_with_dtstart():\n object = Recurrence(\n dtstart=datetime(2014, 12, 31, 0, 0, 0)\n )\n\n assert bool(object)\n\n\ndef test_truthiness_with_dtend():\n object = Recurrence(\n dtend=datetime(2014, 12, 31, 0, 0, 0)\n )\n\n assert bool(object)\n\n\ndef test_falsiness_with_empty_recurrence_object():\n assert not bool(Recurrence())\n", "id": "3036440", "language": "Python", "matching_score": 1.2482798099517822, "max_stars_count": 251, "path": "tests/test_magic_methods.py" }, { "content": "from datetime import datetime\nfrom recurrence import Recurrence, Rule\nimport recurrence\n\n\nRULE = Rule(\n recurrence.DAILY\n)\n\nPATTERN = Recurrence(\n rrules=[RULE]\n)\n\n\ndef test_between_without_dtend_and_dtstart():\n occurrences = [\n instance for instance in\n PATTERN.between(\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0)\n )\n ]\n\n # We get back nothing, since dtstart and dtend will have defaulted\n # to the current time, and January 2014 is in the past.\n assert occurrences == []\n\n\ndef test_between_with_dtend_and_dtstart_dtend_lower_than_end():\n occurrences = [\n instance for instance in\n PATTERN.between(\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 6, 0, 0, 0),\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 4, 0, 0, 0),\n )\n ]\n\n assert occurrences == [\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 3, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0),\n ]\n\n\ndef test_between_with_dtend_and_dtstart_dtend_higher_than_end():\n occurrences = [\n instance for instance in\n PATTERN.between(\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 6, 0, 0, 0),\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 8, 0, 0, 0),\n )\n ]\n\n assert occurrences == [\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 3, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0),\n datetime(2014, 1, 5, 0, 0, 0),\n ]\n\n\ndef test_between_with_dtend_and_dtstart_limits_equal_exclusive():\n occurrences = [\n instance for instance in\n PATTERN.between(\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 6, 0, 0, 0),\n dtstart=datetime(2014, 1, 2, 0, 0, 0),\n dtend=datetime(2014, 1, 6, 0, 0, 0),\n )\n ]\n\n assert occurrences == [\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 3, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0),\n datetime(2014, 1, 5, 0, 0, 0),\n ]\n\n\ndef test_between_with_dtend_and_dtstart_limits_equal_inclusive():\n occurrences = [\n instance for instance in\n PATTERN.between(\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 6, 0, 0, 0),\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 6, 0, 0, 0),\n inc=True\n )\n ]\n\n assert occurrences == [\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 3, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0),\n datetime(2014, 1, 5, 0, 0, 0),\n datetime(2014, 1, 6, 0, 0, 0),\n ]\n\n\ndef test_between_with_dtend_and_dtstart_dtstart_lower_than_start():\n occurrences = [\n instance for instance in\n PATTERN.between(\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 6, 
0, 0, 0),\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 6, 0, 0, 0),\n )\n ]\n\n assert occurrences == [\n datetime(2014, 1, 3, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0),\n datetime(2014, 1, 5, 0, 0, 0),\n ]\n\n\ndef test_between_with_dtend_and_dtstart_dtstart_higher_than_start():\n occurrences = [\n instance for instance in\n PATTERN.between(\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 6, 0, 0, 0),\n dtstart=datetime(2014, 1, 2, 0, 0, 0),\n dtend=datetime(2014, 1, 6, 0, 0, 0),\n )\n ]\n\n assert occurrences == [\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 3, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0),\n datetime(2014, 1, 5, 0, 0, 0),\n ]\n", "id": "3380164", "language": "Python", "matching_score": 2.8882088661193848, "max_stars_count": 251, "path": "tests/test_recurrences_without_limits.py" }, { "content": "from datetime import datetime\nfrom recurrence import Recurrence, Rule\nimport recurrence\n\n\nRULE = Rule(\n recurrence.DAILY\n)\n\nPATTERN = Recurrence(\n dtstart=datetime(2014, 1, 2, 0, 0, 0),\n dtend=datetime(2014, 1, 3, 0, 0, 0),\n rrules=[RULE]\n)\n\n\ndef test_occurrences_with_implicit_start_and_end():\n occurrences = [\n instance for instance in\n PATTERN.occurrences()\n ]\n\n assert occurrences == [\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 3, 0, 0, 0),\n ]\n\n assert 2 == PATTERN.count()\n\n\ndef test_occurrences_with_explicit_start():\n occurrences = [\n instance for instance in\n PATTERN.occurrences(\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n )\n ]\n\n # If you specify dtstart, you get occurrences based on the rules\n # from the Recurrence object, which may be outside of the\n # Recurrence object's range.\n assert occurrences == [\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 3, 0, 0, 0),\n ]\n\n assert 3 == PATTERN.count(\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n )\n\n\ndef test_occurrences_with_explicit_end():\n occurrences = [\n instance for instance in\n PATTERN.occurrences(\n dtend=datetime(2014, 1, 4, 0, 0, 0),\n )\n ]\n\n # If you specify dtend, you get occurrences based on the rules\n # from the Recurrence object, which may be outside of the\n # Recurrence object's range.\n assert occurrences == [\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 3, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0),\n ]\n\n assert 3 == PATTERN.count(\n dtend=datetime(2014, 1, 4, 0, 0, 0),\n )\n\n\ndef test_occurrences_with_explicit_start_and_end():\n occurrences = [\n instance for instance in\n PATTERN.occurrences(\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 4, 0, 0, 0),\n )\n ]\n\n # If you specify dtstart or dtend, you get occurrences based on\n # the rules from the Recurrence object, which may be outside of\n # the Recurrence object's range.\n assert occurrences == [\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 3, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0),\n ]\n\n assert 4 == PATTERN.count(\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 4, 0, 0, 0),\n )\n\n\ndef test_occurrences_with_specific_include_dates():\n pattern = Recurrence(\n rdates=[\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 2, 0, 0, 0),\n ]\n )\n\n occurrences = [\n instance for instance in\n pattern.occurrences(\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 4, 0, 0, 0),\n )\n ]\n\n assert occurrences == [\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 4, 0, 0, 0),\n ]\n\n assert 3 
== pattern.count(\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 4, 0, 0, 0),\n )\n\n all_occurrences = [\n instance for instance in\n pattern.occurrences()\n ]\n\n assert all_occurrences == [\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 2, 0, 0, 0),\n ]\n\n assert 2 == pattern.count()\n\n\ndef test_occurrences_until():\n rule = Rule(\n recurrence.DAILY,\n until=datetime(2014, 1, 3, 0, 0, 0)\n )\n\n pattern = Recurrence(\n rrules=[\n rule\n ]\n )\n\n occurrences = [\n instance for instance in\n pattern.occurrences(\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 5, 0, 0, 0),\n )\n ]\n\n assert occurrences == [\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 2, 0, 0, 0),\n datetime(2014, 1, 3, 0, 0, 0),\n # We always get dtend, for reasons that aren't entirely clear\n datetime(2014, 1, 5, 0, 0, 0),\n ]\n\n assert 4 == pattern.count(\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 5, 0, 0, 0),\n )\n\n occurrences = [\n instance for instance in\n pattern.occurrences(\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 2, 0, 0, 0),\n )\n ]\n\n assert occurrences == [\n datetime(2014, 1, 1, 0, 0, 0),\n datetime(2014, 1, 2, 0, 0, 0),\n ]\n\n assert 2 == pattern.count(\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 1, 2, 0, 0, 0),\n )\n\n\ndef test_before():\n assert PATTERN.before(\n datetime(2014, 1, 3, 0, 0, 0)\n ) == datetime(2014, 1, 2, 0, 0, 0)\n\n assert PATTERN.before(\n datetime(2014, 1, 3, 0, 0, 0),\n inc=True\n ) == datetime(2014, 1, 3, 0, 0, 0)\n\n\ndef test_after():\n assert PATTERN.after(\n datetime(2014, 1, 2, 0, 0, 0)\n ) == datetime(2014, 1, 3, 0, 0, 0)\n\n assert PATTERN.after(\n datetime(2014, 1, 2, 0, 0, 0),\n inc=True\n ) == datetime(2014, 1, 2, 0, 0, 0)\n", "id": "8865648", "language": "Python", "matching_score": 0.27486228942871094, "max_stars_count": 251, "path": "tests/test_occurrences.py" }, { "content": "from django.db import models\nfrom recurrence.fields import RecurrenceField\n\n\nclass EventWithNoNulls(models.Model):\n recurs = RecurrenceField(null=False)\n\n\nclass EventWithNulls(models.Model):\n recurs = RecurrenceField(null=True)\n\n\nclass EventWithNullAndBlank(models.Model):\n recurs = RecurrenceField(null=True, blank=True)\n", "id": "5316847", "language": "Python", "matching_score": 2.7423598766326904, "max_stars_count": 0, "path": "tests/models.py" }, { "content": "from pprint import pprint\n\nfrom django.db import IntegrityError\nfrom recurrence import Recurrence\nfrom tests.models import EventWithNulls, EventWithNoNulls, EventWithNullAndBlank\nimport pytest\n\n\[email protected]_db\ndef test_recurs_can_be_explicitly_none_if_none_is_allowed():\n # Check we can save None correctly\n event = EventWithNulls.objects.create(recurs=None)\n assert event.recurs is None\n\n # Check we can deserialize None correctly\n reloaded = EventWithNulls.objects.get(pk=event.pk)\n assert reloaded.recurs is None\n\n\[email protected]_db\ndef test_recurs_cannot_be_explicitly_none_if_none_is_disallowed():\n with pytest.raises(IntegrityError):\n EventWithNoNulls.objects.create(recurs=None)\n\n\[email protected]_db\ndef test_recurs_can_be_empty_even_if_none_is_disallowed():\n event = EventWithNoNulls.objects.create(recurs=Recurrence())\n assert event.recurs == Recurrence()\n\n\[email protected]_db\ndef test_recurs_can_be_saved_and_retrieved_as_none_if_blank_and_null_set_to_true():\n # Can save this one, and it is returned as None\n event = EventWithNullAndBlank.objects.create()\n 
assert event.recurs is None\n event.refresh_from_db()\n assert event.recurs is None\n print(event.pk)\n\n # Can save this as well, and it is returned as None\n event = EventWithNullAndBlank.objects.create(recurs=None)\n assert event.recurs is None\n event.refresh_from_db()\n assert event.recurs is None\n\n\[email protected]_db\ndef test_recurs_errors_on_none_if_null_set_to_false():\n event = EventWithNoNulls.objects.create()\n\n # We should get an error\n pprint(event.__dict__)\n assert False\n assert event.recurs == Recurrence()\n\[email protected]_db\ndef test_recurs_can_be_saved_and_retrieved_as_none_if_null_set_to_true():\n event = EventWithNoNulls.objects.create()\n assert event.recurs == Recurrence()\n\n\[email protected]_db\ndef test_recurs_is_fetched_as_none_from_the_database_if_saved_as_empty_string_previously():\n event = EventWithNoNulls.objects.create(recurs=Recurrence())\n assert event.recurs == Recurrence()", "id": "326558", "language": "Python", "matching_score": 3.2760937213897705, "max_stars_count": 0, "path": "tests/test_nulls.py" }, { "content": "from datetime import datetime\nfrom django.core.exceptions import ValidationError\nfrom recurrence import Recurrence, Rule\nfrom tests.models import EventWithNoNulls\nimport pytest\nimport recurrence\n\n\[email protected]_db\ndef test_recurrence_text_pattern_is_saved():\n event = EventWithNoNulls.objects.create(\n recurs=\"RRULE:FREQ=WEEKLY;BYDAY=TU\"\n )\n\n assert len(event.recurs.rrules) == 1\n assert event.recurs.rrules[0].to_text() == \"weekly, each Tuesday\"\n recurrence_info = event.recurs\n\n event = EventWithNoNulls.objects.get(pk=event.pk)\n assert recurrence_info == event.recurs\n\n\[email protected]_db\ndef test_recurrence_object_is_saved():\n rule = Rule(\n recurrence.WEEKLY\n )\n\n limits = Recurrence(\n dtstart=datetime(2014, 1, 1, 0, 0, 0),\n dtend=datetime(2014, 2, 3, 0, 0, 0),\n rrules=[rule]\n )\n\n event = EventWithNoNulls.objects.create(\n recurs=limits\n )\n\n instances = event.recurs.between(\n datetime(2010, 1, 1, 0, 0, 0),\n datetime(2020, 12, 31, 0, 0, 0)\n )\n\n assert instances == [\n datetime(2014, 1, 1, 0, 0),\n datetime(2014, 1, 8, 0, 0),\n datetime(2014, 1, 15, 0, 0),\n datetime(2014, 1, 22, 0, 0),\n datetime(2014, 1, 29, 0, 0),\n datetime(2014, 2, 3, 0, 0) # We always get dtend\n ]\n\n event = EventWithNoNulls.objects.get(pk=event.pk)\n\n assert event.recurs == limits\n\n assert event.recurs.between(\n datetime(2010, 1, 1, 0, 0, 0),\n datetime(2020, 12, 31, 0, 0, 0)\n ) == instances\n\n\[email protected]_db\[email protected]('value', [\n ' ', 'invalid', 'RRULE:', 'RRULE:FREQ=', 'RRULE:FREQ=invalid'\n])\ndef test_recurrence_text_pattern_invalid(value):\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=value\n )\n\n\[email protected]_db\ndef test_invalid_frequency_recurrence_object_raises():\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n rrules=[Rule('fish')]\n )\n )\n\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n rrules=[Rule(42)]\n )\n )\n\n\[email protected]_db\ndef test_invalid_interval_recurrence_object_raises():\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n rrules=[Rule(recurrence.DAILY, interval=0)]\n )\n )\n\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n rrules=[Rule(recurrence.DAILY, interval='cat')]\n )\n )\n\n\[email protected]_db\ndef 
test_invalid_wkst_recurrence_object_raises():\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n rrules=[Rule(recurrence.DAILY, wkst=17)]\n )\n )\n\n\[email protected]_db\ndef test_invalid_until_recurrence_object_raises():\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n rrules=[Rule(recurrence.DAILY, until=17)]\n )\n )\n\n\[email protected]_db\ndef test_invalid_count_recurrence_object_raises():\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n rrules=[Rule(recurrence.DAILY, count='fish')]\n )\n )\n\n\[email protected]_db\ndef test_invalid_byday_recurrence_object_raises():\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n rrules=[Rule(recurrence.DAILY, byday='house')]\n )\n )\n\n\[email protected]_db\ndef test_invalid_bymonth_too_high_recurrence_object_raises():\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n rrules=[Rule(recurrence.DAILY, bymonth=[1, 32])]\n )\n )\n\n\[email protected]_db\ndef test_invalid_bymonth_toolow_recurrence_object_raises():\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n rrules=[Rule(recurrence.DAILY, bymonth=[0, ])]\n )\n )\n\n\[email protected]_db\ndef test_invalid_exclusion_interval_recurrence_object_raises():\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n exrules=[Rule(recurrence.DAILY, interval=0)]\n )\n )\n\n\[email protected]_db\ndef test_invalid_date_recurrence_object_raises():\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n rdates=[\"fish\"]\n )\n )\n\n\[email protected]_db\ndef test_invalid_exclusion_date_recurrence_object_raises():\n with pytest.raises(ValidationError):\n EventWithNoNulls.objects.create(\n recurs=Recurrence(\n exdates=[\"fish\"]\n )\n )\n", "id": "2447284", "language": "Python", "matching_score": 2.28820538520813, "max_stars_count": 251, "path": "tests/test_saving.py" }, { "content": "from django.core.exceptions import ValidationError\n\n\nclass RecurrenceError(ValidationError):\n pass\n\n\nclass SerializationError(RecurrenceError):\n pass\n\n\nclass DeserializationError(RecurrenceError):\n pass\n", "id": "6191242", "language": "Python", "matching_score": 0.13745880126953125, "max_stars_count": 251, "path": "recurrence/exceptions.py" }, { "content": "from dateutil.rrule import weekday\nimport pytest\nimport recurrence\n\n\ndef test_to_weekday_from_weekday():\n day = recurrence.Weekday(4)\n\n assert recurrence.to_weekday(day) == day\n\n\ndef test_to_weekday_from_dateutil_weekday():\n day = weekday(1)\n\n assert recurrence.to_weekday(day) == recurrence.Weekday(1)\n\n\ndef test_to_weekday_from_int():\n assert recurrence.to_weekday(1) == recurrence.Weekday(1)\n\n with pytest.raises(ValueError):\n recurrence.to_weekday(7)\n\n\ndef test_to_weekday_from_nonelike():\n with pytest.raises(ValueError):\n recurrence.to_weekday(None)\n\n with pytest.raises(ValueError):\n recurrence.to_weekday(\"\")\n\n\ndef test_to_weekday_from_string():\n assert recurrence.to_weekday(\"3\") == recurrence.Weekday(3)\n\n with pytest.raises(ValueError):\n recurrence.to_weekday(\"7\")\n\n assert recurrence.to_weekday(\"MO\") == recurrence.Weekday(0)\n assert recurrence.to_weekday(\"mo\") == recurrence.Weekday(0)\n assert recurrence.to_weekday(\"TU\") == recurrence.Weekday(1)\n assert 
recurrence.to_weekday(\"Tu\") == recurrence.Weekday(1)\n\n with pytest.raises(ValueError):\n recurrence.to_weekday(\"FOO\")\n\n assert recurrence.to_weekday(\"-2TU\") == recurrence.Weekday(1, -2)\n\n # We don't do any validation of the index\n assert recurrence.to_weekday(\"-7SU\") == recurrence.Weekday(6, -7)\n", "id": "8495897", "language": "Python", "matching_score": 2.0566952228546143, "max_stars_count": 251, "path": "tests/test_to_weekday.py" }, { "content": "import pytest\nfrom recurrence import Weekday\n\n\ndef test_init():\n assert repr(Weekday(3)) == 'TH'\n assert repr(Weekday(3, -2)) == '-2TH'\n assert repr(Weekday(3, 3)) == '3TH'\n\n with pytest.raises(ValueError):\n Weekday(8)\n\n with pytest.raises(ValueError):\n Weekday('fish')\n\n\ndef test_call():\n # I'm not sure why this functionality is useful, but this is what\n # calling a weekday currently does.\n\n day = Weekday(4, -3)\n assert day(2) == Weekday(4, 2)\n assert day(-3) is day\n assert day(None) == Weekday(4)\n", "id": "9455377", "language": "Python", "matching_score": 1.4237998723983765, "max_stars_count": 251, "path": "tests/test_weekday.py" } ]
1.546623
Jasig
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Build and Launch iPhone Application in Simulator or install\n# the application on the device via iTunes\n# \n\nimport os, sys, uuid, subprocess, shutil, signal, string, traceback, imp, filecmp, inspect\nimport platform, time, re, run, glob, codecs, hashlib, datetime, plistlib\nfrom compiler import Compiler\nfrom projector import Projector\nfrom xml.dom.minidom import parseString\nfrom xml.etree.ElementTree import ElementTree\nfrom os.path import join, splitext, split, exists\n\n# the template_dir is the path where this file lives on disk\ntemplate_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))\n\n# add the parent and the common directory so we can load libraries from those paths too\nsys.path.append(os.path.join(template_dir,'../'))\nsys.path.append(os.path.join(template_dir,'../common'))\nsys.path.append(os.path.join(template_dir, '../module'))\nscript_ok = False\n\nfrom tiapp import *\nfrom css import csscompiler\nimport localecompiler\nfrom module import ModuleDetector\nfrom tools import *\n\nignoreFiles = ['.gitignore', '.cvsignore']\nignoreDirs = ['.git','.svn', 'CVS']\n\n# need this so unicode works\nsys.stdout = codecs.getwriter('utf-8')(sys.stdout)\n\ndef version_sort(a,b):\n\tx = float(a[0:3]) # ignore more than 2 places\n\ty = float(b[0:3]) # ignore more than 2 places\n\tif x > y:\n\t\treturn -1\n\tif x < y:\n\t\treturn 1\n\treturn 0\n\n# this will return the version of the iOS SDK that we have installed\ndef check_iphone_sdk(s):\n\tfound = []\n\toutput = run.run([\"xcodebuild\",\"-showsdks\"],True,False)\n\t#print output\n\tif output:\n\t\tfor line in output.split(\"\\n\"):\n\t\t\tif line[0:1] == '\\t':\n\t\t\t\tline = line.strip()\n\t\t\t\ti = line.find('-sdk')\n\t\t\t\tif i < 0: continue\n\t\t\t\ttype = line[0:i]\n\t\t\t\tcmd = line[i+5:]\n\t\t\t\tif cmd.find(\"iphoneos\")==0:\n\t\t\t\t\tver = cmd[8:]\n\t\t\t\t\tfound.append(ver)\n\t# The sanity check doesn't have to be as thorough as prereq.\n\tif s in found:\n\t\treturn s\n\t# Sanity check failed. 
Let's find something close.\n\treturn sorted(found,version_sort)[0]\n\ndef dequote(s):\n\tif s[0:1] == '\"':\n\t\treturn s[1:-1]\n\treturn s\n\n# force kill the simulator if running\ndef kill_simulator():\n\trun.run(['/usr/bin/killall',\"iPhone Simulator\"],True)\n\ndef write_project_property(f,prop,val):\n\texisting_val = read_project_property(f,prop)\n\tif existing_val!=val:\n\t\tfx = open(f,'w')\n\t\tfx.write(\"%s=%s\\n\"%(prop,val))\n\t\tfx.close()\n\t\t\t\ndef read_project_property(f,prop):\n\tif os.path.exists(f):\n\t\tcontents = open(f).read()\n\t\tfor line in contents.splitlines(False):\n\t\t\t(k,v) = line.split(\"=\")\n\t\t\tif k == prop:\n\t\t\t\treturn v\n\treturn None\n\ndef read_project_appid(f):\n\treturn read_project_property(f,'TI_APPID')\n\t\ndef read_project_version(f):\n\treturn read_project_property(f,'TI_VERSION')\n\t\t\t\ndef infoplist_has_appid(f,appid):\n\tif os.path.exists(f):\n\t\tcontents = codecs.open(f,encoding='utf-8').read()\n\t\treturn contents.find(appid)>0\n\treturn False\n\t\t\ndef copy_module_resources(source, target, copy_all=False, force=False):\n\tif not os.path.exists(os.path.expanduser(target)):\n\t\tos.makedirs(os.path.expanduser(target))\n\tfor root, dirs, files in os.walk(source):\n\t\tfor name in ignoreDirs:\n\t\t\tif name in dirs:\n\t\t\t\tdirs.remove(name)\t# don't visit ignored directories\t\t\t \n\t\tfor file in files:\n\t\t\tif copy_all==False and splitext(file)[-1] in ('.html', '.js', '.css', '.a', '.m', '.c', '.cpp', '.h', '.mm'):\n\t\t\t\tcontinue\n\t\t\tif file in ignoreFiles:\n\t\t\t\tcontinue\n\t\t\tfrom_ = os.path.join(root, file)\t\t\t \n\t\t\tto_ = os.path.expanduser(from_.replace(source, target, 1))\n\t\t\tto_directory = os.path.expanduser(split(to_)[0])\n\t\t\tif not exists(to_directory):\n\t\t\t\tos.makedirs(to_directory)\n\t\t\t# only copy if different filesize or doesn't exist\n\t\t\tif not os.path.exists(to_) or os.path.getsize(from_)!=os.path.getsize(to_) or force:\n\t\t\t\tif os.path.exists(to_): os.remove(to_)\n\t\t\t\tshutil.copyfile(from_, to_)\n\n# WARNING: This could be a time bomb waiting to happen, because it mangles\n# the app bundle name for NO REASON. Or... 
does it?\ndef make_app_name(s):\n\tr = re.compile('[0-9a-zA-Z_]')\n\tbuf = ''\n\tfor i in s:\n\t\tif i=='-':\n\t\t\tbuf+='_'\n\t\t\tcontinue\n\t\tif r.match(i)!=None:\n\t\t\tbuf+=i\n\t# if name starts with number, we simply append a k to it\n\tif re.match('^[0-9]+',buf):\n\t\tbuf = 'k%s' % buf\n\treturn buf\n\ndef getText(nodelist):\n\trc = \"\"\n\tfor node in nodelist:\n\t\tif node.nodeType == node.TEXT_NODE:\n\t\t\trc+=node.data\n\t\telif node.nodeType == node.ELEMENT_NODE:\n\t\t\trc+=getText(node.childNodes)\n\treturn rc\n\ndef make_map(dict):\n\tprops = {}\n\tcurkey = None\n\n\tfor i in dict.childNodes:\n\t\tif i.nodeType == 1:\n\t\t\tif i.nodeName == 'key':\n\t\t\t\tcurkey = str(getText(i.childNodes)).strip()\n\t\t\telif i.nodeName == 'dict':\n\t\t\t\tprops[curkey] = make_map(i)\n\t\t\t\tcurkey = None\n\t\t\telif i.nodeName == 'array':\n\t\t\t\ts = i.getElementsByTagName('string')\n\t\t\t\tif len(s):\n\t\t\t\t\ttxt = ''\n\t\t\t\t\tfor t in s:\n\t\t\t\t\t\ttxt+=getText(t.childNodes)\n\t\t\t\t\tprops[curkey]=txt\n\t\t\t\telse:\n\t\t\t\t\tprops[curkey]=None\n\t\t\t\tcurkey = None\n\t\t\telse:\n\t\t\t\tif i.childNodes.length > 0:\n\t\t\t\t\tprops[curkey] = getText(i.childNodes)\n\t\t\t\telse:\n\t\t\t\t\tprops[curkey] = i.nodeName\n\t\t\t\tcurkey = None\n\n\treturn props\n\ndef dump_resources_listing(rootdir,out):\n\tout.write(\"\\nFile listing for %s\\n\\n\" % rootdir)\n\ttotal = 0\n\tfor root, subFolders, files in os.walk(rootdir):\n\t\tfor file in files:\n\t\t\tp = os.path.join(root,file)\n\t\t\ts = os.path.getsize(p)\n\t\t\ttotal+=s\n\t\t\ts = \"[%.0f]\" % s\n\t\t\tp = p[len(rootdir)+1:]\n\t\t\tif p.startswith('build/android'): continue\n\t\t\tout.write(\" %s %s\\n\" % (string.ljust(p,120),string.ljust(s,8)))\n\tout.write(\"-\" * 130)\n\tout.write(\"\\nTotal files: %.1f MB\\n\" % ((total/1024)/1024))\n\tout.write(\"\\n\")\n\ndef dump_infoplist(infoplist,out):\n\tplist = codecs.open(infoplist, encoding='utf-8').read()\n\tout.write(\"Contents of Info.plist\\n\\n\")\n\tout.write(plist)\n\tout.write(\"\\n\")\n\tout.write(\"=\" * 130)\n\tout.write(\"\\n\\n\")\n\t\t\ndef read_provisioning_profile(f,o):\n\tf = open(f,'rb').read()\n\tb = f.index('<?xml')\n\te = f.index('</plist>')\n\txml_content = f[b:e+8]\n\to.write(\"Reading provisioning profile:\\n\\n%s\\n\" % xml_content)\n\tdom = parseString(xml_content)\n\tdict = dom.getElementsByTagName('dict')[0]\n\tprops = make_map(dict)\n\treturn props\n\ndef get_aps_env(provisioning_profile):\n\tentitlements = provisioning_profile['Entitlements']\n\tif entitlements.has_key('aps-environment'):\n\t\treturn entitlements['aps-environment']\n\treturn None\n\t\ndef get_task_allow(provisioning_profile):\n\tentitlements = provisioning_profile['Entitlements']\n\treturn entitlements['get-task-allow']\n\t\ndef get_app_prefix(provisioning_profile):\n\tappid_prefix = provisioning_profile['ApplicationIdentifierPrefix']\n\treturn appid_prefix\n\t\ndef get_profile_uuid(provisioning_profile):\n\treturn provisioning_profile['UUID']\n\t\ndef generate_customized_entitlements(provisioning_profile,appid,uuid,command,out):\n\t\n\tget_task_value = get_task_allow(provisioning_profile)\n\taps_env = get_aps_env(provisioning_profile)\n\t\n\tbuffer = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?> \t\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n\t<dict>\n\"\"\"\t\t\n\t\n\tapp_prefix = None\n\t\n\tif command=='distribute':\n\t\tapp_prefix = 
get_app_prefix(provisioning_profile)\n\t\tout.write(\"Using app_prefix = %s\\n\\n\" % (app_prefix))\n\t\tbuffer+=\"\"\"\n\t\t<key>application-identifier</key>\n\t\t<string>%s.%s</string>\n\t\t\"\"\" % (app_prefix,appid)\n\t\n\tbuffer+=\"<key>get-task-allow</key>\\n\t\t<%s/>\" % get_task_value\n\t\n\tif aps_env!=None:\n\t\tbuffer+=\"\\n<key>aps-environment</key>\\n\t\t<string>%s</string>\" % aps_env\n\t\n\tif command=='distribute':\n\t\tbuffer+=\"\"\"\n\t\t<key>keychain-access-groups</key>\n\t\t<array>\n\t\t\t<string>%s.%s</string>\n\t\t</array>\n\t\t\"\"\" % (app_prefix,appid)\n\n\tbuffer+=\"\"\"\n\t</dict>\n</plist>\"\"\"\n\t\n\treturn buffer\n\ndef xcode_version():\n\toutput = run.run(['xcodebuild','-version'],True,False)\n\tif output:\n\t\tversionLine = output.split('\\n')[0]\n\t\treturn float(versionLine.split(' ')[1].rpartition('.')[0])\n\ndef distribute_xc3(uuid, provisioning_profile, name, log):\n\t# starting in 4.0, apple now requires submission through XCode\n\t# this code mimics what xcode does on its own to package the \n\t# application for the app uploader process\n\tlog.write(\"Creating distribution for xcode3...\\n\");\n\tarchive_uuid = str(uuid.uuid4()).upper()\n\tarchive_dir = os.path.join(os.path.expanduser(\"~/Library/MobileDevice/Archived Applications\"),archive_uuid)\n\tarchive_app_dir = os.path.join(archive_dir,\"%s.app\" % name)\n\tarchive_appdsym_dir = os.path.join(archive_dir,\"%s.app.dSYM\" % name)\n\tos.makedirs(archive_app_dir)\n\tos.makedirs(archive_appdsym_dir)\n\t\n\tos.system('ditto \"%s.app\" \"%s\"' % (name,archive_app_dir))\n\tos.system('ditto \"%s.app.dSYM\" \"%s\"' % (name,archive_appdsym_dir))\n\t\n\tarchive_plist = os.path.join(archive_dir,'ArchiveInfo.plist')\n\tlog.write(\"Writing archive plist to: %s\\n\\n\" % archive_plist)\n\t\n\tprofile_uuid = get_profile_uuid(provisioning_profile)\n\t\n\tos.system(\"/usr/bin/plutil -convert xml1 -o \\\"%s\\\" \\\"%s\\\"\" % (os.path.join(archive_dir,'Info.xml.plist'),os.path.join(archive_app_dir,'Info.plist')))\n\tp = plistlib.readPlist(os.path.join(archive_dir,'Info.xml.plist'))\n\tarchive_metadata = {\n\t\t'CFBundleIdentifier':p['CFBundleIdentifier'],\n\t\t'CFBundleVersion':p['CFBundleVersion'],\n\t\t'XCApplicationFilename':'%s.app' %name,\n\t\t'XCApplicationName':name,\n\t\t'XCArchivedDate': time.time() - 978307200.0,\n\t\t'XCArchiveUUID':archive_uuid,\n\t\t'XCInfoPlist' : p,\n\t\t'XCProfileUUID': profile_uuid\n\t}\n\tlog.write(\"%s\\n\\n\" % archive_metadata)\n\tplistlib.writePlist(archive_metadata,archive_plist)\n\tos.remove(os.path.join(archive_dir,'Info.xml.plist'))\t\n\ndef distribute_xc4(name, icon, log):\n\t# Locations of bundle, app binary, dsym info\n\tlog.write(\"Creating distribution for xcode4...\\n\");\t\n\ttimestamp = datetime.datetime.now()\n\tdate = timestamp.date().isoformat()\n\ttime = timestamp.time().strftime('%H-%M-%S')\n\tarchive_name = os.path.join(date,'%s_%s' % (name, time))\n\tarchive_bundle = os.path.join(os.path.expanduser(\"~/Library/Developer/Xcode/Archives\"),\"%s.xcarchive\" % archive_name)\n\tarchive_app = os.path.join(archive_bundle,\"Products\",\"Applications\",\"%s.app\" % name)\n\tarchive_dsym = os.path.join(archive_bundle,\"dSYM\")\n\t\n\t# create directories\n\tif not os.access(archive_bundle, os.F_OK): os.makedirs(archive_bundle)\n\tif not os.access(archive_app, os.F_OK): os.makedirs(archive_app)\n\tif not os.access(archive_dsym, os.F_OK): os.makedirs(archive_dsym)\n\n\t# copy app bundles into the approps. 
places\n\tos.system('ditto \"%s.app\" \"%s\"' % (name,archive_app))\n\tos.system('ditto \"%s.app.dSYM\" \"%s\"' % (name,archive_dsym))\n\t\n\t# plist processing time - this is the biggest difference from XC3.\n\tarchive_info_plist = os.path.join(archive_bundle,'Info.plist')\n\tlog.write(\"Writing archive plist to: %s\\n\\n\" % archive_info_plist)\n\t\n\t# load existing plist values so that we can use them in generating the archive\n\t# plist\n\tos.system('/usr/bin/plutil -convert xml1 -o \"%s\" \"%s\"' % (os.path.join(archive_bundle,'Info.xml.plist'),os.path.join(archive_app,'Info.plist')))\n\tproject_info_plist = plistlib.readPlist(os.path.join(archive_bundle,'Info.xml.plist'))\n\tappbundle = \"Applications/%s.app\" % name\n\t# NOTE: We chop off the end '.' of 'CFBundleVersion' to provide the 'short' version\n\tversion = project_info_plist['CFBundleVersion']\n\tapp_version_ = version.split('.')\n\tif(len(app_version_) > 3):\n\t\tversion = app_version_[0]+'.'+app_version_[1]+'.'+app_version_[2]\t\n\tarchive_info = {\n\t\t'ApplicationProperties' : {\n\t\t\t'ApplicationPath' : appbundle,\n\t\t\t'CFBundleIdentifier' : project_info_plist['CFBundleIdentifier'],\n\t\t\t'CFBundleShortVersionString' : version,\n\t\t\t'IconPaths' : [os.path.join(appbundle,icon), os.path.join(appbundle,icon)]\n\t\t},\n\t\t'ArchiveVersion' : float(1),\n\t\t'CreationDate' : datetime.datetime.utcnow(),\n\t\t'Name' : name,\n\t\t'SchemeName' : name\n\t}\n\t\n\t# write out the archive plist and clean up\n\tlog.write(\"%s\\n\\n\" % archive_info)\n\tplistlib.writePlist(archive_info,archive_info_plist)\n\tos.remove(os.path.join(archive_bundle,'Info.xml.plist'))\n\t\n\t# Workaround for dumb xcode4 bug that doesn't update the organizer unless\n\t# files are touched in a very specific manner\n\ttemp = os.path.join(os.path.expanduser(\"~/Library/Developer/Xcode/Archives\"),\"temp\")\n\tos.rename(archive_bundle,temp)\n\tos.rename(temp,archive_bundle)\n\ndef is_indexing_enabled(tiapp, simulator_dir, **kwargs):\n\t# darwin versions:\n\t# - 9.x: Leopard (10.5)\n\t# - 10.x: Snow Leopard (10.6)\n\t# - 11.x: Lion (10.7)\n\n\t# for testing purposes\n\tplatform_release = kwargs.get(\"platform_release\", platform.release())\n\tdarwin_version = [int(n) for n in platform_release.split(\".\")]\n\n\tenable_mdfind = True\n\tif tiapp.has_app_property('ti.ios.enablemdfind'):\n\t\tenable_mdfind = tiapp.to_bool(tiapp.get_app_property('ti.ios.enablemdfind'))\n\n\t# mdfind is specifically disabled, so don't use it\n\tif not enable_mdfind:\n\t\treturn False\n\n\t# pre-Leopard, mdfind / mdutil don't exist\n\tif darwin_version[0] < 10:\n\t\treturn False\n\n\t# for testing purposes\n\tindexer_status = kwargs.get(\"indexer_status\")\n\tif indexer_status == None:\n\t\tindexer_status = run.run(['mdutil', '-a', '-s'], True)\n\n\t# An error occurred running mdutil, play it safe\n\tif indexer_status == None:\n\t\treturn False\n\n\tlines = indexer_status.splitlines()\n\tmount_point_status = {}\n\tfor i in range(0, len(lines), 2):\n\t\tmount_point = lines[i].rstrip(':')\n\t\tstatus = lines[i+1].strip('\\t.')\n\t\t# Only add mount points that the simulator_dir starts with\n\t\tif simulator_dir.startswith(mount_point):\n\t\t\tmount_point_status[mount_point] = status\n\n\tif len(mount_point_status) > 0:\n\t\t# There may be multiple volumes that have a mount point that the\n\t\t# simulator_dir matches, so the one with the longest length\n\t\t# *should* be the most specific / correct mount point.\n\t\tmount_points = 
mount_point_status.keys()\n\t\tmount_points.sort(lambda a, b: cmp(len(b), len(a)))\n\t\tstatus = mount_point_status[mount_points[0]]\n\n\t\tif 'Indexing enabled' in status:\n\t\t\treturn True\n\n\treturn False\n\nHEADER = \"\"\"/**\n* Appcelerator Titanium Mobile\n* This is generated code. Do not modify. Your changes *will* be lost.\n* Generated code is Copyright (c) 2009-2011 by Appcelerator, Inc.\n* All Rights Reserved.\n*/\n#import <Foundation/Foundation.h>\n\"\"\"\n\nDEFAULTS_IMPL_HEADER= \"\"\"#import \"TiUtils.h\"\n#import \"ApplicationDefaults.h\"\n \n@implementation ApplicationDefaults\n \n+ (NSMutableDictionary*) copyDefaults\n{\n NSMutableDictionary * _property = [[NSMutableDictionary alloc] init];\\n\n\"\"\"\n\nFOOTER =\"\"\"\n@end\n\"\"\"\n\ndef copy_tiapp_properties(project_dir):\n\ttiapp = ElementTree()\n\tsrc_root = os.path.dirname(sys.argv[0])\n\tassets_tiappxml = os.path.join(project_dir,'tiapp.xml')\n\tif not os.path.exists(assets_tiappxml):\n\t\tshutil.copy(os.path.join(project_dir, 'tiapp.xml'), assets_tiappxml)\n\ttiapp.parse(open(assets_tiappxml, 'r'))\n\timpf = open(\"ApplicationDefaults.m\",'w+')\n\tappl_default = os.path.join(project_dir,'build','iphone','Classes','ApplicationDefaults.m')\n\timpf.write(HEADER)\n\timpf.write(DEFAULTS_IMPL_HEADER)\n\tfor property_el in tiapp.findall(\"property\"):\n\t\tname = property_el.get(\"name\")\n\t\ttype = property_el.get(\"type\")\n\t\tvalue = property_el.text\n\t\tif name == None: continue\n\t\tif value == None: value = \"\"\n\t\tif type == \"string\":\n\t\t\timpf.write(\"\"\" [_property setObject:[TiUtils stringValue:@\"%s\"] forKey:@\"%s\"];\\n\"\"\"%(value,name))\n\t\telif type == \"bool\":\n\t\t\timpf.write(\"\"\" [_property setObject:[NSNumber numberWithBool:[TiUtils boolValue:@\"%s\"]] forKey:@\"%s\"];\\n\"\"\"%(value,name))\n\t\telif type == \"int\":\n\t\t\timpf.write(\"\"\" [_property setObject:[NSNumber numberWithInt:[TiUtils intValue:@\"%s\"]] forKey:@\"%s\"];\\n\"\"\"%(value,name))\n\t\telif type == \"double\":\n\t\t\timpf.write(\"\"\" [_property setObject:[NSNumber numberWithDouble:[TiUtils doubleValue:@\"%s\"]] forKey:@\"%s\"];\\n\"\"\"%(value,name))\n\t\telif type == None:\n\t\t\timpf.write(\"\"\" [_property setObject:[TiUtils stringValue:@\"%s\"] forKey:@\"%s\"];\\n\"\"\"%(value,name))\n\t\telse:\n\t\t\tprint \"\"\"[WARN] Cannot set property \"%s\" , type \"%s\" not supported\"\"\" % (name,type)\n\tif (len(tiapp.findall(\"property\")) > 0) :\n\t\timpf.write(\"\\n return _property;\\n}\")\n\telse: \n\t\timpf.write(\"\\n return NULL;\\n}\")\n\timpf.write(FOOTER)\n\timpf.close()\n\tif open(appl_default,'r').read() == open('ApplicationDefaults.m','r').read():\n\t\tos.remove('ApplicationDefaults.m')\n\t\treturn False\n\telse:\n\t\tshutil.copyfile('ApplicationDefaults.m',appl_default)\n\t\tos.remove('ApplicationDefaults.m')\n\t\treturn True\n\t\n\ndef cleanup_app_logfiles(tiapp, log_id, iphone_version):\n\tprint \"[DEBUG] finding old log files\"\n\tsys.stdout.flush()\n\tsimulator_dir = os.path.expanduser('~/Library/Application Support/iPhone Simulator/%s' % iphone_version)\n\n\t# No need to clean if the directory doesn't exist\n\tif not os.path.exists(simulator_dir):\n\t\treturn\n\n\tresults = None\n\n\t# If the indexer is enabled, we can use spotlight for faster searching\n\tif is_indexing_enabled(tiapp, simulator_dir):\n\t\tprint \"[DEBUG] Searching for old log files with mdfind...\"\n\t\tsys.stdout.flush()\n\t\tresults = run.run(['mdfind',\n\t\t\t'-onlyin', simulator_dir,\n\t\t\t'-name', '%s.log' % log_id\n\t\t], 
True)\n\n\t# Indexer is disabled, revert to manual crawling\n\tif results == None:\n\t\tprint \"[DEBUG] Searching for log files without mdfind...\"\n\t\tsys.stdout.flush()\n\t\tdef find_all_log_files(folder, fname):\n\t\t\tresults = []\n\t\t\tfor root, dirs, files in os.walk(os.path.expanduser(folder)):\n\t\t\t\tfor file in files:\n\t\t\t\t\tif fname==file:\n\t\t\t\t\t\tfullpath = os.path.join(root, file)\n\t\t\t\t\t\tresults.append(fullpath)\n\t\t\treturn results\n\t\tfor f in find_all_log_files(simulator_dir, '%s.log' % log_id):\n\t\t\tprint \"[DEBUG] removing old log file: %s\" % f\n\t\t\tsys.stdout.flush()\n\t\t\tos.remove(f)\n\telse:\n\t\tfor i in results.splitlines(False):\n\t\t\tprint \"[DEBUG] removing old log file: %s\" % i\n\t\t\tos.remove(i)\n\n#\n# this script is invoked from our tooling but you can run from command line too if \n# you know the arguments\n#\n# the current pattern is <command> [arguments]\n#\n# where the arguments are dependent on the command being passed\n#\t\ndef main(args):\n\tglobal script_ok\n\targc = len(args)\n\tif argc < 2 or argc==2 and (args[1]=='--help' or args[1]=='-h'):\n\t\tprint \"%s <command> <version> <project_dir> <appid> <name> [options]\" % os.path.basename(args[0])\n\t\tprint\n\t\tprint \"available commands: \"\n\t\tprint\n\t\tprint \" install install the app to itunes for testing on iphone\"\n\t\tprint \" simulator build and run on the iphone simulator\"\n\t\tprint \" distribute build final distribution bundle\"\n\t\tprint \" xcode build from within xcode\"\n\t\tprint \" run build and run app from project folder\"\n\t\n\t\tsys.exit(1)\n\n\tprint \"[INFO] One moment, building ...\"\n\tsys.stdout.flush()\n\tstart_time = time.time()\n\tcommand = args[1].decode(\"utf-8\")\n\t\n\ttarget = 'Debug'\n\tdeploytype = 'development'\n\tdevicefamily = 'iphone'\n\tdebug = False\n\tbuild_only = False\n\tsimulator = False\n\txcode_build = False\n\tforce_xcode = False\n\tsimtype = devicefamily\n\n\t# when you run from xcode, we'll pass xcode as the command and the \n\t# xcode script will simply pass some additional args as well as xcode\n\t# will add some additional useful stuff to the ENVIRONMENT and we pull\n\t# those values out here\n\tif command == 'xcode':\n\t\txcode_build = True\n\t\tsrc_root = os.environ['SOURCE_ROOT']\n\t\tproject_dir = os.path.abspath(os.path.join(src_root,'../','../'))\n\t\tname = os.environ['PROJECT_NAME']\n\t\ttarget = os.environ['CONFIGURATION']\n\t\tappid = os.environ['TI_APPID']\n\t\tarch = os.environ['CURRENT_ARCH']\n\t\tsdk_name = os.environ['SDK_NAME']\n\t\tiphone_version = sdk_name.replace('iphoneos','').replace('iphonesimulator','')\n\t\t# SUPPORTED_DEVICE_FAMILIES 1 or 2 or both\n\t\t# TARGETED_DEVICE_FAMILY 1 or 2\n\t\ttarget_device = os.environ['TARGETED_DEVICE_FAMILY']\n\t\tif target_device == '1':\n\t\t\tdevicefamily = 'iphone'\n\t\telif target_device == '2':\n\t\t\tdevicefamily = 'ipad'\n\t\telif target_device == '1,2':\n\t\t\tdevicefamily = 'universal'\n\t\tif arch == 'i386': \n\t\t\t# simulator always indicates simulator\n\t\t\tdeploytype = 'development'\n\t\telse:\n\t\t\t# if arch!=i386 indicates a build for device\n\t\t\tif target=='Debug':\n\t\t\t\t# non-simulator + debug build indicates test on device\n\t\t\t\tdeploytype = 'test'\n\t\t\telse:\n\t\t\t\t# non-simulator + release build indicates package for distribution\n\t\t\t\tdeploytype = 'production'\n\t\t#Ensure the localization files are copied in the application directory\n\t\tout_dir = 
os.path.join(os.environ['TARGET_BUILD_DIR'],os.environ['CONTENTS_FOLDER_PATH'])\n\t\tlocalecompiler.LocaleCompiler(name,project_dir,devicefamily,deploytype,out_dir).compile()\n\t\tcompiler = Compiler(project_dir,appid,name,deploytype,xcode_build,devicefamily,iphone_version)\n\t\tscript_ok = True\n\t\tsys.exit(0)\n\telse:\n\t\t# the run command is when you run from titanium using the run command\n\t\t# and it will run the project in the current directory immediately in the simulator\n\t\t# from the command line\n\t\tif command == 'run':\n\t\t\tif argc < 3:\n\t\t\t\tprint \"Usage: %s run <project_dir> [ios_version]\" % os.path.basename(args[0])\n\t\t\t\tsys.exit(1)\n\t\t\tif argc == 3:\n\t\t\t\tiphone_version = check_iphone_sdk('4.0')\n\t\t\telse:\n\t\t\t\tiphone_version = dequote(args[3].decode(\"utf-8\"))\n\t\t\tproject_dir = os.path.expanduser(dequote(args[2].decode(\"utf-8\")))\n\t\t\tiphonesim = os.path.abspath(os.path.join(template_dir,'ios-sim'))\n\t\t\tiphone_dir = os.path.abspath(os.path.join(project_dir,'build','iphone'))\n\t\t\ttiapp_xml = os.path.join(project_dir,'tiapp.xml')\n\t\t\tti = TiAppXML(tiapp_xml)\n\t\t\tappid = ti.properties['id']\n\t\t\tname = ti.properties['name']\n\t\t\tcommand = 'simulator' # switch it so that the rest of the stuff works\n\t\telse:\n\t\t\tiphone_version = dequote(args[2].decode(\"utf-8\"))\n\t\t\tiphonesim = os.path.abspath(os.path.join(template_dir,'ios-sim'))\n\t\t\tproject_dir = os.path.expanduser(dequote(args[3].decode(\"utf-8\")))\n\t\t\tappid = dequote(args[4].decode(\"utf-8\"))\n\t\t\tname = dequote(args[5].decode(\"utf-8\"))\n\t\t\ttiapp_xml = os.path.join(project_dir,'tiapp.xml')\n\t\t\tti = TiAppXML(tiapp_xml)\n\t\t\t\n\t\tapp_name = make_app_name(name)\n\t\tiphone_dir = os.path.abspath(os.path.join(project_dir,'build','iphone'))\n\t\tproject_xcconfig = os.path.join(iphone_dir,'project.xcconfig')\n\t\ttarget = 'Release'\n\t\tostype = 'os'\n\t\tversion_file = None\n\t\tlog_id = None\n\t\tprovisioning_profile = None\n\t\tdebughost = None\n\t\tdebugport = None\n\t\tpostbuild_modules = []\n\t\t\n\t\t# starting in 1.4, you don't need to actually keep the build/iphone directory\n\t\t# if we don't find it, we'll just simply re-generate it\n\t\tif not os.path.exists(iphone_dir):\n\t\t\tfrom iphone import IPhone\n\t\t\tprint \"[INFO] Detected missing project but that's OK. 
re-creating it...\"\n\t\t\tiphone_creator = IPhone(name,appid)\n\t\t\tiphone_creator.create(iphone_dir,True)\n\t\t\tsys.stdout.flush()\n\t\t\t\n\t\t# we use different arguments dependent on the command\n\t\t# pluck those out here\n\t\tif command == 'distribute':\n\t\t\tiphone_version = check_iphone_sdk(iphone_version)\n\t\t\tlink_version = iphone_version\n\t\t\tappuuid = dequote(args[6].decode(\"utf-8\"))\n\t\t\tdist_name = dequote(args[7].decode(\"utf-8\"))\n\t\t\toutput_dir = os.path.expanduser(dequote(args[8].decode(\"utf-8\")))\n\t\t\tif argc > 9:\n\t\t\t\tdevicefamily = dequote(args[9].decode(\"utf-8\"))\n\t\t\tprint \"[INFO] Switching to production mode for distribution\"\n\t\t\tdeploytype = 'production'\n\t\telif command in ['simulator', 'build']:\n\t\t\tlink_version = check_iphone_sdk(iphone_version)\n\t\t\tdeploytype = 'development'\n\t\t\tdebug = True\n\t\t\tsimulator = command == 'simulator'\n\t\t\tbuild_only = command == 'build'\n\t\t\ttarget = 'Debug'\n\t\t\tostype = 'simulator'\n\t\t\tif argc > 6:\n\t\t\t\tdevicefamily = dequote(args[6].decode(\"utf-8\"))\n\t\t\tif argc > 7:\n\t\t\t\tsimtype = dequote(args[7].decode(\"utf-8\"))\n\t\t\telse:\n\t\t\t\t# 'universal' helpfully translates into iPhone here... just in case.\n\t\t\t\tsimtype = devicefamily\n\t\t\tif argc > 8:\n\t\t\t\t# this is host:port from the debugger\n\t\t\t\tdebughost = dequote(args[8].decode(\"utf-8\"))\n\t\t\t\tif debughost=='':\n\t\t\t\t\tdebughost = None\n\t\t\t\t\tdebugport = None\n\t\t\t\telse:\n\t\t\t\t\tdebughost,debugport = debughost.split(\":\")\n\t\telif command == 'install':\n\t\t\tiphone_version = check_iphone_sdk(iphone_version)\n\t\t\tlink_version = iphone_version\n\t\t\tappuuid = dequote(args[6].decode(\"utf-8\"))\n\t\t\tdist_name = dequote(args[7].decode(\"utf-8\"))\n\t\t\tif argc > 8:\n\t\t\t\tdevicefamily = dequote(args[8].decode(\"utf-8\"))\n\t\t\tif argc > 9:\n\t\t\t\t# this is host:port from the debugger\n\t\t\t\tdebughost = dequote(args[9].decode(\"utf-8\"))\n\t\t\t\tif debughost=='':\n\t\t\t\t\tdebughost=None\n\t\t\t\t\tdebugport=None\n\t\t\t\telse:\n\t\t\t\t\tdebughost,debugport = debughost.split(\":\")\n\t\t\ttarget = 'Debug'\n\t\t\tdeploytype = 'test'\n\t\t\n\t\t# setup up the useful directories we need in the script\n\t\tbuild_out_dir = os.path.abspath(os.path.join(iphone_dir,'build'))\n\t\tbuild_dir = os.path.abspath(os.path.join(build_out_dir,'%s-iphone%s'%(target,ostype)))\n\t\tapp_dir = os.path.abspath(os.path.join(build_dir,name+'.app'))\n\t\tbinary = os.path.join(app_dir,name)\n\t\tsdk_version = os.path.basename(os.path.abspath(os.path.join(template_dir,'../')))\n\t\tiphone_resources_dir = os.path.join(iphone_dir,'Resources')\n\t\tversion_file = os.path.join(iphone_resources_dir,'.version')\n\t\tforce_rebuild = read_project_version(project_xcconfig)!=sdk_version or not os.path.exists(version_file)\n\t\tinfoplist = os.path.join(iphone_dir,'Info.plist')\n\t\tgithash = None\n\t\tcustom_fonts = []\n\n\t\t# if we're not running in the simulator we want to clean out the build directory\n\t\tif command!='simulator' and os.path.exists(build_out_dir):\n\t\t\tshutil.rmtree(build_out_dir)\n\t\tif not os.path.exists(build_out_dir): \n\t\t\tos.makedirs(build_out_dir)\n\t\t# write out the build log, useful for debugging\n\t\to = codecs.open(os.path.join(build_out_dir,'build.log'),'w',encoding='utf-8')\n\t\tdef log(msg):\n\t\t\tprint msg\n\t\t\to.write(msg)\n\t\ttry:\n\t\t\tbuildtime = datetime.datetime.now()\n\t\t\to.write(\"%s\\n\" % (\"=\"*80))\n\t\t\to.write(\"Appcelerator Titanium 
Diagnostics Build Log\\n\")\n\t\t\to.write(\"The contents of this file are useful to send to Appcelerator Support if\\n\")\n\t\t\to.write(\"reporting an issue to help us understand your environment, build settings\\n\")\n\t\t\to.write(\"and aid in debugging. Please attach this log to any issue that you report.\\n\")\n\t\t\to.write(\"%s\\n\\n\" % (\"=\"*80))\n\t\t\to.write(\"Starting build at %s\\n\\n\" % buildtime.strftime(\"%m/%d/%y %H:%M\"))\n\t\t\t\n\t\t\t# write out the build versions info\n\t\t\tversions_txt = read_config(os.path.join(template_dir,'..','version.txt'))\n\t\t\to.write(\"Build details:\\n\\n\")\n\t\t\tfor key in versions_txt:\n\t\t\t\to.write(\" %s=%s\\n\" % (key,versions_txt[key]))\n\t\t\to.write(\"\\n\\n\")\n\t\t\t\n\t\t\tif versions_txt.has_key('githash'): \n\t\t\t\tgithash = versions_txt['githash']\n\t\t\t\t\n\t\t\to.write(\"Script arguments:\\n\")\n\t\t\tfor arg in args:\n\t\t\t\to.write(unicode(\" %s\\n\" % arg, 'utf-8'))\n\t\t\to.write(\"\\n\")\n\t\t\to.write(\"Building from: %s\\n\" % template_dir)\n\t\t\to.write(\"Platform: %s\\n\\n\" % platform.version())\n\n\t\t\t# print out path to debug\n\t\t\txcode_path=run.run([\"/usr/bin/xcode-select\",\"-print-path\"],True,False)\n\t\t\tif xcode_path:\n\t\t\t\to.write(\"Xcode path is: %s\\n\" % xcode_path)\n\t\t\telse:\n\t\t\t\to.write(\"Xcode path undetermined\\n\")\n\n\t\t\t# find the module directory relative to the root of the SDK\t\n\t\t\ttitanium_dir = os.path.abspath(os.path.join(template_dir,'..','..','..','..'))\n\t\t\ttp_module_dir = os.path.abspath(os.path.join(titanium_dir,'modules','iphone'))\n\t\t\tforce_destroy_build = command!='simulator'\n\n\t\t\tdetector = ModuleDetector(project_dir)\n\t\t\tmissing_modules, modules = detector.find_app_modules(ti, 'iphone')\n\t\t\tmodule_lib_search_path, module_asset_dirs = locate_modules(modules, project_dir, app_dir, log)\n\n\t\t\t# search for modules that the project is using\n\t\t\t# and make sure we add them to the compile\n\t\t\tfor module in modules:\n\t\t\t\tmodule_id = module.manifest.moduleid.lower()\n\t\t\t\tmodule_version = module.manifest.version\n\t\t\t\tmodule_lib_name = ('lib%s.a' % module_id).lower()\n\t\t\t\t# check first in the local project\n\t\t\t\tlocal_module_lib = os.path.join(project_dir, 'modules', 'iphone', module_lib_name)\n\t\t\t\tlocal = False\n\t\t\t\tif os.path.exists(local_module_lib):\n\t\t\t\t\tmodule_lib_search_path.append([module_lib_name, local_module_lib])\n\t\t\t\t\tlocal = True\n\t\t\t\t\tlog(\"[INFO] Detected third-party module: %s\" % (local_module_lib))\n\t\t\t\telse:\n\t\t\t\t\tif module.lib is None:\n\t\t\t\t\t\tmodule_lib_path = module.get_resource(module_lib_name)\n\t\t\t\t\t\tlog(\"[ERROR] Third-party module: %s/%s missing library at %s\" % (module_id, module_version, module_lib_path))\n\t\t\t\t\t\tsys.exit(1)\n\t\t\t\t\tmodule_lib_search_path.append([module_lib_name, os.path.abspath(module.lib).rsplit('/',1)[0]])\n\t\t\t\t\tlog(\"[INFO] Detected third-party module: %s/%s\" % (module_id, module_version))\n\t\t\t\tforce_xcode = True\n\n\t\t\t\tif not local:\n\t\t\t\t\t# copy module resources\n\t\t\t\t\timg_dir = module.get_resource('assets', 'images')\n\t\t\t\t\tif os.path.exists(img_dir):\n\t\t\t\t\t\tdest_img_dir = os.path.join(app_dir, 'modules', module_id, 'images')\n\t\t\t\t\t\tif not os.path.exists(dest_img_dir):\n\t\t\t\t\t\t\tos.makedirs(dest_img_dir)\n\t\t\t\t\t\tmodule_asset_dirs.append([img_dir, dest_img_dir])\n\n\t\t\t\t\t# copy in any module assets\n\t\t\t\t\tmodule_assets_dir = 
module.get_resource('assets')\n\t\t\t\t\tif os.path.exists(module_assets_dir): \n\t\t\t\t\t\tmodule_dir = os.path.join(app_dir, 'modules', module_id)\n\t\t\t\t\t\tmodule_asset_dirs.append([module_assets_dir, module_dir])\n\n\t\t\tfull_version = sdk_version\n\t\t\tif 'version' in versions_txt:\n\t\t\t\tfull_version = versions_txt['version']\n\t\t\t\tif 'timestamp' in versions_txt or 'githash' in versions_txt:\n\t\t\t\t\tfull_version += ' ('\n\t\t\t\t\tif 'timestamp' in versions_txt:\n\t\t\t\t\t\tfull_version += '%s' % versions_txt['timestamp']\n\t\t\t\t\tif 'githash' in versions_txt:\n\t\t\t\t\t\tfull_version += ' %s' % versions_txt['githash']\n\t\t\t\t\tfull_version += ')'\n\n\t\t\tprint \"[INFO] Titanium SDK version: %s\" % full_version\n\t\t\tprint \"[INFO] iPhone Device family: %s\" % devicefamily\n\t\t\tprint \"[INFO] iPhone SDK version: %s\" % iphone_version\n\t\t\t\n\t\t\tif simulator or build_only:\n\t\t\t\tprint \"[INFO] iPhone simulated device: %s\" % simtype\n\t\t\t\t# during simulator we need to copy in standard built-in module files\n\t\t\t\t# since we might not run the compiler on subsequent launches\n\t\t\t\tfor module_name in ('facebook','ui'):\n\t\t\t\t\timg_dir = os.path.join(template_dir,'modules',module_name,'images')\n\t\t\t\t\tdest_img_dir = os.path.join(app_dir,'modules',module_name,'images')\n\t\t\t\t\tif not os.path.exists(dest_img_dir):\n\t\t\t\t\t\tos.makedirs(dest_img_dir)\n\t\t\t\t\tmodule_asset_dirs.append([img_dir,dest_img_dir])\n\n\t\t\t\t# when in simulator since we point to the resources directory, we need\n\t\t\t\t# to explicitly copy over any files\n\t\t\t\tird = os.path.join(project_dir,'Resources','iphone')\n\t\t\t\tif os.path.exists(ird): \n\t\t\t\t\tmodule_asset_dirs.append([ird,app_dir])\n\t\t\t\t\t\n\t\t\t\t# We also need to copy over the contents of 'platform/iphone'\n\t\t\t\tplatform_iphone = os.path.join(project_dir,'platform','iphone')\n\t\t\t\tif os.path.exists(platform_iphone):\n\t\t\t\t\tmodule_asset_dirs.append([platform_iphone,app_dir])\n\t\t\t\t\n\t\t\t\tfor ext in ('ttf','otf'):\n\t\t\t\t\tfor f in glob.glob('%s/*.%s' % (os.path.join(project_dir,'Resources'),ext)):\n\t\t\t\t\t\tcustom_fonts.append(f)\n\t\t\t\t\t\n\n\t\t\tif not (simulator or build_only):\n\t\t\t\tversion = ti.properties['version']\n\t\t\t\t# we want to make sure in debug mode the version always changes\n\t\t\t\tversion = \"%s.%d\" % (version,time.time())\n\t\t\t\tif (deploytype != 'production'):\n\t\t\t\t\tti.properties['version']=version\n\t\t\t\tpp = os.path.expanduser(\"~/Library/MobileDevice/Provisioning Profiles/%s.mobileprovision\" % appuuid)\n\t\t\t\tprovisioning_profile = read_provisioning_profile(pp,o)\n\t\n\t\t\tcreate_info_plist(ti, template_dir, project_dir, infoplist)\n\n\t\t\tapplogo = None\n\t\t\tclean_build = False\n\n\t\t\t# check to see if the appid is different (or not specified) - we need to re-generate\n\t\t\tif read_project_appid(project_xcconfig)!=appid or not infoplist_has_appid(infoplist,appid):\n\t\t\t\tclean_build = True\n\t\t\t\tforce_xcode = True\n\n\n\t\t\tnew_lib_hash = None\n\t\t\tlib_hash = None\t\n\t\t\texisting_git_hash = None\n\n\t\t\t# this code simply tries and detect if we're building a different\n\t\t\t# version of the project (or same version but built from different git hash)\n\t\t\t# and if so, make sure we force rebuild so to propograte any code changes in\n\t\t\t# source code (either upgrade or downgrade)\n\t\t\tif os.path.exists(app_dir):\n\t\t\t\tif os.path.exists(version_file):\n\t\t\t\t\tline = 
open(version_file).read().strip()\n\t\t\t\t\tlines = line.split(\",\")\n\t\t\t\t\tv = lines[0]\n\t\t\t\t\tlog_id = lines[1]\n\t\t\t\t\tif len(lines) > 2:\n\t\t\t\t\t\tlib_hash = lines[2]\n\t\t\t\t\t\texisting_git_hash = lines[3]\n\t\t\t\t\tif lib_hash==None:\n\t\t\t\t\t\tforce_rebuild = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tif template_dir==v and force_rebuild==False:\n\t\t\t\t\t\t\tforce_rebuild = False\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlog_id = None\n\t\t\t\telse:\n\t\t\t\t\tforce_rebuild = True\n\n\t\t\telse:\n\t\t\t\tforce_rebuild = True\n\n\t\t\to.write(\"\\ngithash=%s, existing_git_hash=%s\\n\" %(githash,existing_git_hash))\n\t\t\t\t\n\t\t\tif githash!=existing_git_hash:\n\t\t\t\tforce_rebuild = True\n\n\t\t\t# we want to read the md5 of the libTiCore.a library since it must match\n\t\t\t# the current one we're building and if not, we need to force a rebuild since\n\t\t\t# that means we've copied in a different version of the library and we need\n\t\t\t# to rebuild clean to avoid linking errors\n\t\t\tsource_lib=os.path.join(template_dir,'libTiCore.a')\n\t\t\tfd = open(source_lib,'rb')\n\t\t\tm = hashlib.md5()\n\t\t\tm.update(fd.read(1024)) # just read 1K, it's binary\n\t\t\tnew_lib_hash = m.hexdigest()\n\t\t\tfd.close()\n\t\t\t\n\t\t\tif new_lib_hash!=lib_hash:\n\t\t\t\tforce_rebuild=True\n\t\t\t\to.write(\"forcing rebuild since libhash (%s) not matching (%s)\\n\" % (lib_hash,new_lib_hash))\n\n\t\t\tlib_hash=new_lib_hash\n\n\t\t\t# when we force rebuild, we need to re-compile and re-copy source, libs etc\n\t\t\tif force_rebuild:\n\t\t\t\to.write(\"Performing full rebuild\\n\")\n\t\t\t\tprint \"[INFO] Performing full rebuild. This will take a little bit. Hold tight...\"\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tproject = Projector(name,sdk_version,template_dir,project_dir,appid)\n\t\t\t\tproject.create(template_dir,iphone_dir)\t\n\t\t\t\tforce_xcode = True\n\t\t\t\tif os.path.exists(app_dir): shutil.rmtree(app_dir)\n\t\t\t\t# we have to re-copy if we have a custom version\n\t\t\t\tcreate_info_plist(ti, template_dir, project_dir, infoplist)\n\t\t\t\t# since compiler will generate the module dependencies, we need to \n\t\t\t\t# attempt to compile to get it correct for the first time.\n\t\t\t\tcompiler = Compiler(project_dir,appid,name,deploytype,xcode_build,devicefamily,iphone_version,True)\n\t\t\telse:\n\t\t\t\tcontents=\"TI_VERSION=%s\\n\"% sdk_version\n\t\t\t\tcontents+=\"TI_SDK_DIR=%s\\n\" % template_dir.replace(sdk_version,'$(TI_VERSION)')\n\t\t\t\tcontents+=\"TI_APPID=%s\\n\" % appid\n\t\t\t\tcontents+=\"OTHER_LDFLAGS[sdk=iphoneos*]=$(inherited) -weak_framework iAd\\n\"\n\t\t\t\tcontents+=\"OTHER_LDFLAGS[sdk=iphonesimulator*]=$(inherited) -weak_framework iAd\\n\"\n\t\t\t\tcontents+=\"#include \\\"module\\\"\\n\"\n\t\t\t\txcconfig = open(project_xcconfig,'w+')\n\t\t\t\txccontents = xcconfig.read()\n\t\t\t\tif contents!=xccontents:\n\t\t\t\t\to.write(\"writing contents of %s:\\n\\n%s\\n\" % (project_xcconfig,contents))\n\t\t\t\t\to.write(\"old contents\\n\\n%s\\n\" % (xccontents))\n\t\t\t\t\txcconfig.write(contents)\n\t\t\t\t\txcconfig.close()\n\t\t\t\telse:\n\t\t\t\t\to.write(\"Skipping writing contents of xcconfig %s\\n\" % project_xcconfig)\n\n\t\t\t# write out any modules into the xcode project\n\t\t\t# this must be done after project create above or this will be overriden\n\t\t\tlink_modules(module_lib_search_path, name, iphone_dir)\n\n\t\t\tcwd = os.getcwd()\n\n\t\t\t# check to see if the symlink exists and that it points to the\n\t\t\t# right version of the library\n\t\t\tlibticore 
= os.path.join(template_dir,'libTiCore.a')\n\t\t\tmake_link = True\n\t\t\tsymlink = os.path.join(iphone_dir,'lib','libTiCore.a')\n\t\t\tif os.path.islink(symlink):\n\t\t\t\tpath = os.path.realpath(symlink)\n\t\t\t\tif path.find(sdk_version) > 0:\n\t\t\t\t\tmake_link = False\n\t\t\tif make_link:\n\t\t\t\tlibdir = os.path.join(iphone_dir,'lib')\n\t\t\t\tif not os.path.exists(libdir): os.makedirs(libdir)\n\t\t\t\tos.chdir(libdir)\n\t\t\t\t# a broken link will not return true on os.path.exists\n\t\t\t\t# so we need to use brute force\n\t\t\t\ttry:\n\t\t\t\t\tos.unlink(\"libTiCore.a\")\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tos.symlink(libticore,\"libTiCore.a\")\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t\tos.chdir(cwd)\n\n\t\t\t# if the lib doesn't exist, force a rebuild since it's a new build\n\t\t\tif not os.path.exists(os.path.join(iphone_dir,'lib','libtiverify.a')):\n\t\t\t\tshutil.copy(os.path.join(template_dir,'libtiverify.a'),os.path.join(iphone_dir,'lib','libtiverify.a'))\n\n\t\t\tif not os.path.exists(os.path.join(iphone_dir,'lib','libti_ios_debugger.a')):\n\t\t\t\tshutil.copy(os.path.join(template_dir,'libti_ios_debugger.a'),os.path.join(iphone_dir,'lib','libti_ios_debugger.a'))\n\n\t\t\t# compile JSS files\n\t\t\tcssc = csscompiler.CSSCompiler(os.path.join(project_dir,'Resources'),devicefamily,appid)\n\t\t\tapp_stylesheet = os.path.join(iphone_dir,'Resources','stylesheet.plist')\n\t\t\tasf = codecs.open(app_stylesheet,'w','utf-8')\n\t\t\tasf.write(cssc.code)\n\t\t\tasf.close()\n\n\t\t\t# compile debugger file\n\t\t\tdebug_plist = os.path.join(iphone_dir,'Resources','debugger.plist')\n\t\t\t\n\t\t\t# Force an xcodebuild if the debugger.plist has changed\n\t\t\tforce_xcode = write_debugger_plist(debughost, debugport, template_dir, debug_plist)\n\n\t\t\tif command not in ['simulator', 'build']:\n\t\t\t\t# compile plist into binary format so it's faster to load\n\t\t\t\t# we can be slow on simulator\n\t\t\t\tos.system(\"/usr/bin/plutil -convert binary1 \\\"%s\\\"\" % app_stylesheet)\n\t\t\t\n\t\t\to.write(\"Generated the following stylecode code:\\n\\n\")\n\t\t\to.write(cssc.code)\n\t\t\to.write(\"\\n\")\n\n\t\t\t# generate the Info.plist file with the appropriate device family\n\t\t\tif devicefamily!=None:\n\t\t\t\tapplogo = ti.generate_infoplist(infoplist,appid,devicefamily,project_dir,iphone_version)\n\t\t\telse:\n\t\t\t\tapplogo = ti.generate_infoplist(infoplist,appid,'iphone',project_dir,iphone_version)\n\t\t\t\t\n\t\t\t# attempt to load any compiler plugins\n\t\t\tif len(ti.properties['plugins']) > 0:\n\t\t\t\tlocal_compiler_dir = os.path.abspath(os.path.join(project_dir,'plugins'))\n\t\t\t\ttp_compiler_dir = os.path.abspath(os.path.join(titanium_dir,'plugins'))\n\t\t\t\tif not os.path.exists(tp_compiler_dir) and not os.path.exists(local_compiler_dir):\n\t\t\t\t\to.write(\"+ Missing plugins directory at %s\\n\" % tp_compiler_dir)\n\t\t\t\t\tprint \"[ERROR] Build Failed (Missing plugins directory). 
Please see output for more details\"\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\tsys.exit(1)\n\t\t\t\tcompiler_config = {\n\t\t\t\t\t'platform':'ios',\n\t\t\t\t\t'devicefamily':devicefamily,\n\t\t\t\t\t'simtype':simtype,\n\t\t\t\t\t'tiapp':ti,\n\t\t\t\t\t'project_dir':project_dir,\n\t\t\t\t\t'titanium_dir':titanium_dir,\n\t\t\t\t\t'appid':appid,\n\t\t\t\t\t'iphone_version':iphone_version,\n\t\t\t\t\t'template_dir':template_dir,\n\t\t\t\t\t'project_name':name,\n\t\t\t\t\t'command':command,\n\t\t\t\t\t'deploytype':deploytype,\n\t\t\t\t\t'build_dir':build_dir,\n\t\t\t\t\t'app_name':app_name,\n\t\t\t\t\t'app_dir':app_dir,\n\t\t\t\t\t'iphone_dir':iphone_dir\n\t\t\t\t}\n\t\t\t\tfor plugin in ti.properties['plugins']:\n\t\t\t\t\tlocal_plugin_file = os.path.join(local_compiler_dir,plugin['name'],'plugin.py')\n\t\t\t\t\tplugin_file = os.path.join(tp_compiler_dir,plugin['name'],plugin['version'],'plugin.py')\n\t\t\t\t\tif not os.path.exists(local_plugin_file) and not os.path.exists(plugin_file):\n\t\t\t\t\t\to.write(\"+ Missing plugin at %s (checked %s also)\\n\" % (plugin_file,local_plugin_file))\n\t\t\t\t\t\tprint \"[ERROR] Build Failed (Missing plugin for %s). Please see output for more details\" % plugin['name']\n\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\t\tsys.exit(1)\n\t\t\t\t\to.write(\"+ Detected plugin: %s/%s\\n\" % (plugin['name'],plugin['version']))\n\t\t\t\t\tprint \"[INFO] Detected compiler plugin: %s/%s\" % (plugin['name'],plugin['version'])\n\t\t\t\t\tcode_path = plugin_file\n\t\t\t\t\tif os.path.exists(local_plugin_file):\t\n\t\t\t\t\t\tcode_path = local_plugin_file\n\t\t\t\t\to.write(\"+ Loading compiler plugin at %s\\n\" % code_path)\n\t\t\t\t\tcompiler_config['plugin']=plugin\n\t\t\t\t\tfin = open(code_path, 'rb')\n\t\t\t\t\tm = hashlib.md5()\n\t\t\t\t\tm.update(open(code_path,'rb').read()) \n\t\t\t\t\tcode_hash = m.hexdigest()\n\t\t\t\t\tp = imp.load_source(code_hash, code_path, fin)\n\t\t\t\t\tmodule_functions = dict(inspect.getmembers(p, inspect.isfunction))\n\t\t\t\t\tif module_functions.has_key('postbuild'):\n\t\t\t\t\t\tprint \"[DBEUG] Plugin has postbuild\"\n\t\t\t\t\t\to.write(\"+ Plugin has postbuild\")\n\t\t\t\t\t\tpostbuild_modules.append((plugin['name'], p))\n\t\t\t\t\tp.compile(compiler_config)\n\t\t\t\t\tfin.close()\n\t\t\t\t\t\n\t\t\ttry:\t\t\n\t\t\t\tos.chdir(iphone_dir)\n\n\t\t\t\t# we always target backwards to 4.0 even when we use a later\n\t\t\t\t# version iOS SDK. 
this ensures our code will run on old devices\n\t\t\t\t# no matter which SDK we compile with\n\t\t\t\tdeploy_target = \"IPHONEOS_DEPLOYMENT_TARGET=4.0\"\n\t\t\t\tdevice_target = 'TARGETED_DEVICE_FAMILY=1' # this is non-sensical, but you can't pass empty string\n\n\t\t\t\t# clean means we need to nuke the build \n\t\t\t\tif clean_build or force_destroy_build: \n\t\t\t\t\tprint \"[INFO] Performing clean build\"\n\t\t\t\t\to.write(\"Performing clean build...\\n\")\n\t\t\t\t\tif os.path.exists(app_dir):\n\t\t\t\t\t\tshutil.rmtree(app_dir)\n\n\t\t\t\tif not os.path.exists(app_dir): os.makedirs(app_dir)\n\n\t\t\t\t# compile localization files\n\t\t\t\t# Using app_name here will cause the locale to be put in the WRONG bundle!!\n\t\t\t\tlocalecompiler.LocaleCompiler(name,project_dir,devicefamily,deploytype).compile()\n\t\t\t\t\n\t\t\t\t# copy any module resources\n\t\t\t\tif len(module_asset_dirs)>0:\n\t\t\t\t\tfor e in module_asset_dirs:\n\t\t\t\t\t\tcopy_module_resources(e[0],e[1],True)\n\t\t\t\t\n\t\t\t\t# copy any custom fonts in (only runs in simulator)\n\t\t\t\t# since we need to make them live in the bundle in simulator\n\t\t\t\tif len(custom_fonts)>0:\n\t\t\t\t\tfor f in custom_fonts:\n\t\t\t\t\t\tprint \"[INFO] Detected custom font: %s\" % os.path.basename(f)\n\t\t\t\t\t\tshutil.copy(f,app_dir)\n\n\t\t\t\t# dump out project file info\n\t\t\t\tif command not in ['simulator', 'build']:\n\t\t\t\t\tdump_resources_listing(project_dir,o)\n\t\t\t\t\tdump_infoplist(infoplist,o)\n\n\t\t\t\tinstall_logo(ti, applogo, project_dir, template_dir, app_dir)\n\t\t\t\tinstall_defaults(project_dir, template_dir, iphone_resources_dir)\n\n\t\t\t\textra_args = None\n\n\t\t\t\trecompile = copy_tiapp_properties(project_dir)\n\t\t\t\t# if the anything changed in the application defaults then we have to force a xcode build.\n\t\t\t\tif recompile == True:\n\t\t\t\t\tforce_xcode = recompile\n\n\t\t\t\tif devicefamily!=None:\n\t\t\t\t\t# Meet the minimum requirements for ipad when necessary\n\t\t\t\t\tif devicefamily == 'ipad' or devicefamily == 'universal':\n\t\t\t\t\t\tdevice_target=\"TARGETED_DEVICE_FAMILY=2\"\n\t\t\t\t\t\t# NOTE: this is very important to run on device -- i dunno why\n\t\t\t\t\t\t# xcode warns that 3.2 needs only armv7, but if we don't pass in \n\t\t\t\t\t\t# armv6 we get crashes on device\n\t\t\t\t\t\textra_args = [\"VALID_ARCHS=armv6 armv7 i386\"]\n\t\t\t\t\t# Additionally, if we're universal, change the device family target\n\t\t\t\t\tif devicefamily == 'universal':\n\t\t\t\t\t\tdevice_target=\"TARGETED_DEVICE_FAMILY=1,2\"\n\n\t\t\t\tkroll_coverage = \"\"\n\t\t\t\tif ti.has_app_property(\"ti.ios.enablecoverage\"):\n\t\t\t\t\tenable_coverage = ti.to_bool(ti.get_app_property(\"ti.ios.enablecoverage\"))\n\t\t\t\t\tif enable_coverage:\n\t\t\t\t\t\tkroll_coverage = \"KROLL_COVERAGE=1\"\n\n\t\t\t\tdef execute_xcode(sdk,extras,print_output=True):\n\n\t\t\t\t\tconfig = name\n\t\t\t\t\tif devicefamily=='ipad':\n\t\t\t\t\t\tconfig = \"%s-iPad\" % config\n\t\t\t\t\tif devicefamily=='universal':\n\t\t\t\t\t\tconfig = \"%s-universal\" % config\n\n\t\t\t\t\t# these are the arguments for running a command line xcode build\n\t\t\t\t\targs = [\"xcodebuild\",\"-target\",config,\"-configuration\",target,\"-sdk\",sdk]\n\t\t\t\t\tif extras!=None and len(extras)>0: \n\t\t\t\t\t\targs += extras\n\t\t\t\t\targs += [deploy_target,device_target]\n\t\t\t\t\tif extra_args!=None and len(extra_args)>0:\n\t\t\t\t\t\targs += extra_args\n\n\t\t\t\t\to.write(\"Starting Xcode compile with the following 
arguments:\\n\\n\")\n\t\t\t\t\tfor arg in args: o.write(\" %s\\n\" % arg)\n\t\t\t\t\to.write(\"\\napp_id = %s\\n\" % appid)\n\t\t\t\t\to.write(\"\\n\\n\")\n\t\t\t\t\to.flush()\n\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint \"[DEBUG] compile checkpoint: %0.2f seconds\" % (time.time()-start_time)\n\t\t\t\t\t\tprint \"[INFO] Executing XCode build...\"\n\t\t\t\t\t\tprint \"[BEGIN_VERBOSE] Executing XCode Compiler <span>[toggle output]</span>\"\n\n\t\t\t\t\toutput = run.run(args,False,False,o)\n\n\t\t\t\t\tif print_output:\n\t\t\t\t\t\tprint output\n\t\t\t\t\t\tprint \"[END_VERBOSE]\"\n\t\t\t\t\t\tsys.stdout.flush()\n\n\t\t\t\t\t# Output already written by run.run\n\t\t\t\t\t#o.write(output)\n\n\t\t\t\t\t# check to make sure the user doesn't have a custom build location \n\t\t\t\t\t# configured in Xcode which currently causes issues with titanium\n\t\t\t\t\tidx = output.find(\"TARGET_BUILD_DIR \")\n\t\t\t\t\tif idx > 0:\n\t\t\t\t\t\tendidx = output.find(\"\\n\",idx)\n\t\t\t\t\t\tif endidx > 0:\n\t\t\t\t\t\t\ttarget_build_dir = dequote(output[idx+17:endidx].strip())\n\t\t\t\t\t\t\tif target_build_dir!=build_dir:\n\t\t\t\t\t\t\t\to.write(\"+ TARGET_BUILD_DIR = %s\\n\" % target_build_dir)\n\t\t\t\t\t\t\t\tprint \"[ERROR] Your TARGET_BUILD_DIR is incorrectly set. Most likely you have configured in Xcode a customized build location. Titanium does not currently support this configuration.\"\n\t\t\t\t\t\t\t\tprint \"[ERROR] Expected dir %s, was: %s\" % (build_dir,target_build_dir)\n\t\t\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\t\t\t\tsys.exit(1)\n\n\t\t\t\t\t# look for build error\n\t\t\t\t\tif output.find(\"** BUILD FAILED **\")!=-1 or output.find(\"ld returned 1\")!=-1 or output.find(\"The following build commands failed:\")!=-1:\n\t\t\t\t\t\to.write(\"+ Detected build failure\\n\")\n\t\t\t\t\t\tprint \"[ERROR] Build Failed. Please see output for more details\"\n\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\t\tsys.exit(1)\n\n\t\t\t\t\to.write(\"+ Looking for application binary at %s\\n\" % binary)\n\n\t\t\t\t\t# make sure binary exists\n\t\t\t\t\tif not os.path.exists(binary):\n\t\t\t\t\t\to.write(\"+ Missing application binary at %s\\n\" % binary)\n\t\t\t\t\t\tprint \"[ERROR] Build Failed (Missing app at %s). 
Please see output for more details\" % binary\n\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\t\tsys.exit(1)\n\n\t\t\t\t\t# look for a code signing error\n\t\t\t\t\terror = re.findall(r'Code Sign error:(.*)',output)\n\t\t\t\t\tif len(error) > 0:\n\t\t\t\t\t\to.write(\"+ Detected code sign error: %s\\n\" % error[0])\n\t\t\t\t\t\tprint \"[ERROR] Code sign error: %s\" % error[0].strip()\n\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\t\tsys.exit(1)\n\t\t\t\t\t\n\t\t\t\tdef run_postbuild():\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif postbuild_modules:\n\t\t\t\t\t\t\tfor p in postbuild_modules:\n\t\t\t\t\t\t\t\to.write(\"Running postbuild %s\" % p[0])\n\t\t\t\t\t\t\t\tprint \"[INFO] Running postbuild %s...\" % p[0]\n\t\t\t\t\t\t\t\tp[1].postbuild()\n\t\t\t\t\texcept Exception,e:\n\t\t\t\t\t\to.write(\"Error in post-build: %s\" % e)\n\t\t\t\t\t\tprint \"[ERROR] Error in post-build: %s\" % e\n\t\t\t\t\t\t\n\n\t\t\t\t# build the final release distribution\n\t\t\t\targs = []\n\n\t\t\t\tif command not in ['simulator', 'build']:\n\t\t\t\t\t# allow the project to have its own custom entitlements\n\t\t\t\t\tcustom_entitlements = os.path.join(project_dir,\"Entitlements.plist\")\n\t\t\t\t\tentitlements_contents = None\n\t\t\t\t\tif os.path.exists(custom_entitlements):\n\t\t\t\t\t\tentitlements_contents = open(custom_entitlements).read()\n\t\t\t\t\t\to.write(\"Found custom entitlements: %s\\n\" % custom_entitlements)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# attempt to customize it by reading prov profile\n\t\t\t\t\t\tentitlements_contents = generate_customized_entitlements(provisioning_profile,appid,appuuid,command,o)\n\t\t\t\t\to.write(\"Generated the following entitlements:\\n\\n%s\\n\\n\" % entitlements_contents)\n\t\t\t\t\tf=open(os.path.join(iphone_resources_dir,'Entitlements.plist'),'w+')\n\t\t\t\t\tf.write(entitlements_contents)\n\t\t\t\t\tf.close()\n\t\t\t\t\targs+=[\"CODE_SIGN_ENTITLEMENTS=Resources/Entitlements.plist\"]\n\n\t\t\t\t# only build if force rebuild (different version) or \n\t\t\t\t# the app hasn't yet been built initially\n\t\t\t\tif ti.properties['guid']!=log_id or force_xcode:\n\t\t\t\t\tlog_id = ti.properties['guid']\n\t\t\t\t\tf = open(version_file,'w+')\n\t\t\t\t\tf.write(\"%s,%s,%s,%s\" % (template_dir,log_id,lib_hash,githash))\n\t\t\t\t\tf.close()\n\n\t\t\t\t# both simulator and build require an xcodebuild\n\t\t\t\tif command in ['simulator', 'build']:\n\t\t\t\t\tdebugstr = ''\n\t\t\t\t\tif debughost:\n\t\t\t\t\t\tdebugstr = 'DEBUGGER_ENABLED=1'\n\t\t\t\t\t\n\t\t\t\t\tif force_rebuild or force_xcode or not os.path.exists(binary):\n\t\t\t\t\t\texecute_xcode(\"iphonesimulator%s\" % link_version,[\"GCC_PREPROCESSOR_DEFINITIONS=__LOG__ID__=%s DEPLOYTYPE=development TI_DEVELOPMENT=1 DEBUG=1 TI_VERSION=%s %s %s\" % (log_id,sdk_version,debugstr,kroll_coverage)],False)\n\t\t\t\t\t\t\n\t\t\t\t\trun_postbuild()\n\t\t\t\t\t\n\t\t\t\t\to.write(\"Finishing build\\n\")\n\n\t\t\t\tif command == 'simulator':\n\t\t\t\t\t# first make sure it's not running\n\t\t\t\t\tkill_simulator()\n\n\t\t\t\t\t# sometimes the simulator doesn't remove old log files\n\t\t\t\t\t# in which case we get our logging jacked - we need to remove\n\t\t\t\t\t# them before running the simulator\n\n\t\t\t\t\tcleanup_app_logfiles(ti, log_id, iphone_version)\n\n\t\t\t\t\tsim = None\n\n\t\t\t\t\t# this handler will simply catch when the simulator exits\n\t\t\t\t\t# so we can exit this script\n\t\t\t\t\tdef handler(signum, frame):\n\t\t\t\t\t\tglobal script_ok\n\t\t\t\t\t\tprint \"[INFO] Simulator is exiting\"\n\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\t\tif 
not log == None:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tos.system(\"kill -2 %s\" % str(log.pid))\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tpass\n\t\t\t\t\t\tif not sim == None and signum!=3:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tos.system(\"kill -3 %s\" % str(sim.pid))\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tpass\n\n\t\t\t\t\t\tkill_simulator()\n\t\t\t\t\t\tscript_ok = True\n\t\t\t\t\t\tsys.exit(0)\n\n\t\t\t\t\t# make sure we're going to stop this script whenever \n\t\t\t\t\t# the simulator exits\n\t\t\t\t\tsignal.signal(signal.SIGHUP, handler)\n\t\t\t\t\tsignal.signal(signal.SIGINT, handler)\n\t\t\t\t\tsignal.signal(signal.SIGQUIT, handler)\n\t\t\t\t\tsignal.signal(signal.SIGABRT, handler)\n\t\t\t\t\tsignal.signal(signal.SIGTERM, handler)\n\n\t\t\t\t\tprint \"[INFO] Launching application in Simulator\"\n\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\tsys.stderr.flush()\n\n\t\t\t\t\t# set the DYLD_FRAMEWORK_PATH environment variable for the following Popen iphonesim command\n\t\t\t\t\t# this allows the XCode developer folder to be arbitrarily named\n\t\t\t\t\txcodeselectpath = os.popen(\"/usr/bin/xcode-select -print-path\").readline().rstrip('\\n')\n\t\t\t\t\tiphoneprivateframeworkspath = xcodeselectpath + '/Platforms/iPhoneSimulator.platform/Developer/Library/PrivateFrameworks'\n\t\t\t\t\tos.putenv('DYLD_FRAMEWORK_PATH', iphoneprivateframeworkspath)\n\n\t\t\t\t\t# launch the simulator\n\t\t\t\t\t\n\t\t\t\t\t# Awkard arg handling; we need to take 'retina' to be a device type,\n\t\t\t\t\t# even though it's really not (it's a combination of device type and configuration).\n\t\t\t\t\t# So we translate it into two args:\n\t\t\t\t\tif simtype == 'retina':\n\t\t\t\t\t\t# Manually overrule retina type if we're an ipad\n\t\t\t\t\t\tif devicefamily == 'ipad':\n\t\t\t\t\t\t\tsimtype = 'ipad'\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsimtype = 'iphone --retina'\n\t\t\t\t\tif devicefamily==None:\n\t\t\t\t\t\tsim = subprocess.Popen(\"\\\"%s\\\" launch \\\"%s\\\" --sdk %s\" % (iphonesim,app_dir,iphone_version),shell=True,cwd=template_dir)\n\t\t\t\t\telse:\n\t\t\t\t\t\tsim = subprocess.Popen(\"\\\"%s\\\" launch \\\"%s\\\" --sdk %s --family %s\" % (iphonesim,app_dir,iphone_version,simtype),shell=True,cwd=template_dir)\n\n\t\t\t\t\t# activate the simulator window - we use a OSA script to \n\t\t\t\t\t# cause the simulator window to come into the foreground (otherwise\n\t\t\t\t\t# it will be behind Titanium Developer window)\n\t\t\t\t\tass = os.path.join(template_dir,'iphone_sim_activate.scpt')\n\t\t\t\t\tcmd = \"osascript \\\"%s\\\" 2>/dev/null\" % ass\n\t\t\t\t\tos.system(cmd)\n\n\t\t\t\t\tend_time = time.time()-start_time\n\n\t\t\t\t\tprint \"[INFO] Launched application in Simulator (%0.2f seconds)\" % end_time\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\tsys.stderr.flush()\n\n\t\t\t\t\t# give the simulator a bit to get started and up and running before \n\t\t\t\t\t# starting the logger\n\t\t\t\t\ttime.sleep(2)\n\n\t\t\t\t\tlogger = os.path.realpath(os.path.join(template_dir,'logger.py'))\n\n\t\t\t\t\t# start the logger tail process. 
this will simply read the output\n\t\t\t\t\t# from the logs and stream them back to Titanium Developer on the console\n\t\t\t\t\tlog = subprocess.Popen([\n\t\t\t\t\t \tlogger,\n\t\t\t\t\t\tstr(log_id)+'.log',\n\t\t\t\t\t\tiphone_version\n\t\t\t\t\t])\t\n\n\t\t\t\t\t# wait (blocking this script) until the simulator exits\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.waitpid(sim.pid,0)\n\t\t\t\t\texcept SystemExit:\n\t\t\t\t\t\t# If the user terminates the app here, it's via a\n\t\t\t\t\t\t# soft kill of some kind (i.e. like what TiDev does)\n\t\t\t\t\t\t# and so we should suppress the usual error message.\n\t\t\t\t\t\t# Fixes #2086\n\t\t\t\t\t\tpass\n\n\t\t\t\t\tprint \"[INFO] Application has exited from Simulator\"\n\n\t\t\t\t\t# in this case, the user has exited the simulator itself\n\t\t\t\t\t# and not clicked Stop Emulator from within Developer so we kill\n\t\t\t\t\t# our tail log process but let simulator keep running\n\t\t\t\t\tif not log == None:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tos.system(\"kill -2 %s\" % str(log.pid))\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tpass\n\n\t\t\t\t\tscript_ok = True\n\t\t\t\t\t\n\t\t\t\t###########################################################################\t\n\t\t\t\t# END OF SIMULATOR COMMAND\t\n\t\t\t\t###########################################################################\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t#\n\t\t\t\t# this command is run for installing an app on device\n\t\t\t\t#\n\t\t\t\telif command == 'install':\n\n\t\t\t\t\tdebugstr = ''\n\t\t\t\t\tif debughost:\n\t\t\t\t\t\tdebugstr = 'DEBUGGER_ENABLED=1'\n\t\t\t\t\t\t\n\t\t\t\t\targs += [\n\t\t\t\t\t\t\"GCC_PREPROCESSOR_DEFINITIONS=DEPLOYTYPE=test TI_TEST=1 %s %s\" % (debugstr, kroll_coverage),\n\t\t\t\t\t\t\"PROVISIONING_PROFILE=%s\" % appuuid,\n\t\t\t\t\t\t\"CODE_SIGN_IDENTITY=iPhone Developer: %s\" % dist_name,\n\t\t\t\t\t\t\"DEPLOYMENT_POSTPROCESSING=YES\"\n\t\t\t\t\t]\n\t\t\t\t\texecute_xcode(\"iphoneos%s\" % iphone_version,args,False)\n\n\t\t\t\t\tprint \"[INFO] Installing application in iTunes ... one moment\"\n\t\t\t\t\tsys.stdout.flush()\n\n\t\t\t\t\tif os.path.exists(\"/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/PackageApplication\"):\n\t\t\t\t\t\to.write(\"+ Preparing to run /Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/PackageApplication\\n\")\n\t\t\t\t\t\toutput = run.run([\"/Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/PackageApplication\",app_dir],True)\n\t\t\t\t\t\to.write(\"+ Finished running /Developer/Platforms/iPhoneOS.platform/Developer/usr/bin/PackageApplication\\n\")\n\t\t\t\t\t\tif output: o.write(output)\n\n\t\t\t\t\t# for install, launch itunes with the app\n\t\t\t\t\tipa = os.path.join(os.path.dirname(app_dir),\"%s.ipa\" % name)\n\t\t\t\t\to.write(\"+ IPA file should be at %s\\n\" % ipa);\n\n\t\t\t\t\t# it appears that sometimes this command above fails on certain installs\n\t\t\t\t\t# or is missing. 
let's just open if we have it otherwise, open the app \n\t\t\t\t\t# directory\n\t\t\t\t\tif not os.path.exists(ipa):\n\t\t\t\t\t\t# just open the app dir itself\n\t\t\t\t\t\to.write(\"+ IPA didn't exist at %s\\n\" % ipa)\n\t\t\t\t\t\to.write(\"+ Will try and open %s\\n\" % app_dir)\n\t\t\t\t\t\tipa = app_dir\n\n\t\t\t\t\t# to force iTunes to install our app, we simply open the IPA\n\t\t\t\t\t# file in itunes\n\t\t\t\t\tcmd = \"open -b com.apple.itunes \\\"%s\\\"\" % ipa\n\t\t\t\t\to.write(\"+ Executing the command: %s\\n\" % cmd)\n\t\t\t\t\tos.system(cmd)\n\t\t\t\t\to.write(\"+ After executing the command: %s\\n\" % cmd)\n\n\t\t\t\t\t# now run our applescript to tell itunes to sync to get\n\t\t\t\t\t# the application on the phone\n\t\t\t\t\tass = os.path.join(template_dir,'itunes_sync.scpt')\n\t\t\t\t\tcmd = \"osascript \\\"%s\\\"\" % ass\n\t\t\t\t\to.write(\"+ Executing the command: %s\\n\" % cmd)\n\t\t\t\t\tos.system(cmd)\n\t\t\t\t\to.write(\"+ After executing the command: %s\\n\" % cmd)\n\n\t\t\t\t\tprint \"[INFO] iTunes sync initiated\"\n\n\t\t\t\t\to.write(\"Finishing build\\n\")\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\tscript_ok = True\n\t\t\t\t\t\n\t\t\t\t\trun_postbuild()\n\t\t\t\t\t\n\t\t\t\t###########################################################################\t\n\t\t\t\t# END OF INSTALL COMMAND\t\n\t\t\t\t###########################################################################\t\n\n\t\t\t\t#\n\t\t\t\t# this command is run for packaging an app for distribution\n\t\t\t\t#\n\t\t\t\telif command == 'distribute':\n\n\t\t\t\t\tdeploytype = \"production\"\n\n\t\t\t\t\targs += [\n\t\t\t\t\t\t\"GCC_PREPROCESSOR_DEFINITIONS=DEPLOYTYPE=%s TI_PRODUCTION=1\" % deploytype,\n\t\t\t\t\t\t\"PROVISIONING_PROFILE=%s\" % appuuid,\n\t\t\t\t\t\t\"CODE_SIGN_IDENTITY=iPhone Distribution: %s\" % dist_name,\n\t\t\t\t\t\t\"DEPLOYMENT_POSTPROCESSING=YES\"\n\t\t\t\t\t]\n\t\t\t\t\texecute_xcode(\"iphoneos%s\" % iphone_version,args,False)\n\n\t\t\t\t\t# switch to app_bundle for zip\n\t\t\t\t\tos.chdir(build_dir)\n\t\t\t\t\tif xcode_version() >= 4.0:\n\t\t\t\t\t\tdistribute_xc4(name, applogo, o)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdistribute_xc3(uuid, provisioning_profile, name, o)\n\n\t\t\t\t\t# open xcode + organizer after packaging\n\t\t\t\t\t# Have to force the right xcode open...\n\t\t\t\t\txc_path = os.path.join(run.run(['xcode-select','-print-path'],True,False).rstrip(),'Applications','Xcode.app')\n\t\t\t\t\to.write(\"Launching xcode: %s\\n\" % xc_path)\n\t\t\t\t\tos.system('open -a %s' % xc_path)\n\t\t\t\t\t\n\t\t\t\t\tass = os.path.join(template_dir,'xcode_organizer.scpt')\n\t\t\t\t\tcmd = \"osascript \\\"%s\\\"\" % ass\n\t\t\t\t\tos.system(cmd)\n\t\t\t\t\t\n\t\t\t\t\to.write(\"Finishing build\\n\")\n\t\t\t\t\tscript_ok = True\n\t\t\t\t\t\n\t\t\t\t\trun_postbuild()\n\n\t\t\t\t###########################################################################\t\n\t\t\t\t# END OF DISTRIBUTE COMMAND\t\n\t\t\t\t###########################################################################\t\n\n\t\t\tfinally:\n\t\t\t\tos.chdir(cwd)\n\t\texcept:\n\t\t\tprint \"[ERROR] Error: %s\" % traceback.format_exc()\n\t\t\tif not script_ok:\n\t\t\t\to.write(\"\\nException detected in script:\\n\")\n\t\t\t\ttraceback.print_exc(file=o)\n\t\t\t\to.close()\n\t\t\t\tsys.exit(1)\n\t\t\telse:\n\t\t\t\to.close()\n\nif __name__ == \"__main__\":\n\tmain(sys.argv)\n\tsys.exit(0)\n", "id": "9650101", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "support/iphone/builder.py" }, { "content": "#!/usr/bin/env 
python\r\n# -*- coding: utf-8 -*-\r\n#\r\n# Project Compiler\r\n#\r\n\r\nimport os, sys, re, shutil, time, base64, sgmllib, codecs, xml, datetime\r\n\r\n# Add the Android support dir, since mako is located there, and import mako\r\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),\"..\", \"android\")))\r\nimport mako.template\r\nimport simplejson as json\r\n\r\ntemplate_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))\r\nfrom tiapp import *\r\nimport jspacker \r\n\r\nignoreFiles = ['.gitignore', '.cvsignore', '.DS_Store'];\r\nignoreDirs = ['.git','.svn','_svn','CVS','android','iphone'];\r\n\r\nyear = datetime.datetime.now().year\r\n\r\nHTML_HEADER = \"\"\"<!--\r\n\tWARNING: this is generated code and will be lost if changes are made.\r\n\tThis generated source code is Copyright (c) 2010-%d by Appcelerator, Inc. All Rights Reserved.\r\n\t-->\"\"\" % year\r\n\r\nHEADER = \"\"\"/**\r\n * WARNING: this is generated code and will be lost if changes are made.\r\n * This generated source code is Copyright (c) 2010-%d by Appcelerator, Inc. All Rights Reserved.\r\n */\r\n\"\"\" % year\r\n\r\nFOOTER = \"\"\"\"\"\"\r\n\r\nclass Compiler(object):\r\n\tdef __init__(self,project_dir,deploytype):\r\n\t\tself.project_dir = project_dir\r\n\r\n#\t\tself.modules = []\r\n\r\n\t\tself.defines = [\r\n\t\t\t\t# these MUST be ordered correctly!\r\n\t\t\t\t'eventdriven.js',\r\n\t\t\t\t\r\n\t\t\t\t# base classes\r\n\t\t\t\t'Ti/_/Evented.js',\r\n\t\t\t\t'Ti/_/UI/Element.js',\r\n\t\t\t\t'Ti/_/Layouts/Base.js',\r\n\t\t\t\t'Ti/_/Layouts/Absolute.js',\r\n\t\t\t\t'Ti/_/Layouts/Horizontal.js',\r\n\t\t\t\t'Ti/_/Layouts/Vertical.js',\r\n\t\t\t\t'Ti/_/Layouts.js',\r\n\t\t\t\t\r\n\t\t\t\t# core classes\r\n\t\t\t\t'Ti/ti.js',\r\n\t\t\t\t'Ti/Accelerometer.js',\r\n\t\t\t\t'Ti/Analytics.js',\r\n\t\t\t\t'Ti/API.js',\r\n\t\t\t\t'Ti/App.js',\r\n\t\t\t\t'Ti/App/Properties.js',\r\n\t\t\t\t'Ti/Blob.js',\r\n\t\t\t\t'Ti/Contacts.js',\r\n\t\t\t\t'Ti/Database.js',\r\n\t\t\t\t'Ti/Facebook.js',\r\n\t\t\t\t'Ti/Filesystem.js',\r\n\t\t\t\t'Ti/Geolocation.js',\r\n\t\t\t\t'Ti/Locale.js',\r\n\t\t\t\t'Ti/Map.js',\r\n\t\t\t\t'Ti/Media.js',\r\n\t\t\t\t'Ti/Network.js',\r\n\t\t\t\t'Ti/Network/HTTPClient.js',\r\n\t\t\t\t'Ti/Platform.js',\r\n\t\t\t\t'Ti/Platform/DisplayCaps.js',\r\n\t\t\t\t'Ti/UI.js',\r\n\t\t\t\t'Ti/Gesture.js',\r\n\t\t\t\t'Ti/XML.js',\r\n\t\t\t\t\r\n\t\t\t\t# View classes\r\n\t\t\t\t'Ti/UI/View.js',\r\n\t\t\t\t'Ti/Media/VideoPlayer.js',\r\n\t\t\t\t'Ti/UI/TableViewRow.js',\r\n\t\t\t\t\r\n\t\t\t\t# SuperView classes\r\n\t\t\t\t'Ti/_/UI/SuperView.js',\r\n\t\t\t\t'Ti/UI/Tab.js',\r\n\t\t\t\t'Ti/UI/TabGroup.js',\r\n\t\t\t\t'Ti/UI/Window.js',\r\n\t\t\t\t\r\n\t\t\t\t# Widget classes\r\n\t\t\t\t'Ti/_/UI/Widget.js',\r\n\t\t\t\t'Ti/_/UI/FontWidget.js',\r\n\t\t\t\t'Ti/UI/2DMatrix.js',\r\n\t\t\t\t'Ti/UI/ActivityIndicator.js',\r\n\t\t\t\t'Ti/UI/AlertDialog.js',\r\n\t\t\t\t'Ti/UI/Button.js',\r\n\t\t\t\t'Ti/UI/ImageView.js',\r\n\t\t\t\t'Ti/UI/Label.js',\r\n\t\t\t\t'Ti/UI/ScrollableView.js',\r\n\t\t\t\t'Ti/UI/ScrollView.js',\r\n\t\t\t\t'Ti/UI/Slider.js',\r\n\t\t\t\t'Ti/UI/Switch.js',\r\n\t\t\t\t'Ti/UI/TableView.js',\r\n\t\t\t\t'Ti/UI/TableViewSection.js',\r\n\t\t\t\t'Ti/UI/TextArea.js',\r\n\t\t\t\t'Ti/UI/TextField.js',\r\n\t\t\t\t'Ti/UI/WebView.js',\r\n\t\t\t\t'Ti/Utils.js',\r\n\t\t\t\t\r\n\t\t\t\t# resources\r\n\t\t\t\t'titanium.css',\r\n\r\n\t\t\t\t# everything below will eventually go away 
:)\r\n\t\t\t\t#'screen.js',\r\n\t\t\t\t#'interactable.js',\r\n\t\t\t\t#'clickable.js',\r\n\t\t\t\t#'styleable.js',\r\n\t\t\t\t#'touchable.js',\r\n\t\t\t\t#'positionable.js',\r\n\t\t\t\t#'domview.js',\r\n\t\t\t\t#'Ti.App/properties.js',\r\n\t\t\t\t#'Ti.Locale/locale.js',\r\n\t\t\t]\r\n\t\t\r\n#\t\tself.css_defines = []\r\n#\t\tself.ti_includes = {}\r\n#\t\tself.api_map = {}\r\n\t\t\r\n\t\tself.build_dir = os.path.join(self.project_dir,'build','mobileweb')\r\n\t\t\r\n\t\tself.resources_dir = os.path.join(self.project_dir,'Resources')\r\n\t\tself.debug = True # temporarily forcing debug (i.e. development) mode until jsmin is replaced\r\n\t\tself.count = 0\r\n\t\t\r\n\t\tif deploytype == 'development' or deploytype == 'all':\r\n\t\t\tself.debug = True\r\n\r\n\t\tsrc_dir = os.path.join(template_dir,'src')\r\n\r\n\t\tif os.path.exists(self.build_dir):\r\n\t\t\tshutil.rmtree(self.build_dir, True)\r\n\r\n\t\ttry:\r\n\t\t\tos.makedirs(self.build_dir)\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\t\t\r\n\t\t# load up our API map\r\n#\t\tmap_props = open(os.path.join(src_dir,'map.prop')).read()\r\n#\t\tfor line in map_props.split(\"\\n\"):\r\n#\t\t\tif line[0:1] == '#' or line[0:1]=='': continue\r\n#\t\t\tkey,value = line.split(\"=\")\r\n#\t\t\tself.api_map[key.strip()]=value.strip().split()\r\n\r\n\t\ttiapp_xml = os.path.join(project_dir,'tiapp.xml')\r\n\t\tti = TiAppXML(tiapp_xml)\r\n\t\tsdk_version = os.path.basename(os.path.abspath(os.path.join(template_dir,'../')))\r\n\r\n\t\tself.project_name = ti.properties['name']\r\n\t\tself.appid = ti.properties['id']\r\n\r\n# temporarily already being forced, will need to re-enable\r\n#\t\tif ti.properties['analytics']:\r\n#\t\t\tself.defines.append(\"Ti.Platform/platform.js\")\r\n\r\n#\t\tdef compile_js(from_,to_):\r\n#\t\t\ttry:\r\n#\t\t\t\tjs = Compiler.make_function_from_file(from_,self)\r\n#\t\t\t\to = codecs.open(to_,'w',encoding='utf-8')\r\n#\t\t\t\to.write(js)\r\n#\t\t\t\to.close()\r\n#\t\t\t\tself.count+=1\r\n#\t\t\texcept:\r\n#\t\t\t\tpass\r\n\r\n\t\tsource = self.resources_dir\r\n\t\ttarget = self.build_dir\r\n\r\n\t\tfor root, dirs, files in os.walk(source):\r\n\t\t\tfor name in ignoreDirs:\r\n\t\t\t\tif name in dirs:\r\n\t\t\t\t\tdirs.remove(name)\t# don't visit ignored directories\r\n\t\t\tfor file in files:\r\n\t\t\t\tif file in ignoreFiles:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tfrom_ = os.path.join(root, file)\r\n\t\t\t\tto_ = os.path.expanduser(from_.replace(source, target, 1))\r\n\t\t\t\tto_directory = os.path.expanduser(os.path.split(to_)[0])\r\n\t\t\t\tif not os.path.exists(to_directory):\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tos.makedirs(to_directory)\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\tfp = os.path.splitext(file)\r\n\t\t\t\tif fp[1]=='.js':\r\n\t\t\t\t\tself.count+=1\r\n\t\t\t\t#\tcompile_js(from_,to_)\r\n\t\t\t\t#else:\r\n\t\t\t\tshutil.copy(from_,to_)\r\n\t\t\r\n\t\ttitanium_js = mako.template.Template(\"<%!\\n\\\r\n\tdef jsQuoteEscapeFilter(str):\\n\\\r\n\t\treturn str.replace(\\\"\\\\\\\"\\\",\\\"\\\\\\\\\\\\\\\"\\\")\\n\\\r\n%>\\n\" + \"var require={\\n\\\r\n\tanalytics: ${app_analytics | jsQuoteEscapeFilter},\\n\\\r\n\tapp: {\\n\\\r\n\t\tcopyright: \\\"${app_copyright | jsQuoteEscapeFilter}\\\",\\n\\\r\n\t\tdescription: \\\"${app_description | jsQuoteEscapeFilter}\\\",\\n\\\r\n\t\tguid: \\\"${app_guid | jsQuoteEscapeFilter}\\\",\\n\\\r\n\t\tid: \\\"${app_name | jsQuoteEscapeFilter}\\\",\\n\\\r\n\t\tname: \\\"${app_name | jsQuoteEscapeFilter}\\\",\\n\\\r\n\t\tpublisher: \\\"${app_publisher | jsQuoteEscapeFilter}\\\",\\n\\\r\n\t\turl: 
\\\"${app_url | jsQuoteEscapeFilter}\\\",\\n\\\r\n\t\tversion: \\\"${app_version | jsQuoteEscapeFilter}\\\"\\n\\\r\n\t},\\n\\\r\n\tdeployType: \\\"${deploy_type | jsQuoteEscapeFilter}\\\",\\n\\\r\n\thas: {\\n\\\r\n\t\t\\\"declare-property-methods\\\": true\\n\\\r\n\t},\\n\\\r\n\tproject: {\\n\\\r\n\t\tid: \\\"${project_id | jsQuoteEscapeFilter}\\\",\\n\\\r\n\t\tname: \\\"${project_name | jsQuoteEscapeFilter}\\\"\\n\\\r\n\t},\\n\\\r\n\tti: {\\n\\\r\n\t\tversion: \\\"${ti_version | jsQuoteEscapeFilter}\\\"\\n\\\r\n\t},\\n\\\r\n\tvendorPrefixes: {\\n\\\r\n\t\tcss: [\\\"\\\", \\\"-webkit-\\\", \\\"-moz-\\\", \\\"-ms-\\\", \\\"-o-\\\", \\\"-khtml-\\\"],\\n\\\r\n\t\tdom: [\\\"\\\", \\\"Webkit\\\", \\\"Moz\\\", \\\"ms\\\", \\\"O\\\", \\\"Khtml\\\"]\\n\\\r\n\t}\\n\\\r\n};\\n\".encode('utf-8')).render(\r\n\t\t\t\tproject_name=self.project_name,\r\n\t\t\t\tproject_id=self.appid,\r\n\t\t\t\tdeploy_type=deploytype,\r\n\t\t\t\tapp_id=self.appid,\r\n\t\t\t\tapp_analytics='true' if ti.properties['analytics']=='true' else 'false',\r\n\t\t\t\tapp_publisher=ti.properties['publisher'],\r\n\t\t\t\tapp_url=ti.properties['url'],\r\n\t\t\t\tapp_name=ti.properties['name'],\r\n\t\t\t\tapp_version=ti.properties['version'],\r\n\t\t\t\tapp_description=ti.properties['description'],\r\n\t\t\t\tapp_copyright=ti.properties['copyright'],\r\n\t\t\t\tapp_guid=ti.properties['guid'],\r\n\t\t\t\tti_version=sdk_version\r\n\t\t\t) + self.load_api(os.path.join(src_dir,\"loader.js\")) + self.load_api(os.path.join(src_dir,\"titanium.js\"))\r\n\t\t\r\n\t\tif deploytype == 'all':\r\n\t\t\tprint \"Deploy type is 'all' - all modules will be included into dist\"\r\n\t\t\tfor root, dirs, files in os.walk(src_dir):\r\n\t\t\t\tfor name in ignoreDirs:\r\n\t\t\t\t\tif name in dirs:\r\n\t\t\t\t\t\tdirs.remove(name)\t# don't visit ignored directories\r\n\t\t\t\tfor file in files:\r\n\t\t\t\t\tif file in ignoreFiles or file == 'titanium.js':\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\tpath = os.path.join(root, file)\r\n\t\t\t\t\tfp = os.path.splitext(file)\r\n\t\t\t\t\tif fp[1]=='.js':\r\n\t\t\t\t\t\t(path, fname) = os.path.split(path)\r\n\t\t\t\t\t\t(path, ddir) = os.path.split(path)\r\n\t\t\t\t\t\tif ddir != 'src':\r\n\t\t\t\t\t\t\tfname = ddir + \"/\" + fname\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tself.defines.index(fname)\r\n\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\tself.defines.append(fname)\r\n\t\t\r\n\t\ttitanium_css = ''\r\n\t\t\r\n\t\ttry:\r\n\t\t\tshutil.rmtree(os.path.join(self.build_dir, 'Ti'))\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\t\r\n\t\tprint \"Copying %s to %s\" % (os.path.join(src_dir, 'Ti'), self.build_dir)\r\n\t\tshutil.copytree(os.path.join(src_dir, 'Ti'), os.path.join(self.build_dir, 'Ti'))\r\n\t\t\r\n\t\tfor api in self.defines:\r\n\t\t\tapi_file = os.path.join(src_dir,api)\r\n\t\t\tif not os.path.exists(api_file):\r\n\t\t\t\tprint \"[ERROR] couldn't find file: %s\" % api_file\r\n\t\t\t\tsys.exit(1)\r\n\t\t\telse:\r\n\t\t\t\tprint \"[DEBUG] found: %s\" % api_file\r\n\t\t\t\t\r\n\t\t\t\tdest = os.path.join(self.build_dir, api)\r\n\t\t\t\ttry:\r\n\t\t\t\t\tos.makedirs(os.path.dirname(dest))\r\n\t\t\t\texcept:\r\n\t\t\t\t\tpass\r\n\t\t\t\tshutil.copy(api_file, dest)\r\n\t\t\t\t\r\n\t\t\t\tif api_file.find('.js') != -1:\r\n\t\t\t\t\t# TODO: it would be nice to detect if we *need* to add a ;\r\n\t\t\t\t\ttitanium_js += '%s;\\n' % self.load_api(api_file, api)\r\n\t\t\t\telif api_file.find('.css') != -1:\r\n\t\t\t\t\ttitanium_css += '%s\\n\\n' % self.load_api(api_file, api)\r\n\t\t\t\telse:\r\n\t\t\t\t\ttarget_file = 
os.path.abspath(os.path.join(self.build_dir,'titanium', api))\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tos.makedirs(os.path.dirname(target_file))\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\tshutil.copy(api_file, target_file)\r\n\t\t\t\t\t# open(target_file,'wb').write(open(api_file,'rb').read())\r\n\t\t\r\n\t\tif len(ti.app_properties):\r\n\t\t\ttitanium_js += '(function(p){'\r\n\t\t\t\r\n\t\t\tfor name in ti.app_properties:\r\n\t\t\t\tprop = ti.app_properties[name]\r\n\t\t\t\t\r\n\t\t\t\tif prop['type'] == 'bool':\r\n\t\t\t\t\ttitanium_js += 'p.setBool(\"' + name + '\",' + prop['value'] + ');'\r\n\t\t\t\telif prop['type'] == 'int':\r\n\t\t\t\t\ttitanium_js += 'p.setInt(\"' + name + '\",' + prop['value'] + ');'\r\n\t\t\t\telif prop['type'] == 'double':\r\n\t\t\t\t\ttitanium_js += 'p.setDouble(\"' + name + '\",' + prop['value'] + ');'\r\n\t\t\t\telse:\r\n\t\t\t\t\ttitanium_js += 'p.setString(\"' + name + '\",\"' + str(prop['value']).replace('\"', '\\\\\"') + '\");'\r\n\t\t\t\r\n\t\t\ttitanium_js += '}(Ti.App.Properties));'\r\n\t\t\r\n\t\tti_dir = os.path.join(self.build_dir,'titanium')\r\n\t\ttry:\r\n\t\t\tos.makedirs(ti_dir)\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\t\r\n\t\to = codecs.open(os.path.join(ti_dir,'titanium.js'),'w',encoding='utf-8')\r\n\t\to.write(HEADER + titanium_js + FOOTER)\r\n\t\to.close()\r\n\t\t\r\n\t\t# detect any fonts and add font face rules to the css file\r\n\t\tresource_dir = os.path.join(project_dir, 'Resources')\r\n\t\tfonts = {}\r\n\t\tfor dirname, dirnames, filenames in os.walk(resource_dir):\r\n\t\t\tfor filename in filenames:\r\n\t\t\t\tfname, ext = os.path.splitext(filename)\r\n\t\t\t\text = ext.lower()\r\n\t\t\t\tif ext == '.otf' or ext == '.woff':\r\n\t\t\t\t\tif not fname in fonts:\r\n\t\t\t\t\t\tfonts[fname] = []\r\n\t\t\t\t\tfonts[fname].append(os.path.join(dirname, filename)[len(resource_dir):])\r\n\t\tfor font in fonts:\r\n\t\t\ttitanium_css += \"@font-face{font-family:%s;src:url(%s);}\\n\" % (font, \"),url(\".join(fonts[font]))\r\n\t\t\r\n\t\to = codecs.open(os.path.join(ti_dir,'titanium.css'), 'w', encoding='utf-8')\r\n\t\to.write(HEADER + titanium_css + 'end' + FOOTER)\r\n\t\to.close()\r\n\r\n\t\ttry:\r\n\t\t\tstatus_bar_style = ti.properties['statusbar-style']\r\n\t\t\t\r\n\t\t\tif status_bar_style == 'default' or status_bar_style=='grey':\r\n\t\t\t\tstatus_bar_style = 'default'\r\n\t\t\telif status_bar_style == 'opaque_black' or status_bar_style == 'opaque' or status_bar_style == 'black':\r\n\t\t\t\tstatus_bar_style = 'black'\r\n\t\t\telif status_bar_style == 'translucent_black' or status_bar_style == 'transparent' or status_bar_style == 'translucent':\r\n\t\t\t\tstatus_bar_style = 'black-translucent'\r\n\t\t\telse:\t\r\n\t\t\t\tstatus_bar_style = 'default'\r\n\t\texcept:\r\n\t\t\tstatus_bar_style = 'default'\r\n\r\n\t\tmain_template = codecs.open(os.path.join(src_dir,'index.html'), encoding='utf-8').read().encode(\"utf-8\")\r\n\t\tmain_template = mako.template.Template(main_template).render(\r\n\t\t\t\tti_version=sdk_version,\r\n\t\t\t\tti_statusbar_style=status_bar_style,\r\n\t\t\t\tti_generator=\"Appcelerator Titanium Mobile 
\"+sdk_version,\r\n\t\t\t\tproject_name=self.project_name,\r\n\t\t\t\tproject_id=self.appid,\r\n\t\t\t\tdeploy_type=deploytype,\r\n\t\t\t\tapp_id=self.appid,\r\n\t\t\t\tapp_analytics=ti.properties['analytics'],\r\n\t\t\t\tapp_publisher=ti.properties['publisher'],\r\n\t\t\t\tapp_url=ti.properties['url'],\r\n\t\t\t\tapp_name=ti.properties['name'],\r\n\t\t\t\tapp_version=ti.properties['version'],\r\n\t\t\t\tapp_description=ti.properties['description'],\r\n\t\t\t\tapp_copyright=ti.properties['copyright'],\r\n\t\t\t\tapp_guid=ti.properties['guid'],\r\n\t\t\t\tti_header=HTML_HEADER,\r\n\t\t\t\tti_css=titanium_css,\r\n\t\t\t\tti_js=titanium_js)\r\n\r\n\t\tindex_file = os.path.join(self.build_dir,'index.html')\r\n\t\to = codecs.open(index_file,'w',encoding='utf-8')\r\n\t\to.write(main_template)\r\n\t\to.close()\r\n\r\n\t\t# write localization data\r\n\t\ti18n_content = \"Titanium._5.setLocaleData(\"\r\n\t\tdef xml2json(collector, node):\r\n\t\t\tcollector[node.attributes.items()[0][1]] = node.firstChild.nodeValue\r\n\t\t\treturn collector\r\n\r\n\t\tlang_arr = {}\r\n\t\tfor root, dirs, files in os.walk(os.path.join(self.project_dir,'i18n')):\r\n\t\t\tfor file in files:\r\n\t\t\t\tif file != 'strings.xml':\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tlang = os.path.split(root)[1]\r\n\t\t\t\tlang_arr[lang] = {}\r\n\t\t\t\tlang_file = codecs.open(os.path.join(root, file), 'r', 'utf-8').read().encode(\"utf-8\")\r\n\t\t\t\tdom = xml.dom.minidom.parseString(lang_file)\r\n\t\t\t\tstrings = dom.getElementsByTagName(\"string\")\r\n\t\t\t\treduce(xml2json, strings, lang_arr[lang])\r\n\t\ti18n_content += json.dumps(lang_arr)\r\n\r\n\t\ti18n_content += \");\";\r\n\t\ti18n_file = os.path.join(self.build_dir,'titanium', 'i18n.js')\r\n\t\to = codecs.open(i18n_file,'w', encoding='utf-8')\r\n\t\to.write(i18n_content)\r\n\t\to.close()\r\n\t\tprint \"[INFO] Compiled %d files for %s\" % (self.count,ti.properties['name'])\r\n\t\t\r\n\t\t\r\n\tdef load_api(self,file, api=\"\"):\r\n\t\tfile_contents = codecs.open(file, 'r', 'utf-8').read()\r\n\t\tif not self.debug and file.find('.js') != -1:\r\n\t\t\treturn jspacker.jsmin(file_contents)\r\n\t\telif file.find('.css') != -1:\r\n\t\t\t# need to replace urls to add directory prefix into path\r\n\t\t\treturn re.sub(r'(url\\s*\\([\\'\"]?)', r'\\1' + os.path.split(api)[0] + '/', file_contents)\r\n\t\telse:\r\n\t\t\treturn file_contents\r\n\t\t\r\n#\tdef add_symbol(self,api):\r\n#\t\tprint \"[DEBUG] detected symbol: %s\" % api\r\n#\t\tcurtoken = ''\r\n#\t\ttokens = api.split(\".\")\r\n#\t\tif len(tokens) > 1:\r\n#\t\t\ttry:\r\n#\t\t\t\tself.modules.index(tokens[0])\r\n#\t\t\texcept:\r\n#\t\t\t\tself.modules.append(tokens[0])\r\n#\t\t\t\r\n#\t\tif self.api_map.has_key(api):\r\n#\t\t\tfor file in self.api_map[api]:\r\n#\t\t\t\tif len(tokens) > 1:\r\n#\t\t\t\t\tfn = \"Ti.%s/%s\" % (tokens[0],file)\r\n#\t\t\t\telse:\r\n#\t\t\t\t\tfn = \"Ti/%s\" % file\r\n#\t\t\t\ttry:\r\n#\t\t\t\t\tself.defines.index(fn)\r\n#\t\t\t\texcept:\r\n#\t\t\t\t\tself.defines.append(fn)\r\n#\t\telse:\r\n#\t\t\tprint \"[WARN] couldn't find API: %s\" % api\r\n#\t\t\t#sys.exit(1)\r\n\r\n#\tdef extract_tokens(self,sym,line):\r\n#\t\t# sloppy joe parsing coooode\r\n#\t\t# could be prettier and faster but it works and rather reliable\r\n#\t\tc = 0\r\n#\t\ttokens = []\r\n#\t\tsearch = sym + \".\"\r\n#\t\tsize = len(search)\r\n#\t\twhile True:\r\n#\t\t\ti = line.find(search,c)\r\n#\t\t\tif i < 0:\r\n#\t\t\t\tbreak\r\n#\t\t\tfound = False\r\n#\t\t\tbuf = ''\r\n#\t\t\tx = 0\r\n#\t\t\tfor n in line[i+size:]:\r\n#\t\t\t\t# look 
for a terminal - this could probably be easier\r\n#\t\t\t\tif n in ['(',')','{','}','=',',',' ',':','!','[',']','+','*','/','~','^','%','\\n','\\t','\\r']:\r\n#\t\t\t\t\tfound = True\r\n#\t\t\t\t\tbreak\r\n#\t\t\t\tbuf+=n\r\n#\t\t\t\tx+=1\r\n#\t\t\ttokens.append(buf)\r\n#\t\t\tif found:\r\n#\t\t\t\tc = i + x + 1\r\n#\t\t\t\tcontinue\r\n#\t\t\tbreak\r\n#\t\treturn tokens\t\r\n\r\n#\tdef expand_ti_includes(self,line,filename):\r\n#\t\t'''idx = line.find('Ti.include')\r\n#\t\tif idx!=-1:\r\n#\t\t\tsrcs = line[idx+11:-1]\r\n#\t\t\tfor srcQ in srcs.split(','):\r\n#\t\t\t\t# remove leading and trailing slashes and spaces\r\n#\t\t\t\tsrc = re.sub(r'\\s*([\\\"\\'])([^\\1]*)\\1[\\w\\W]*$', r'\\2', srcQ, 0, re.M)\r\n#\r\n#\t\t\t\t# replace dir separator with platform specific\r\n#\t\t\t\t# if first char is / - consider it as absolute to resources dir\r\n#\t\t\t\tif src[0] == '/':\r\n#\t\t\t\t\tsrc_path = os.path.join(self.resources_dir,src[1:len(src)])\r\n#\t\t\t\telse:\r\n#\t\t\t\t\tsrc_path = os.path.join(os.path.dirname(filename),src)\r\n#\t\t\t\t# normalize path to match all dir separators\r\n#\t\t\t\tsrc_path = os.path.normpath(src_path)\r\n#\r\n#\t\t\t\tif not os.path.exists(src_path):\r\n#\t\t\t\t\tprint \"[ERROR] Cannot find include file at: %s\" % src_path\r\n#\t\t\t\t\tsys.exit(1)\r\n#\t\t\t\tsource = Compiler.make_function_from_file(src_path,self)\r\n#\t\t\t\tself.ti_includes[src] = source'''\r\n\r\n#\tdef compile_js(self,file_contents,fn):\r\n#\t\tcontents = \"\"\r\n#\t\tfor line in file_contents.split(';'):\r\n#\t\t\tself.expand_ti_includes(line,fn)\r\n#\t\t\tif line == None or line=='' or line == '\\n': continue\r\n#\t\t\tfor sym in self.extract_tokens('Ti',line):\r\n#\t\t\t\tself.add_symbol(sym)\r\n#\t\t\tcontents+='%s;' % line\r\n#\t\treturn contents\r\n\t\r\n#\t@classmethod\r\n#\tdef make_function_from_file(cls,file,instance=None):\r\n#\t\tf = os.path.expanduser(file)\r\n#\t\tfile_contents = codecs.open(f, 'r', 'utf-8').read()\r\n#\t\tif not instance or not instance.debug:\r\n#\t\t\tfile_contents = jspacker.jsmin(file_contents)\r\n#\t\tfile_contents = file_contents.replace('Titanium.','Ti.')\r\n#\t\tif instance:\r\n#\t\t\tfile_contents = instance.compile_js(file_contents, f)\r\n#\t\treturn file_contents\r\n", "id": "3172598", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "support/mobileweb/compiler.py" } ]
0
sunnnymskang
[ { "content": "import nltk\nnltk.download('wordnet')\nimport nltk.data\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nfrom nltk.stem.porter import *\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\nnltk.download('vader_lexicon')\nnltk.download('punkt')\n\nimport spacy\nspacy.load('en')\nfrom spacy.lang.en import English\nparser = English()\n\nimport pandas as pd\nimport numpy as np\nimport re\nimport os\nimport glob\nimport operator\n\nimport gensim\nfrom gensim.utils import simple_preprocess\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.metrics import f1_score, precision_score, confusion_matrix\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.pipeline import make_pipeline\n\nfrom lime import lime_text\nfrom lime.lime_text import LimeTextExplainer\nfrom nltk.tokenize import RegexpTokenizer\n\nimport sys\nfrom pathlib import Path\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\nfrom src.features.build_features import *\nfrom modelanalys import *\nfrom analys import *\nfrom text_analys import *\nimport CV_TF_Word2Vec_Anal\n\n\n\ndef text_process(mess):\n \"\"\"\n Takes in a string of text, then performs the following:\n 1. Remove all punctuation\n 2. Remove all stopwords\n 3. Returns a list of the cleaned text\n \"\"\"\n # Check characters to see if they are in punctuation\n nopunc = [char for char in mess if char not in string.punctuation]\n\n # Join the characters again to form the string.\n nopunc = ''.join(nopunc)\n\n # Now just remove any stopwords\n words = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n words = [word for word in words if word.lower() not in pills['BrandName'].values]\n # words = [word for word in words if word.lower() not in pills['ChemName'].values]\n words = [word.lower() for word in words if word.isalpha()]\n words = [word.lower() for word in words if len(word) > 2]\n return words\n\n\ndef cv(data):\n count_vectorizer = CountVectorizer(analyzer=text_process)\n emb = count_vectorizer.fit_transform(data)\n return emb, count_vectorizer\n\n\n# ['Wellbutrin', 'Zoloft', 'Effexor', 'Lexapro', 'Prozac']\n# ['Wellbutrin', 'Zoloft', 'Effexor', 'Lexapro', 'Prozac', 'Paxil', 'Cymbalta', 'Celexa', 'Remeron', 'Seroquel']\n# using CV for vector transformation and feature extraction\n\ndef tfidf(data):\n tfidf_vectorizer = TfidfVectorizer(analyzer=text_process)\n train = tfidf_vectorizer.fit_transform(data)\n return train, tfidf_vectorizer\n\n\ndef CV_anal(top_5_all_drugs_clean, newpath_1):\n # accuracy\n list_corpus = top_5_all_drugs_clean[\"body\"].tolist()\n list_labels = top_5_all_drugs_clean[\"label\"].tolist()\n X_train, X_test, y_train, y_test = train_test_split(list_corpus, list_labels, test_size=0.2,\n random_state=40)\n\n ####################################################################################Count vectorizer\n X_train_counts, count_vectorizer = cv(X_train)\n X_test_counts = count_vectorizer.transform(X_test)\n plot = plot_LSA(X_train_counts, y_train)\n plot.savefig(\"%s/LSA_CV_post_process.png\" % (newpath_1))\n plot.close()\n ########################################################################### Logistic regression/Accuracy/CM\n\n clf = LogisticRegression(C=30.0, class_weight='balanced', solver='newton-cg',\n multi_class='multinomial', n_jobs=-1, random_state=40)\n 
clf.fit(X_train_counts, y_train)\n y_predicted_counts = clf.predict(X_test_counts)\n\n accuracy, precision, recall, f1 = get_metrics(y_test, y_predicted_counts)\n print(\" = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f\" % (accuracy, precision, recall, f1))\n\n labels = ['Lexapro', 'Effexor', 'Wellbutrin', 'Prozac', 'Zoloft']\n cm = confusion_matrix(y_test, y_predicted_counts, labels)\n plot = plot_confusion_matrix(cm, normalize=True, title='Confusion matrix')\n plot.savefig(\"%s/cm_CV_LR.png\" % (newpath_1))\n plot.close()\n importance = get_most_important_features(count_vectorizer, clf, 10)\n print(importance[1])\n for i in range(len(importance)):\n top_scores = [a[0] for a in importance[i]['tops']]\n top_words = [a[1] for a in importance[i]['tops']]\n bottom_scores = [a[0] for a in importance[i]['bottom']]\n bottom_words = [a[1] for a in importance[i]['bottom']]\n title = importance[i]['name']\n plot = plot_important_words(top_scores, top_words, bottom_scores, bottom_words,\n \"Most important words for relevance\", title)\n plot.savefig(\"%s/%simportance_CV_LR.png\" % (newpath_1, title))\n plot.close()\n\n\n##################################################################################### TF IDF transform\ndef TFIDF_anal(top_5_all_drugs_clean, newpath_1):\n # accuracy\n list_corpus = top_5_all_drugs_clean[\"body\"].tolist()\n list_labels = top_5_all_drugs_clean[\"label\"].tolist()\n X_train, X_test, y_train, y_test = train_test_split(list_corpus, list_labels, test_size=0.2,\n random_state=40)\n X_train_tfidf, tfidf_vectorizer = tfidf(X_train)\n X_test_tfidf = tfidf_vectorizer.transform(X_test)\n\n plot = plot_LSA(X_train_tfidf, y_train)\n plot.savefig(\"%s/LSA_TFIDF_post_process.png\" % (newpath_1))\n plot.close()\n ######################################################################################LR on TFIDF/CM,importance\n clf_tfidf = LogisticRegression(C=30.0, class_weight='balanced', solver='newton-cg',\n multi_class='multinomial', n_jobs=-1, random_state=40)\n clf_tfidf.fit(X_train_tfidf, y_train)\n y_predicted_tfidf = clf_tfidf.predict(X_test_tfidf)\n accuracy_tfidf, precision_tfidf, recall_tfidf, f1_tfidf = get_metrics(y_test, y_predicted_tfidf)\n print(\"accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f\" % (accuracy_tfidf, precision_tfidf,\n recall_tfidf, f1_tfidf))\n cm2 = confusion_matrix(y_test, y_predicted_tfidf)\n plot = plot_confusion_matrix(cm2, normalize=True, title='Confusion matrix', )\n plot.savefig(\"%s/cm_TFIDF_LR.png\" % (newpath_1))\n plot.close()\n print(\"TFIDF confusion matrix\")\n print(cm2)\n # These are just marginally better.. 
for obviuos reason: You don't see much of a separation\n importance_tfidf = get_most_important_features(tfidf_vectorizer, clf_tfidf, 10)\n\n for i in range(len(importance_tfidf)):\n top_scores = [a[0] for a in importance_tfidf[i]['tops']]\n top_words = [a[1] for a in importance_tfidf[i]['tops']]\n bottom_scores = [a[0] for a in importance_tfidf[i]['bottom']]\n bottom_words = [a[1] for a in importance_tfidf[i]['bottom']]\n title = importance_tfidf[i]['name']\n plot = plot_important_words(top_scores, top_words, bottom_scores, bottom_words,\n \"Most important words for relevance\", title)\n plot.savefig(\"%s/%simportance_TFIDF_LR.png\" % (newpath_1, title))\n plot.close()\n\n\n#############################################################################################word2vectransform/CM/\n# # Now on to Word2Vec representation: this incorporates the synonym structure\n#\n#\ndef get_average_word2vec(tokens_list, vector, generate_missing=False, k=300):\n if len(tokens_list) < 1:\n return np.zeros(k)\n if generate_missing:\n vectorized = [vector[word] if word in vector else np.random.rand(k) for word in tokens_list]\n else:\n vectorized = [vector[word] if word in vector else np.zeros(k) for word in tokens_list]\n length = len(vectorized)\n summed = np.sum(vectorized, axis=0)\n averaged = np.divide(summed, length)\n return averaged\n\n\n# top_5_all_drugs_clean = pd.DataFrame(columns = ['author', 'body', 'id', 'score', 'selftext_bysent', 'selftext_byWords', 'label'])\n\ndef get_word2vec_embeddings(vectors, clean_questions, generate_missing=False):\n embeddings = clean_questions['selftext_byWords'].apply(lambda x: get_average_word2vec(x, vectors,\n generate_missing=generate_missing))\n return list(embeddings)\n\n\ndef word2vec_pipeline(examples, word2vec):\n vector_store = word2vec\n tokenizer = RegexpTokenizer(r'\\w+')\n tokenized_list = []\n for example in examples:\n example_tokens = tokenizer.tokenize(example)\n vectorized_example = get_average_word2vec(example_tokens, vector_store, generate_missing=False, k=300)\n tokenized_list.append(vectorized_example)\n return clf_w2v.predict_proba(tokenized_list)\n\n\ndef explain_one_instance(instance, class_names):\n explainer = LimeTextExplainer(class_names=class_names)\n exp = explainer.explain_instance(instance, word2vec_pipeline, word2vec, num_features=6)\n return exp\n\n\ndef visualize_one_exp(features, labels, index, class_names):\n exp = explain_one_instance(features[index], word2vec, class_names=class_names)\n print('Index: %d' % index)\n print('True class: %s' % [labels[index]])\n exp.show_in_notebook(text=True)\n\n\ndef get_statistical_explanation(test_set, sample_size, word2vec_pipeline, label_dict):\n sample_sentences = random.sample(test_set, sample_size)\n explainer = LimeTextExplainer()\n\n labels_to_sentences = defaultdict(list)\n contributors = defaultdict(dict)\n\n # First, find contributing words to each class\n for sentence in sample_sentences:\n probabilities = word2vec_pipeline([sentence])\n curr_label = probabilities[0].argmax()\n labels_to_sentences[curr_label].append(sentence)\n exp = explainer.explain_instance(sentence, word2vec_pipeline, num_features=6, labels=[curr_label])\n listed_explanation = exp.as_list(label=curr_label)\n\n for word, contributing_weight in listed_explanation:\n if word in contributors[curr_label]:\n contributors[curr_label][word].append(contributing_weight)\n else:\n contributors[curr_label][word] = [contributing_weight]\n\n # average each word's contribution to a class, and sort them by impact\n 
average_contributions = {}\n sorted_contributions = {}\n for label, lexica in contributors.items():\n curr_label = label\n curr_lexica = lexica\n average_contributions[curr_label] = pd.Series(index=curr_lexica.keys())\n for word, scores in curr_lexica.items():\n average_contributions[curr_label].loc[word] = np.sum(np.array(scores)) / sample_size\n detractors = average_contributions[curr_label].sort_values()\n supporters = average_contributions[curr_label].sort_values(ascending=False)\n sorted_contributions[label_dict[curr_label]] = {\n 'detractors': detractors,\n 'supporters': supporters\n }\n return sorted_contributions\n\n\ndef Word2Vec_anal(top_5_all_drugs_clean, newpath_1):\n # accuracy\n print(\"test_train_split\")\n list_corpus = top_5_all_drugs_clean[\"body\"].tolist()\n list_labels = top_5_all_drugs_clean[\"label\"].tolist()\n X_train, X_test, y_train, y_test = train_test_split(list_corpus, list_labels, test_size=0.2,\n random_state=40)\n\n print(\"word2vecembedding\")\n word2vec_path = \"GoogleNews-vectors-negative300.bin.gz\"\n word2vec = gensim.models.keyedvectors.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)\n embeddings = get_word2vec_embeddings(word2vec, top_5_all_drugs_clean)\n X_train_word2vec, X_test_word2vec, y_train_word2vec, y_test_word2vec = train_test_split(embeddings, list_labels,\n test_size=0.2,\n random_state=40)\n plot = plot_LSA(embeddings, list_labels)\n plot.savefig(\"%s/Word2Vec_LSA_post_process\" % (newpath_1))\n plot.close()\n\n plot_TSNE(embeddings, list_labels, newpath_1, \"word2vec\", savepath=\"TSNE_demo.csv\", plot=True)\n plt.close()\n print(\"word2vecLR\")\n clf_w2v = LogisticRegression(C=30.0, class_weight='balanced', solver='newton-cg',\n multi_class='multinomial', random_state=40)\n clf_w2v.fit(X_train_word2vec, y_train_word2vec)\n y_predicted_word2vec = clf_w2v.predict(X_test_word2vec)\n\n accuracy_word2vec, precision_word2vec, recall_word2vec, f1_word2vec = get_metrics(y_test_word2vec,\n y_predicted_word2vec)\n print(\"accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f\" % (accuracy_word2vec, precision_word2vec,\n recall_word2vec, f1_word2vec))\n\n cm_w2v = confusion_matrix(y_test_word2vec, y_predicted_word2vec)\n plot = plot_confusion_matrix(cm_w2v, normalize=True, title='Confusion matrix')\n plot.savefig(\"%s/Word2Vec_cm_LR\" % (newpath_1))\n plot.close()\n print(\"Word2Vec confusion matrix\")\n print(cm_w2v)\n\n X_train_data, X_test_data, y_train_data, y_test_data = train_test_split(list_corpus, list_labels, test_size=0.2,\n random_state=40)\n vector_store = word2vec\n # X_train_counts, count_vectorizer = cv(X_train)\n # c = make_pipeline(count_vectorizer, clf)\n drug_class = {'Zoloft': 0, 'Lexapro': 1, 'Prozac': 2, 'Effexor': 3, 'Wellbutrin': 4}\n visualize_one_exp(X_test_data, y_test_data, 65, drug_class.keys())\n import random\n from collections import defaultdict\n random.seed(40)\n\n label_to_text = {0: 'Zoloft', 1: 'Lexapro', 2: 'Prozac', 3: 'Effexor', 4: 'Wellbutrin'}\n sorted_contributions = get_statistical_explanation(X_test_data, 100, word2vec_pipeline, label_to_text, word2vec)\n\n # First index is the class (Disaster)\n # Second index is 0 for detractors, 1 for supporters\n # Third is how many words we sample\n for i in (label_to_text.values()):\n title = i\n top_words = sorted_contributions[i]['supporters'][30:40].index.tolist()\n top_scores = sorted_contributions[i]['supporters'][30:40].tolist()\n bottom_words = sorted_contributions[i]['detractors'][30:40].index.tolist()\n bottom_scores = 
sorted_contributions[i]['detractors'][30:40].tolist()\n plot = plot_important_words(top_scores, top_words, bottom_scores, bottom_words,\n \"Most important words for relevance\", title)\n plot.savefig(\"%s/%simportance_Word2Vec_LR.png\" % (newpath_1, title))\n plot.close()\n", "id": "7199334", "language": "Python", "matching_score": 7.5784502029418945, "max_stars_count": 0, "path": "src/models/CV_TF_Word2Vec_Anal.py" }, { "content": "import nltk\nnltk.download('wordnet')\nimport nltk.data\n\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\nnltk.download('vader_lexicon')\nnltk.download('punkt')\nimport pandas as pd\nimport numpy as np\nimport re\nfrom analys import *\nfrom text_analys import *\n\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\nfrom src.features.build_features import *\nimport re\nimport gensim\n\nspacy.load('en')\nfrom spacy.lang.en import English\nparser = English()\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nimport matplotlib.patches as mpatches\nfrom sklearn.preprocessing import LabelEncoder\nimport numpy as np\nfrom modelanalys import *\nfrom sklearn.metrics import confusion_matrix\n\nimport string\nfrom nltk.corpus import stopwords\n\ndocume = pd.read_excel(\"Pushift_doc.xlsx\")\npills = pd.read_csv('antidepressants.txt')\npills['BrandName']= pills['Name'].str.split('\\s+').str[0].str.strip()\npills['ChemName']= pills['Name'].str.split('\\s+').str[1].str.strip(\"()\").str.lower()\n\ndef text_process(mess):\n \"\"\"\n Takes in a string of text, then performs the following:\n 1. Remove all punctuation\n 2. Remove all stopwords\n 3. Returns a list of the cleaned text\n \"\"\"\n # Check characters to see if they are in punctuation\n nopunc = [char for char in mess if char not in string.punctuation]\n\n # Join the characters again to form the string.\n nopunc = ''.join(nopunc)\n\n # Now just remove any stopwords\n words = [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n words = [word for word in words if word.lower() not in pills['BrandName'].values]\n# words = [word for word in words if word.lower() not in pills['ChemName'].values]\n words = [word.lower() for word in words if word.isalpha()]\n words = [word.lower() for word in words if len(word)>2]\n return words\n\n\ndef get_average_word2vec(tokens_list, vector, generate_missing=False, k=300):\n if len(tokens_list)<1:\n return np.zeros(k)\n if generate_missing:\n vectorized = [vector[word] if word in vector else np.random.rand(k) for word in tokens_list]\n else:\n vectorized = [vector[word] if word in vector else np.zeros(k) for word in tokens_list]\n length = len(vectorized)\n summed = np.sum(vectorized, axis=0)\n averaged = np.divide(summed, length)\n return averaged\n\n# top_5_all_drugs_clean = pd.DataFrame(columns = ['author', 'body', 'id', 'score', 'selftext_bysent', 'selftext_byWords', 'label'])\n\ndef get_word2vec_embeddings(vectors, clean_questions, generate_missing=False):\n embeddings = clean_questions['selftext_byWords'].apply(lambda x: get_average_word2vec(x, vectors,\n generate_missing=generate_missing))\n return list(embeddings)\ndef word2vec_pipeline(examples,word2vec):\n vector_store= word2vec\n tokenizer = RegexpTokenizer(r'\\w+')\n tokenized_list = []\n for example in examples:\n example_tokens = tokenizer.tokenize(example)\n vectorized_example = get_average_word2vec(example_tokens, vector_store, 
generate_missing=False, k=300)\n tokenized_list.append(vectorized_example)\n return clf_w2v.predict_proba(tokenized_list)\n\n\ndef LSVC_anal(top_5_all_drugs_clean,newpath_1):\n list_corpus = top_5_all_drugs_clean[\"body\"].tolist()\n list_labels = top_5_all_drugs_clean[\"label\"].tolist()\n\n #top_10_all_drugs_clean = pd.DataFrame(columns = ['author', 'body', 'id', 'score', 'selftext_bysent', 'selftext_byWords',\n # 'label', 'sentiment_body','rating_body'])\n top_5_all_drugs_clean['rating_body']= top_5_all_drugs_clean['rating_body'].astype(float)\n\n mean_score = top_5_all_drugs_clean.groupby(['label'], as_index=False)['rating_body'].mean()\n weight_dict = {k:v for (k,v) in mean_score.loc[:,['label','rating_body']].values}\n print (weight_dict)\n\n X_train, X_test, y_train, y_test = train_test_split(list_corpus, list_labels, random_state=0, test_size=0.3)\n print (y_train[:3])\n\n # TF-IDF\n count_vect = CountVectorizer(analyzer= text_process)\n X_train_counts = count_vect.fit_transform(X_train)\n tf_transformer = TfidfTransformer().fit(X_train_counts)\n X_train_transformed = tf_transformer.transform(X_train_counts)\n\n X_test_counts = count_vect.transform(X_test)\n X_test_transformed = tf_transformer.transform(X_test_counts)\n\n labels = LabelEncoder()\n y_train_labels_fit = labels.fit(y_train)\n y_train_lables_trf = labels.transform(y_train)\n\n # print(labels.classes_)\n from sklearn.svm import LinearSVC\n from sklearn.calibration import CalibratedClassifierCV\n\n maps=list(labels.inverse_transform(range(len(labels.classes_))))\n values=range(len(labels.classes_))\n decodekeys= dict(zip(maps,values))\n print(decodekeys)\n weight_dict_encoded = {decodekeys[k]:v for (k,v) in weight_dict.items()}\n print (weight_dict_encoded)\n\n linear_svc = LinearSVC(class_weight = weight_dict_encoded)\n clf = linear_svc.fit(X_train_transformed,y_train_lables_trf)\n # linear_svc = LinearSVC()\n # clf = linear_svc.fit(X_train_transformed,y_train_lables_trf,top_5_all_drugs_clean['rating_body'].values)\n\n\n y_predicted_trf = clf.predict(X_test_transformed)\n y_test_labels_fit = labels.fit(y_test)\n # print (\"y_test_labels_fit\")\n # print (list(labels.classes_))\n y_test_lables_trf = labels.transform(y_test)\n # print (\"y_test_lables_trf\")\n # print (y_test_lables_trf[:10])\n # print (\"y_test\")\n # print (y_test[:10])\n # print (\"y_test_lables_trf inverse\")\n print (list(labels.inverse_transform(y_test_lables_trf[:10])))\n\n accuracy, precision, recall, f1 = get_metrics(y_test, labels.inverse_transform(y_predicted_trf))\n print(\" = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f\" % (accuracy, precision, recall, f1))\n\n labels_1 =list(labels.classes_)\n cm = confusion_matrix(y_test, labels.inverse_transform(y_predicted_trf))\n plot = plot_confusion_matrix(cm, normalize=True,title='Confusion matrix',labels=labels_1)\n plot.savefig(\"%s/TFIDF_LSVC.png\"%(newpath_1))\n plot.close()\n\n importance = get_most_important_features(count_vect , clf, 10)\n print (importance[1] )\n for i in range(len(importance)):\n top_scores = [a[0] for a in importance[i]['tops']]\n top_words = [a[1] for a in importance[i]['tops']]\n bottom_scores = [a[0] for a in importance[i]['bottom']]\n bottom_words = [a[1] for a in importance[i]['bottom']]\n title= importance[i]['name']\n plot= plot_important_words(top_scores, top_words, bottom_scores, bottom_words, \"Most important words for relevance\", title)\n plot.savefig(\"%s/%simportance_TFIDF_SVC.png\"%(newpath_1,title))\n plot.close()\n\n\n\n calibrated_svc = 
CalibratedClassifierCV(base_estimator=linear_svc,\n cv=\"prefit\")\n\n calibrated_svc.fit(X_train_transformed,y_train_lables_trf)\n predicted = calibrated_svc.predict(X_test_transformed)\n\n to_predict = [\"I have hyperinsomnia and social anxiety\"]\n p_count = count_vect.transform(to_predict)\n p_tfidf = tf_transformer.transform(p_count)\n print('Average accuracy on test set={}'.format(np.mean(predicted == labels.transform(y_test))))\n print('Predicted probabilities of demo input string are')\n print(calibrated_svc.predict_proba(p_tfidf))\n\n result = pd.DataFrame(calibrated_svc.predict_proba(p_tfidf)*100, columns=labels.classes_)\n new = pd.melt(result,var_name='Drug name', value_name= 'likelihood(%)' )\n new= new.round(2)\n print(new)\n result_sorted =new.sort_values(by=['likelihood(%)'], ascending=False )\n result_sorted= result_sorted.reset_index(drop=True)\n result_sorted.rename(index= {0:'1st',1:'2nd',2:'3rd',3:'4th',4:'5th'}, inplace=True)\n result_sorted.index.name= \"Rank\"\n print (result_sorted)\n\n # # Save to file in the current working directory\n from sklearn.externals import joblib\n joblib.dump(calibrated_svc, '%s/linSVC_mode.joblib'%(newpath_1))\n joblib.dump(count_vect, '%s/CountVect_model.joblib'%(newpath_1))\n joblib.dump(tf_transformer, '%s/TFIDF_model.joblib'%(newpath_1))\n\n pkl_filename = \"%s/labels.pkl\" %(newpath_1)\n with open(pkl_filename, 'wb') as file:\n pickle.dump(labels, file)\n file.close()\n\n\n\n\n # Word2vec\n word2vec_path = \"GoogleNews-vectors-negative300.bin.gz\"\n word2vec = gensim.models.keyedvectors.KeyedVectors.load_word2vec_format(word2vec_path, binary=True)\n embeddings = get_word2vec_embeddings(word2vec, top_5_all_drugs_clean)\n X_train_word2vec, X_test_word2vec, y_train_word2vec, y_test_word2vec = train_test_split(embeddings, list_labels, test_size=0.2, random_state=40)\n\n # print(labels.classes_)\n from sklearn.svm import LinearSVC\n from sklearn.calibration import CalibratedClassifierCV\n\n # maps=list(labels.inverse_transform(range(len(labels.classes_))))\n # values=range(len(labels.classes_))\n # decodekeys= dict(zip(maps,values))\n # print(decodekeys)\n # weight_dict_encoded = {decodekeys[k]:v for (k,v) in weight_dict.items()}\n # print (weight_dict_encoded)\n\n linear_svc = LinearSVC(class_weight = weight_dict)\n clf = linear_svc.fit(X_train_word2vec, y_train_word2vec)\n y_predicted_word2vec = clf.predict(X_test_word2vec)\n\n accuracy_word2vec, precision_word2vec, recall_word2vec, f1_word2vec = get_metrics(y_test_word2vec, y_predicted_word2vec)\n print(\"accuracy = %.3f, precision = %.3f, recall = %.3f, f1 = %.3f\" % (accuracy_word2vec, precision_word2vec,\n recall_word2vec, f1_word2vec))\n\n cm_w2v = confusion_matrix(y_test_word2vec, y_predicted_word2vec,labels= top_5_all_drugs_clean[\"label\"].unique())\n plot = plot_confusion_matrix(cm_w2v, normalize=True, title='Confusion matrix')\n plot.savefig(\"%s/Word2Vec_SVC\"%(newpath_1))\n plot.close()\n print(\"Word2Vec SVC confusion matrix\")\n print(cm_w2v)\n\n\n calibrated_svc = CalibratedClassifierCV(base_estimator=linear_svc,\n cv=\"prefit\")\n\n calibrated_svc.fit(X_train_word2vec, y_train_word2vec)\n predicted = calibrated_svc.predict(X_test_word2vec)\n\n to_predict = [[\"I\", \"have\", \"hyperinsomnia\", \"and\", \"social\", \"anxiety\"]]\n p_word2vec = get_word2vec_embeddings(word2vec, to_predict)\n print('Predicted probabilities of demo input string are')\n print(calibrated_svc.predict_proba(p_tfidf))\n\n result = pd.DataFrame(calibrated_svc.predict_proba(p_tfidf)*100, 
columns=top_5_all_drugs_clean[\"label\"].unique().values)\n new = pd.melt(result,var_name='Drug name', value_name= 'likelihood(%)' )\n new= new.round(2)\n print(new)\n result_sorted =new.sort_values(by=['likelihood(%)'], ascending=False )\n result_sorted= result_sorted.reset_index(drop=True)\n result_sorted.rename(index= {0:'1st',1:'2nd',2:'3rd',3:'4th',4:'5th'}, inplace=True)\n result_sorted.index.name= \"Rank\"\n print (result_sorted)\n\n # # Save to file in the current working directory\n from sklearn.externals import joblib\n joblib.dump(calibrated_svc, '%s/word2veclinSVC_model.joblib'%(newpath_1))\n\n pkl_filename = \"%s/labels_word2vec_SVC.pkl\" %(newpath_1)\n with open(pkl_filename, 'wb') as file:\n pickle.dump(labels, file)\n file.close()\n", "id": "4785384", "language": "Python", "matching_score": 3.8245978355407715, "max_stars_count": 0, "path": "src/models/LinearSVC_Anal.py" }, { "content": "import nltk\nnltk.download('wordnet')\nimport nltk.data\n\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\nnltk.download('vader_lexicon')\nnltk.download('punkt')\nimport spacy\nimport pandas as pd\nfrom analys import *\nimport re\nimport matplotlib.pyplot as plt\nfrom nltk.stem.porter import *\n\nspacy.load('en')\nfrom spacy.lang.en import English\nparser = English()\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\nimport matplotlib\nimport matplotlib.patches as mpatches\nimport numpy as np\nimport itertools\n\n\ndef get_most_important_features(vectorizer, model, n=5):\n index_to_word = {v:k for k,v in vectorizer.vocabulary_.items()}\n\n # loop for each class\n classes ={}\n for class_index in range(model.coef_.shape[0]):\n# print (class_index)\n# print (model.classes_[class_index])\n word_importances = [(el, index_to_word[i]) for i,el in enumerate(model.coef_[class_index])]\n sorted_coeff = sorted(word_importances, key = lambda x : x[0], reverse=True)\n tops = sorted(sorted_coeff[:n], key = lambda x : x[0])\n bottom = sorted_coeff[-n:]\n classes[class_index] = {\n 'tops':tops,\n 'bottom':bottom,\n 'name': model.classes_[class_index] }\n\n return classes\n\n\ndef plot_important_words(top_scores, top_words, bottom_scores, bottom_words, name, title):\n y_pos = np.arange(len(top_words))\n top_pairs = [(a,b) for a,b in zip(top_words, top_scores)]\n top_pairs = sorted(top_pairs, key=lambda x: x[1])\n\n bottom_pairs = [(a,b) for a,b in zip(bottom_words, bottom_scores)]\n bottom_pairs = sorted(bottom_pairs, key=lambda x: x[1], reverse=True)\n\n top_words = [a[0] for a in top_pairs]\n top_scores = [a[1] for a in top_pairs]\n\n bottom_words = [a[0] for a in bottom_pairs]\n bottom_scores = [a[1] for a in bottom_pairs]\n\n fig = plt.figure(figsize=(10, 10))\n\n plt.barh(y_pos,bottom_scores, align='center', alpha=0.5)\n plt.title(title, fontsize=20)\n plt.yticks(y_pos, bottom_words, fontsize=14)\n plt.suptitle('Key words', fontsize=16)\n plt.xlabel('Importance', fontsize=20)\n return plt\n\ndef get_metrics(y_test, y_predicted):\n # true positives / (true positives+false positives)\n precision = precision_score(y_test, y_predicted, pos_label=None,\n average='weighted')\n # true positives / (true positives + false negatives)\n recall = recall_score(y_test, y_predicted, pos_label=None,\n average='weighted')\n\n # harmonic mean of precision and recall\n f1 = f1_score(y_test, y_predicted, pos_label=None, average='weighted')\n # true positives + true 
negatives/ total\n accuracy = accuracy_score(y_test, y_predicted)\n return accuracy, precision, recall, f1\n\ndef plot_confusion_matrix(cm,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.winter,labels=['']):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n fig = plt.figure(figsize=(11, 11))\n ax = fig.add_subplot(111)\n cax = ax.matshow(cm)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] < thresh else \"black\", fontsize=40)\n\n\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] < thresh else \"black\", fontsize=40)\n\n plt.title('Confusion matrix of the classifier')\n fig.colorbar(cax)\n ax.set_xticklabels([''] + labels)\n ax.set_yticklabels([''] + labels)\n plt.xlabel('Predicted')\n plt.ylabel('True')\n return plt\n\n\ndef plot_LSA(test_data, test_labels, savepath=\"PCA_demo.csv\", plot=True):\n lsa = TruncatedSVD(n_components=2)\n lsa.fit(test_data)\n lsa_scores = lsa.transform(test_data)\n color_mapper = {label:idx for idx,label in enumerate(set(test_labels))}\n color_column = [color_mapper[label] for label in test_labels]\n# print(color_column)\n# print(test_data.shape)\n print (lsa_scores[:,1])\n print(color_mapper)\n colors = ['red','green','yellow','blue','black','blue']\n\n # LSA scatter plot\n if plot:\n fig1 =plt.figure(figsize=(11, 11))\n fig1.scatter(lsa_scores[:,0], lsa_scores[:,1], s=8, alpha=.8, c=color_column, cmap=matplotlib.colors.ListedColormap(colors))\n# red_patch = mpatches.Patch(color='orange', label='Irrelevant')\n# green_patch = mpatches.Patch(color='blue', label='Disaster')\n patch_1 = mpatches.Patch(color='red', label='Effexor')\n patch_2 = mpatches.Patch(color='green', label='Lexapro')\n patch_3 = mpatches.Patch(color='yellow', label='Wellbutrin')\n patch_4 = mpatches.Patch(color='blue', label='Prozac')\n patch_5 = mpatches.Patch(color='black', label='Zoloft')\n\n fig1.legend(handles=[patch_1, patch_2,patch_3,patch_4,patch_5], prop={'size': 20})\n\n return fig1\n\ndef plot_LSA(test_data, test_labels, savepath=\"PCA_demo.csv\", plot=True):\n lsa = TruncatedSVD(n_components=2)\n lsa.fit(test_data)\n lsa_scores = lsa.transform(test_data)\n color_mapper = {label:idx for idx,label in enumerate(set(test_labels))}\n color_column = [color_mapper[label] for label in test_labels]\n# print(color_column)\n# print(test_data.shape)\n print (lsa_scores[:,1])\n print(color_mapper)\n colors = ['red','green','yellow','blue','black','blue']\n\n # LSA scatter plot\n if plot:\n fig1 =plt.figure(figsize=(11, 11))\n plt.scatter(lsa_scores[:,0], lsa_scores[:,1], s=8, alpha=.8, c=color_column, cmap=matplotlib.colors.ListedColormap(colors))\n# red_patch = mpatches.Patch(color='orange', label='Irrelevant')\n# green_patch = mpatches.Patch(color='blue', label='Disaster')\n patch_1 = mpatches.Patch(color='red', label='Effexor')\n patch_2 = mpatches.Patch(color='green', label='Lexapro')\n patch_3 = mpatches.Patch(color='yellow', label='Wellbutrin')\n patch_4 = mpatches.Patch(color='blue', label='Prozac')\n patch_5 = mpatches.Patch(color='black', label='Zoloft')\n\n plt.legend(handles=[patch_1, patch_2,patch_3,patch_4,patch_5], prop={'size': 20})\n\n return plt\n\n\n\n\n# # Apply clustering instead of class names.\n# from sklearn.cluster import 
KMeans\n#\n# clusters = KMeans(n_clusters=5)\n# clusters.fit(docs)\n#\n# tsne = TSNEVisualizer()\n# tsne.fit(docs, [\"c{}\".format(c) for c in clusters.labels_])\n# tsne.poof()\n\ndef plot_TSNE(test_data, test_labels, newpath_1,kind, savepath=\"TSNE_demo.csv\", plot=True):\n from sklearn.manifold import TSNE\n from yellowbrick.text import TSNEVisualizer\n tsne = TSNEVisualizer()\n tsne.fit(test_data, [\"c{}\".format(c) for c in test_labels])\n fnm = \"%s/%stSNE.png\" %(newpath_1,kind)\n tsne.poof()\n\n # return fig1\n", "id": "134679", "language": "Python", "matching_score": 1.867391586303711, "max_stars_count": 0, "path": "src/models/modelanalys.py" }, { "content": "import argparse\nimport os\nimport random\nimport math\nimport pandas as pd\nimport re\nfrom datetime import datetime\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.preprocessing import OneHotEncoder\nfrom xgboost import XGBRegressor\n\n\ndef load_data(data_dir):\n data = list()\n for folder_name in [name for name in os.listdir(data_dir) if not os.path.isfile(os.path.join(data_dir, name))]:\n try:\n transmitter_location, receiver_location = folder_name.split(\"_\")\n except ValueError:\n print(\"Skipping folder: {}\".format(folder_name))\n continue\n file_names = os.listdir(os.path.join(data_dir, folder_name))\n for file_name in file_names:\n try:\n distance = int(re.findall(r\"_(\\d+)ft_l\", file_name)[0])\n except IndexError:\n # ignore files that aren't log files\n print(\"Skipping file: {}\".format(file_name))\n continue\n with open(os.path.join(data_dir, folder_name, file_name), 'r') as f:\n for i in range(10):\n # ignoring first 10 lines, only using bluetooth data\n f.readline()\n bluetooth_data = list()\n for line in f:\n # As per README.md, only using BlueProxTx readings\n if \"BlueProxTx\" in line:\n record = line.split(\",\")\n # remove data that is not needed\n record = [\n # remove decimal as datetime crashes with decimal in seconds\n # some have \"T\" instead of a space in between days and hours\n record[0][:record[0].index(\".\")].replace(\"T\", \" \"),\n record[3] # rssi value\n ]\n bluetooth_data.append(record)\n # take random pairs of readings and take the time in between\n random.shuffle(bluetooth_data)\n for i in range(math.floor(len(bluetooth_data)/2)):\n record_1 = bluetooth_data[i]\n record_2 = bluetooth_data[i+1]\n try:\n # there are some entries with messed up data, just skip for now\n time_1 = datetime.strptime(record_1[0], \"%Y-%m-%d %H:%M:%S\")\n time_2 = datetime.strptime(record_2[0], \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n continue\n time_interval = abs(time_1 - time_2).seconds\n # todo: add phone types as a feature\n data.append((float(record_1[1]), float(record_2[1]), transmitter_location, receiver_location,\n time_interval, distance))\n\n df = pd.DataFrame(data)\n df.columns = [\"rssi1\", \"rssi2\", \"transmitter_position\", \"receiver_position\", \"time\", \"distance\"]\n return df\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='detects TC4TL')\n parser.add_argument('--data-dir', '-d', type=str, required=True, help='path to MIT-Matrix-Data repo')\n parser.add_argument('--model', '-m', type=str, required=True, help='xgboost or random-forest')\n parser.add_argument('--seed', '-s', type=int, default=100, required=False)\n\n args = parser.parse_args()\n\n data_dir = os.path.expanduser(args.data_dir)\n model_name = args.model\n seed = 
args.seed\n\n random.seed(seed)\n\n print(\"Using {} model on {}\".format(model_name, data_dir))\n\n df = load_data(data_dir)\n\n # Create X\n features = ['rssi1', 'rssi2', 'transmitter_position', 'receiver_position']\n categorical_features = ['transmitter_position', 'receiver_position']\n X = df[features]\n\n # One hot encoding for categorical columns\n OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)\n OH_cols = pd.DataFrame(OH_encoder.fit_transform(X[categorical_features]))\n\n # put index back\n OH_cols.index = X.index\n\n # Remove old categorical columns\n num_X = X.drop(categorical_features, axis=1)\n\n # Add the one-hot encoded columns\n OH_X = pd.concat([num_X, OH_cols], axis=1)\n\n ##### Predicting Distance\n y_distance = df.distance\n train_X_distance, val_X_distance, train_y_distance, val_y_distance = train_test_split(OH_X, y_distance, random_state=1)\n if model_name == \"xgboost\":\n distance_model = XGBRegressor(random_state=seed)\n elif model_name == \"random-forest\":\n distance_model = RandomForestRegressor(random_state=seed)\n else:\n raise Exception(\"Model {} not supported\".format(model_name))\n distance_model.fit(train_X_distance, train_y_distance)\n distance_mae = mean_absolute_error(distance_model.predict(val_X_distance), val_y_distance)\n print(\"[{}] Mean Absolute Error when predicting distance: {}\".format(model_name, distance_mae))\n\n ##### Predicting Time\n y_time = df.time\n train_X_time, val_X_time, train_y_time, val_y_time = train_test_split(OH_X, y_time, random_state=1)\n if model_name == \"xgboost\":\n time_model = XGBRegressor(random_state=seed)\n elif model_name == \"random-forest\":\n time_model = RandomForestRegressor(random_state=seed)\n else:\n raise Exception(\"Model {} not supported\".format(model_name))\n time_model.fit(train_X_time, train_y_time)\n time_mae = mean_absolute_error(time_model.predict(val_X_time), val_y_time)\n print(\"[{}] Mean Absolute Error when predicting distance: {}\".format(model_name, time_mae))\n", "id": "563997", "language": "Python", "matching_score": 0.43845903873443604, "max_stars_count": 1, "path": "scripts/detect_TC4TL.py" }, { "content": "import nltk\n\nnltk.download('wordnet')\nimport nltk.data\n\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\nnltk.download('vader_lexicon')\nnltk.download('punkt')\nimport pandas as pd\nfrom analys import *\nimport matplotlib.pyplot as plt\nimport gensim\n\nspacy.load('en')\nfrom spacy.lang.en import English\n\nparser = English()\nfrom modelanalys import *\nimport sys\nfrom pathlib import Path\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\nfrom src.visualization.visualize import *\n\n\ndef sent_to_words(sentences):\n for sentence in sentences:\n yield (gensim.utils.simple_preprocess(str(sentence), deacc=True)) # deacc=True removes punctuations\n\n\ndef sent_to_score(sent):\n if sent == 'neu':\n score = 3\n elif sent == 'pos':\n score = 5\n else:\n # sent == 'neg':\n score = 1\n return score\n\n\ndef sentiment_analysis(message_text):\n # next, we initialize VADER so we can use it within our Python script\n sid = SentimentIntensityAnalyzer()\n\n # the variable 'message_text' now contains the text we will analyze.\n message_text = ''' %s''' % (message_text)\n\n # Calling the polarity_scores method on sid and passing in the message_text outputs a dictionary with negative, neutral, positive, and compound scores for the input text\n scores = sid.polarity_scores(message_text)\n 
scores.pop('compound')\n sentiment = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)\n sent, score = (tuple(sentiment)[0])\n rating_score = sent_to_score(sent)\n return sent, rating_score\n\n\ndef body_to_sent_words(df, clm):\n # Take the body of a dataframe and parse it to sentence and words\n # Store them in separate columns\n\n df['selftext_bysent'] = [[]] * len(df)\n df['selftext_byWords'] = [[]] * len(df)\n df['sentiment_body'] = [[]] * len(df)\n df['rating_body'] = [[]] * len(df)\n total_txt_data = [] # Sentence list\n total_txt_word = [] # words list\n\n for i in df[clm].index:\n # Parse into sentences\n parsed = tokenizer.tokenize(df.at[i, clm])\n df.at[i, 'selftext_bysent'] = parsed\n\n # Parse into words - create list of lists\n words = list(sent_to_words(df.at[i, 'selftext_bysent']))\n words_flatten = [item for sublist in words for item in sublist]\n\n df.at[i, 'selftext_byWords'] = words_flatten\n total_txt_word.extend(words_flatten)\n total_txt_data.extend(tokenizer.tokenize(df.loc[i]['body'])[:])\n\n sent, score = sentiment_analysis(df.at[i, 'body'])\n df.at[i, 'sentiment_body'] = sent\n df.at[i, 'rating_body'] = score\n return df, total_txt_data, total_txt_word\n\n\ndef sentanal(top_5_drug_ment_sub, newpath_1):\n \"\"\"\n args:\n top_5_drug_ment_sub - list of tuple (drug_name, df)\n df: columns = []\n Take df\n Broke msg into sentences , words and score of sentiment on the whole message\n df is mutated\n returns parsed text data as a dictionary\n \"\"\"\n text_data = []\n top5_drug_txt_parse = {}\n\n for drug_name, df in top_5_drug_ment_sub:\n print('\\n')\n print(drug_name)\n df['label'] = drug_name\n # Create new column of selt text parsed into sentences\n df, total_txt_data, total_txt_word = body_to_sent_words(df, 'body')\n top5_drug_txt_parse[drug_name] = total_txt_data\n\n plot_n_prev_rare(total_txt_word, drug_name, 20, newpath_1, opt=\"largest\")\n plot_n_prev_rare(total_txt_word, drug_name, 20, newpath_1, opt=\"smallest\")\n\n plt.figure()\n pd.Series((df['sentiment_body'].values)).value_counts().plot.pie(autopct='%.2f', fontsize=10, figsize=(6, 6))\n plt.title(drug_name + \"sentiment distribution\")\n plt.savefig(\"%s/%ssentiment_distribution.png\" % (newpath_1, drug_name))\n plt.close()\n\n sentence_lengths = [len(tokens) for tokens in df[\"selftext_byWords\"]]\n VOCAB = sorted(list(set(total_txt_word)))\n print(\"%s words total, with a vocabulary size of %s\" % (len(total_txt_word), len(VOCAB)))\n print(\"Max sentence length is %s\" % max(sentence_lengths))\n\n fig = plt.figure(figsize=(11, 11))\n plt.xlabel('Sentence length')\n plt.ylabel('Number of sentences')\n plt.hist(sentence_lengths)\n plt.title(drug_name)\n plt.savefig(\"%s/%ssentence_distribution.png\" % (newpath_1, drug_name))\n plt.close()\n return top5_drug_txt_parse, top_5_drug_ment_sub", "id": "12591430", "language": "Python", "matching_score": 2.5255210399627686, "max_stars_count": 0, "path": "src/features/build_features.py" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport scipy\nfrom scipy.integrate import odeint, solve_ivp\n# import matplotlib as mpl\n# mpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport sklearn\nimport os\nimport numpy as np\nimport nltk\n# nltk.download('wordnet')\nfrom nltk.corpus import wordnet as wn\nimport spacy\nspacy.load('en_core_web_sm')\nfrom spacy.lang.en import Englishs\nparser = English()\nimport nltk.data\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\ntokenizer = 
nltk.data.load('tokenizers/punkt/english.pickle')\n# nltk.download('vader_lexicon')\n# nltk.download('punkt')\nimport operator\nimport gensim\nimport re\n\t# functionize\n\t###########################\n\t# Show pie chart of n prevalent words in the text\n\n\n\n# FUNCTIONIZE\n########åå###################################\n# find the mathches of drug names from the dataframe['body']\n# dict_a: dictionary name, store the result as dict_a[drugname]\n# Pass : df['body']\n# nm =drug_name å+ '_mentioned_all' + tag + source\ndef find_in_df(dict_a, df_subs, drug_name, filter_cols,nm,newpath_1,tag=\"save\", col= 'body'):\n\n dict_a[drug_name] = pd.DataFrame(columns=filter_cols)\n dict_a[drug_name][filter_cols] = df_subs[filter_cols][df_subs[col].str.contains(drug_name, flags=re.IGNORECASE, regex=True)]\n print(len(dict_a[drug_name]))\n print(\"%f percent out of all comments \" % (100 * len(dict_a[drug_name]) / len(df_subs)))\n print(dict_a[drug_name].describe())\n if tag ==\"save\":\n if len(nm)==0:\n pth = newpath_1\n else:\n pth = newpath_1 + \"/%s.pkl\" % (nm)\n print (\"pth:\"+ pth)\n dict_a[drug_name].to_pickle(pth)\n else:\n pass\n return dict_a\n##############################################\n\n\n# FUNCTIONIZE: Top_n_ones\n############################\ndef top_k_inDF(dict_a,k):\n\t# List of tuples (drug nane, dataframe for each drugs )\n\ttop_k_drug_ment_sub = sorted(dict_a.items(), key=lambda x: len(x[1]),reverse=True)[0:k]\n\ttop_k_drugs= [x for (x,df) in top_k_drug_ment_sub]\n\treturn top_k_drug_ment_sub, top_k_drugs\n############################\n\n#FUNCTIONIZE:Pickle the list of tuples\n#######################\n# Pickle information about top k mentioned drugs\n# dict_a = data to pickle\n# top_k_drug_ment_sub: list of tuple (drug_name, df); get name\n# nm: file name\n# new_path_1: path to file\ndef pickl_list_tupl( top_k_drug_ment_sub, nm, newpath_1):\n\tfor (x, df) in top_k_drug_ment_sub:\n\t\tname = x + '_top5_' + nm\n\t\tdf.to_pickle(newpath_1 + \"/%s.pkl\" % (name))\n##########################\n\ndef plot_n_prev_rare(list,drug_name,n, newpath_1,opt= None):\n if opt == 'largest':\n plt.figure()\n print(pd.Series(list).describe())\n pd.Series(list).value_counts().nlargest(n).plot.pie(autopct='%.2f', fontsize=10, figsize=(6, 6))\n tit = drug_name + \"20 prevalent words\"\n plt.title(tit)\n plt.savefig(\"%s/%s.png\"%(newpath_1,tit))\n plt.close()\n elif opt == 'smallest':\n plt.figure()\n pd.Series(list).value_counts().nsmallest(n).plot.pie(autopct='%.2f', fontsize=10, figsize=(6, 6))\n tit = drug_name + \"20 prevalent words\"\n plt.title(drug_name + \"20 rare words\")\n plt.savefig(\"%s/%s.png\"%(newpath_1,tit))\n plt.close()\n else:\n print (\"use either largest or smallest\")\n print(\"Distribution of top 30 words as default \")\n plt.figure()\n print(pd.Series(list).describe())\n pd.Series(list).value_counts().nlargest(30).plot.pie(autopct='%.2f', fontsize=10, figsize=(6, 6))\n tit = drug_name + \"20 prevalent words\"\n plt.title(tit)\n plt.savefig(\"%s/%s.png\"%(newpath_1,tit))\n plt.close()\n\n\n\n\n", "id": "7907173", "language": "Python", "matching_score": 4.124332427978516, "max_stars_count": 0, "path": "src/data/analys.py" }, { "content": "# mpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\n# nltk.download('wordnet')\nimport spacy\n\nspacy.load('en')\nfrom spacy.lang.en import English\n\nparser = English()\nimport nltk.data\n\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\n\ndef plot_n_prev_rare(list,drug_name,n, newpath_1,opt= None):\n if 
opt == 'largest':\n plt.figure()\n print(pd.Series(list).describe())\n pd.Series(list).value_counts().nlargest(n).plot.pie(autopct='%.2f', fontsize=10, figsize=(6, 6))\n tit = drug_name + \"20 prevalent words\"\n plt.title(tit)\n plt.savefig(\"%s/%s.png\"%(newpath_1,tit))\n plt.close()\n elif opt == 'smallest':\n plt.figure()\n pd.Series(list).value_counts().nsmallest(n).plot.pie(autopct='%.2f', fontsize=10, figsize=(6, 6))\n tit = drug_name + \"20 prevalent words\"\n plt.title(drug_name + \"20 rare words\")\n plt.savefig(\"%s/%s.png\"%(newpath_1,tit))\n plt.close()\n else:\n print (\"use either largest or smallest\")\n print(\"Distribution of top 30 words as default \")\n plt.figure()\n print(pd.Series(list).describe())\n pd.Series(list).value_counts().nlargest(30).plot.pie(autopct='%.2f', fontsize=10, figsize=(6, 6))\n tit = drug_name + \"20 prevalent words\"\n plt.title(tit)\n plt.savefig(\"%s/%s.png\"%(newpath_1,tit))\n plt.close()\n", "id": "10600902", "language": "Python", "matching_score": 1.016399621963501, "max_stars_count": 0, "path": "src/visualization/visualize.py" }, { "content": "import nltk\nnltk.download('wordnet')\nimport nltk.data\n\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\nnltk.download('vader_lexicon')\nnltk.download('punkt')\nimport spacy\nimport pandas as pd\nimport json\nfrom analys import *\nfrom text_analys import *\nimport CV_TF_Word2Vec_Anal\nimport LDA_topic_model\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\nfrom src.features.build_features import *\nimport re\nimport matplotlib.pyplot as plt\nimport gensim\nfrom gensim.utils import simple_preprocess\nfrom gensim.parsing.preprocessing import STOPWORDS\nfrom nltk.stem.porter import *\nimport numpy as np\nspacy.load('en')\nfrom spacy.lang.en import English\nparser = English()\nfrom sklearn.decomposition import PCA, TruncatedSVD\nimport matplotlib\nimport matplotlib.patches as mpatches\nfrom sklearn.preprocessing import LabelEncoder\nimport pickle\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\nfrom gensim import corpora\nimport pickle\nimport pyLDAvis\nimport pyLDAvis.gensim\nfrom modelanalys import *\n\ndef LDA_topic_anal(top5_drug_txt_parse,newpath_1):\n top5_drug_txt_lda= {}\n for (drug_name, total_txt) in top5_drug_txt_parse.items():\n text_data =[]\n for line in total_txt:\n tokens = prepare_text_for_lda(line)\n if random.random() > .99:\n # print (line)\n # print(tokens)\n text_data.append(tokens)\n # print (text_data)\n top5_drug_txt_lda[drug_name]=text_data\n\n print (top5_drug_txt_lda['Zoloft'])\n\n # LDA with Gensim\n # First, we are creating a dictionary from the data, then convert to bag-of-words corpus and save the dictionary and corpus for future use.\n\n for (drug_name, text_data) in top5_drug_txt_lda.items():\n print (drug_name)\n dictionary = corpora.Dictionary(text_data)\n corpus = [dictionary.doc2bow(text) for text in text_data]\n # Dumped on total corpus & Dictionary\n pkn ='%s/corpus%s.pkl' %(newpath_1,drug_name)\n pickle.dump(corpus, open(pkn, 'wb'))\n dkn ='%s/dictionary%s.gensim' %(newpath_1,drug_name)\n dictionary.save(dkn)\n\n NUM_TOPICS = 5\n ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics = NUM_TOPICS, id2word=dictionary, passes=15)\n nm = '%s/model5%s.gensim' %(newpath_1,drug_name)\n ldamodel.save(nm)\n topics = ldamodel.print_topics(num_words=4)\n for topic in topics:\n print(topic)\n\n drug_name ='Zoloft'\n pkn ='%s/corpus%s.pkl' %(newpath_1,drug_name)\n dkn 
='%s/dictionary%s.gensim' %(newpath_1,drug_name)\n dictionary = gensim.corpora.Dictionary.load(dkn)\n corpus = pickle.load(open(pkn, 'rb'))\n nm = '%s/model5%s.gensim' %(newpath_1,drug_name)\n lda = gensim.models.ldamodel.LdaModel.load(nm)\n lda_display = pyLDAvis.gensim.prepare(lda, corpus, dictionary, sort_topics=True)\n pyLDAvis.display(lda_display)\n\n # 80]:\n\n drug_name ='Prozac'\n pkn ='%s/corpus%s.pkl' %(newpath_1,drug_name)\n dkn ='%s/dictionary%s.gensim' %(newpath_1,drug_name)\n dictionary = gensim.corpora.Dictionary.load(dkn)\n corpus = pickle.load(open(pkn, 'rb'))\n nm = '%s/model5%s.gensim' %(newpath_1,drug_name)\n lda = gensim.models.ldamodel.LdaModel.load(nm)\n lda_display = pyLDAvis.gensim.prepare(lda, corpus, dictionary, sort_topics=True)\n pyLDAvis.display(lda_display)\n\n # 81]:\n drug_name ='Lexapro'\n pkn ='%s/corpus%s.pkl' %(newpath_1,drug_name)\n dkn ='%s/dictionary%s.gensim' %(newpath_1,drug_name)\n dictionary = gensim.corpora.Dictionary.load(dkn)\n corpus = pickle.load(open(pkn, 'rb'))\n nm = '%s/model5%s.gensim' %(newpath_1,drug_name)\n lda = gensim.models.ldamodel.LdaModel.load(nm)\n lda_display = pyLDAvis.gensim.prepare(lda, corpus, dictionary, sort_topics=True)\n pyLDAvis.display(lda_display)\n\n drug_name ='Effexor'\n pkn ='%s/corpus%s.pkl' %(newpath_1,drug_name)\n dkn ='%s/dictionary%s.gensim' %(newpath_1,drug_name)\n dictionary = gensim.corpora.Dictionary.load(dkn)\n corpus = pickle.load(open(pkn, 'rb'))\n nm = '%s/model5%s.gensim' %(newpath_1,drug_name)\n lda = gensim.models.ldamodel.LdaModel.load(nm)\n lda_display = pyLDAvis.gensim.prepare(lda, corpus, dictionary, sort_topics=True)\n pyLDAvis.display(lda_display)\n\n drug_name ='Wellbutrin'\n pkn ='%s/corpus%s.pkl' %(newpath_1,drug_name)\n dkn ='%s/dictionary%s.gensim' %(newpath_1,drug_name)\n dictionary = gensim.corpora.Dictionary.load(dkn)\n corpus = pickle.load(open(pkn, 'rb'))\n nm = '%s/model5%s.gensim' %(newpath_1, drug_name)\n lda = gensim.models.ldamodel.LdaModel.load(nm)\n lda_display = pyLDAvis.gensim.prepare(lda, corpus, dictionary, sort_topics=True)\n pyLDAvis.display(lda_display)\n\n # ]ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics = 3, id2word=dictionary, passes=15)\n ldamodel.save('%s/model3.gensim'%(newpath_1))\n topics = ldamodel.print_topics(num_words=4)\n for topic in topics:\n print(topic)\n\n ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics = 10, id2word=dictionary, passes=15)\n ldamodel.save('%s/model10.gensim'%(newpath_1))\n topics = ldamodel.print_topics(num_words=4)\n for topic in topics:\n print(topic)\n # pyLDAvis\n # pyLDAvis is designed to help users interpret the topics in a topic model that has been fit to a corpus of text data. 
The package extracts information from a fitted LDA topic model to inform an interactive web-based visualization.\n # Visualizing 5 topics:\n\n dictionary = gensim.corpora.Dictionary.load('%s/dictionary.gensim'%(newpath_1))\n corpus = pickle.load(open('%s/corpus.pkl'%(newpath_1), 'rb'))\n lda = gensim.models.ldamodel.LdaModel.load('%s/model5.gensim'%(newpath_1))\n import pyLDAvis.gensim\n lda_display = pyLDAvis.gensim.prepare(lda, corpus, dictionary, sort_topics=False)\n pyLDAvis.display(lda_display)\n # Saliency: a measure of how much the term tells you about the topic.\n # Relevance: a weighted average of the probability of the word given the topic and the word given the topic normalized by the probability of the topic.\n # The size of the bubble measures the importance of the topics, relative to the data.\n # First, we got the most salient terms, means terms mostly tell us about what’s going on relative to the topics. We can also look at individual topic.\n # Visualizing 3 topics:\n\n lda3 = gensim.models.ldamodel.LdaModel.load('%s/model3.gensim'%(newpath_1))\n lda_display3 = pyLDAvis.gensim.prepare(lda3, corpus, dictionary, sort_topics=False)\n pyLDAvis.display(lda_display3)\n\n # Visualizing 10 topics:\n lda10 = gensim.models.ldamodel.LdaModel.load('%s/model10.gensim'%(newpath_1))\n lda_display10 = pyLDAvis.gensim.prepare(lda10, corpus, dictionary, sort_topics=False)\n pyLDAvis.display(lda_display10)\n", "id": "3447430", "language": "Python", "matching_score": 1.603155493736267, "max_stars_count": 0, "path": "src/models/LDA_topic_model.py" }, { "content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport scipy\nfrom scipy.integrate import odeint, solve_ivp\n# import matplotlib as mpl\n#\n# mpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport sklearn\nimport os\nimport nltk\n\n# sns.set() # over-write plt format with sns. plot made with plt will have sns formatting\n\ndef tokenize(text):\n lda_tokens = []\n tokens = parser(text)\n for token in tokens:\n if token.orth_.isspace():\n continue\n elif token.like_url:\n lda_tokens.append('URL')\n elif token.orth_.startswith('@'):\n lda_tokens.append('SCREEN_NAME')\n else:\n lda_tokens.append(token.lower_)\n return lda_tokens\n# Lemmatization: get one type of verb\ndef get_lemma(word):\n lemma = wn.morphy(word)\n if lemma is None:\n return word\n else:\n return lemma\n\nfrom nltk.stem.wordnet import WordNetLemmatizer\ndef get_lemma2(word):\n return WordNetLemmatizer().lemmatize(word)\n\n# Stop words - MAY HAVE TO EDIT THIS BETTER\nnltk.download('stopwords')\nen_stop = set(nltk.corpus.stopwords.words('english'))\n\ndef prepare_text_for_lda(text):\n tokens = tokenize(text)\n tokens = [token for token in tokens if len(token) > 2]\n tokens = [token for token in tokens if token not in en_stop]\n tokens = [get_lemma(token) for token in tokens]\n return tokens\n", "id": "9108522", "language": "Python", "matching_score": 1.1925450563430786, "max_stars_count": 0, "path": "src/data/text_analys.py" } ]
1.867392
aiwarrior-23
[ { "content": "#-*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nfrom gensim.models import KeyedVectors\nimport data_parser\nimport config\n\nfrom model import Seq2Seq_chatbot\nimport tensorflow as tf\nimport numpy as np\n\nimport re\nimport os\nimport sys\nimport time\n\n\n#=====================================================\n# Global Parameters\n#=====================================================\ndefault_model_path = './model/model-20'\ndefault_simulate_type = 1 # type 1 use one former sent, type 2 use two former sents\n\ntesting_data_path = 'sample_input.txt' if len(sys.argv) <= 3 else sys.argv[3]\noutput_path = 'sample_dialog_output.txt' if len(sys.argv) <= 4 else sys.argv[4]\n\nmax_turns = config.MAX_TURNS\nword_count_threshold = config.WC_threshold\n\n#=====================================================\n# Train Parameters\n#=====================================================\ndim_wordvec = 300\ndim_hidden = 1000\n\nn_encode_lstm_step = 22 # need to plus 1 later, because one random normal as the first timestep\nn_decode_lstm_step = 22\n\nbatch_size = 1\n\n\"\"\" Extract only the vocabulary part of the data \"\"\"\ndef refine(data):\n words = re.findall(\"[a-zA-Z'-]+\", data)\n words = [\"\".join(word.split(\"'\")) for word in words]\n # words = [\"\".join(word.split(\"-\")) for word in words]\n data = ' '.join(words)\n return data\n\ndef generate_question_vector(state, word_vector, dim_wordvec, n_encode_lstm_step):\n state = [refine(w) for w in state.lower().split()]\n state = [word_vector[w] if w in word_vector else np.zeros(dim_wordvec) for w in state]\n state.insert(0, np.random.normal(size=(dim_wordvec,))) # insert random normal at the first step\n\n if len(state) > n_encode_lstm_step:\n state = state[:n_encode_lstm_step]\n else:\n for _ in range(len(state), n_encode_lstm_step):\n state.append(np.zeros(dim_wordvec))\n\n return np.array([state]) # 1 x n_encode_lstm_step x dim_wordvec\n\ndef generate_answer_sentence(generated_word_index, prob_logit, ixtoword):\n # remove <unk> to second high prob. 
word\n for i in range(len(generated_word_index)):\n if generated_word_index[i] == 3:\n sort_prob_logit = sorted(prob_logit[i][0])\n # print('max val', sort_prob_logit[-1])\n # print('second max val', sort_prob_logit[-2])\n maxindex = np.where(prob_logit[i][0] == sort_prob_logit[-1])[0][0]\n secmaxindex = np.where(prob_logit[i][0] == sort_prob_logit[-2])[0][0]\n # print('max ind', maxindex, ixtoword[maxindex])\n # print('second max ind', secmaxindex, ixtoword[secmaxindex])\n generated_word_index[i] = secmaxindex\n\n generated_words = []\n for ind in generated_word_index:\n generated_words.append(ixtoword[ind])\n\n # generate sentence\n punctuation = np.argmax(np.array(generated_words) == '<eos>') + 1\n generated_words = generated_words[:punctuation]\n generated_sentence = ' '.join(generated_words)\n\n # modify the output sentence \n generated_sentence = generated_sentence.replace('<bos> ', '')\n generated_sentence = generated_sentence.replace(' <eos>', '')\n generated_sentence = generated_sentence.replace('--', '')\n generated_sentence = generated_sentence.split(' ')\n for i in range(len(generated_sentence)):\n generated_sentence[i] = generated_sentence[i].strip()\n if len(generated_sentence[i]) > 1:\n generated_sentence[i] = generated_sentence[i][0].upper() + generated_sentence[i][1:] + '.'\n else:\n generated_sentence[i] = generated_sentence[i].upper()\n generated_sentence = ' '.join(generated_sentence)\n generated_sentence = generated_sentence.replace(' i ', ' I ')\n generated_sentence = generated_sentence.replace(\"i'm\", \"I'm\")\n generated_sentence = generated_sentence.replace(\"i'd\", \"I'd\")\n\n return generated_sentence\n\ndef init_history(simulate_type, start_sentence):\n history = []\n history += ['' for _ in range(simulate_type-1)]\n history.append(start_sentence)\n return history\n\ndef get_cur_state(simulate_type, dialog_history):\n return ' '.join(dialog_history[-1*simulate_type:]).strip()\n\ndef simulate(model_path=default_model_path, simulate_type=default_simulate_type):\n ''' args:\n model_path: <type 'str'> the pre-trained model using for inference\n simulate_type: <type 'int'> how many former sents should use as state\n '''\n\n testing_data = open(testing_data_path, 'r').read().split('\\n')\n\n word_vector = KeyedVectors.load_word2vec_format('model/word_vector.bin', binary=True)\n\n _, ixtoword, bias_init_vector = data_parser.preProBuildWordVocab(word_count_threshold=word_count_threshold)\n\n model = Seq2Seq_chatbot(\n dim_wordvec=dim_wordvec,\n n_words=len(ixtoword),\n dim_hidden=dim_hidden,\n batch_size=batch_size,\n n_encode_lstm_step=n_encode_lstm_step,\n n_decode_lstm_step=n_decode_lstm_step,\n bias_init_vector=bias_init_vector)\n\n word_vectors, caption_tf, probs, _ = model.build_generator()\n\n sess = tf.InteractiveSession()\n\n saver = tf.train.Saver()\n try:\n print('\\n=== Use model {} ===\\n'.format(model_path))\n saver.restore(sess, model_path)\n except:\n print('\\nUse default model\\n')\n saver.restore(sess, default_model_path)\n\n with open(output_path, 'w') as out:\n for idx, start_sentence in enumerate(testing_data):\n print('dialog {}'.format(idx))\n print('A => {}'.format(start_sentence))\n out.write('dialog {}\\nA: {}\\n'.format(idx, start_sentence))\n\n dialog_history = init_history(simulate_type, start_sentence)\n\n for turn in range(max_turns):\n question = generate_question_vector(state=get_cur_state(simulate_type, dialog_history), \n word_vector=word_vector, \n dim_wordvec=dim_wordvec, \n n_encode_lstm_step=n_encode_lstm_step)\n\n 
generated_word_index, prob_logit = sess.run([caption_tf, probs], feed_dict={word_vectors: question})\n\n generated_sentence = generate_answer_sentence(generated_word_index=generated_word_index, \n prob_logit=prob_logit, \n ixtoword=ixtoword)\n\n dialog_history.append(generated_sentence)\n print('B => {}'.format(generated_sentence))\n\n question_2 = generate_question_vector(state=get_cur_state(simulate_type, dialog_history), \n word_vector=word_vector, \n dim_wordvec=dim_wordvec, \n n_encode_lstm_step=n_encode_lstm_step)\n\n generated_word_index, prob_logit = sess.run([caption_tf, probs], feed_dict={word_vectors: question_2})\n\n generated_sentence_2 = generate_answer_sentence(generated_word_index=generated_word_index, \n prob_logit=prob_logit, \n ixtoword=ixtoword)\n\n dialog_history.append(generated_sentence_2)\n print('A => {}'.format(generated_sentence_2))\n out.write('B: {}\\nA: {}\\n'.format(generated_sentence, generated_sentence_2))\n\n\nif __name__ == \"__main__\":\n model_path = default_model_path if len(sys.argv) <= 1 else sys.argv[1]\n simulate_type = default_simulate_type if len(sys.argv) <= 2 else int(sys.argv[2])\n n_encode_lstm_step = n_encode_lstm_step * simulate_type + 1 # sent len * sent num + one random normal\n print('simulate_type', simulate_type)\n print('n_encode_lstm_step', n_encode_lstm_step)\n simulate(model_path=model_path, simulate_type=simulate_type)\n", "id": "11594832", "language": "Python", "matching_score": 3.0534958839416504, "max_stars_count": 312, "path": "python/simulate.py" }, { "content": "# coding=utf-8\n\nfrom __future__ import print_function\nimport pickle\nimport codecs\nimport re\nimport os\nimport time\nimport numpy as np\nimport config\n\ndef preProBuildWordVocab(word_count_threshold=5, all_words_path=config.all_words_path):\n # borrowed this function from NeuralTalk\n\n if not os.path.exists(all_words_path):\n parse_all_words(all_words_path)\n\n corpus = open(all_words_path, 'r').read().split('\\n')[:-1]\n captions = np.asarray(corpus, dtype=np.object)\n\n captions = map(lambda x: x.replace('.', ''), captions)\n captions = map(lambda x: x.replace(',', ''), captions)\n captions = map(lambda x: x.replace('\"', ''), captions)\n captions = map(lambda x: x.replace('\\n', ''), captions)\n captions = map(lambda x: x.replace('?', ''), captions)\n captions = map(lambda x: x.replace('!', ''), captions)\n captions = map(lambda x: x.replace('\\\\', ''), captions)\n captions = map(lambda x: x.replace('/', ''), captions)\n\n print('preprocessing word counts and creating vocab based on word count threshold %d' % (word_count_threshold))\n word_counts = {}\n nsents = 0\n for sent in captions:\n nsents += 1\n for w in sent.lower().split(' '):\n word_counts[w] = word_counts.get(w, 0) + 1\n vocab = [w for w in word_counts if word_counts[w] >= word_count_threshold]\n print('filtered words from %d to %d' % (len(word_counts), len(vocab)))\n\n ixtoword = {}\n ixtoword[0] = '<pad>'\n ixtoword[1] = '<bos>'\n ixtoword[2] = '<eos>'\n ixtoword[3] = '<unk>'\n\n wordtoix = {}\n wordtoix['<pad>'] = 0\n wordtoix['<bos>'] = 1\n wordtoix['<eos>'] = 2\n wordtoix['<unk>'] = 3\n\n for idx, w in enumerate(vocab):\n wordtoix[w] = idx+4\n ixtoword[idx+4] = w\n\n word_counts['<pad>'] = nsents\n word_counts['<bos>'] = nsents\n word_counts['<eos>'] = nsents\n word_counts['<unk>'] = nsents\n\n bias_init_vector = np.array([1.0 * word_counts[ixtoword[i]] for i in ixtoword])\n bias_init_vector /= np.sum(bias_init_vector) # normalize to frequencies\n bias_init_vector = 
np.log(bias_init_vector)\n bias_init_vector -= np.max(bias_init_vector) # shift to nice numeric range\n\n return wordtoix, ixtoword, bias_init_vector\n\ndef parse_all_words(all_words_path):\n raw_movie_lines = open('data/movie_lines.txt', 'r', encoding='utf-8', errors='ignore').read().split('\\n')[:-1]\n\n with codecs.open(all_words_path, \"w\", encoding='utf-8', errors='ignore') as f:\n for line in raw_movie_lines:\n line = line.split(' +++$+++ ')\n utterance = line[-1]\n f.write(utterance + '\\n')\n\n\"\"\" Extract only the vocabulary part of the data \"\"\"\ndef refine(data):\n words = re.findall(\"[a-zA-Z'-]+\", data)\n words = [\"\".join(word.split(\"'\")) for word in words]\n # words = [\"\".join(word.split(\"-\")) for word in words]\n data = ' '.join(words)\n return data\n\nif __name__ == '__main__':\n parse_all_words(config.all_words_path)\n\n raw_movie_lines = open('data/movie_lines.txt', 'r', encoding='utf-8', errors='ignore').read().split('\\n')[:-1]\n \n utterance_dict = {}\n with codecs.open('data/tokenized_all_words.txt', \"w\", encoding='utf-8', errors='ignore') as f:\n for line in raw_movie_lines:\n line = line.split(' +++$+++ ')\n line_ID = line[0]\n utterance = line[-1]\n utterance_dict[line_ID] = utterance\n utterance = \" \".join([refine(w) for w in utterance.lower().split()])\n f.write(utterance + '\\n')\n pickle.dump(utterance_dict, open('data/utterance_dict', 'wb'), True)\n", "id": "7281445", "language": "Python", "matching_score": 0.8289769291877747, "max_stars_count": 312, "path": "python/data_parser.py" }, { "content": "\nimport json\nfrom pymongo import MongoClient\nfrom flask import jsonify, request, Flask\nfrom flask_cors import CORS\nimport flask\nfrom utils.utils import *\nfrom bson import ObjectId\nimport copy\nimport requests\nimport datetime\n\napp = Flask(__name__)\nCORS(app)\n\[email protected](\"/login\",methods=['POST'])\ndef loginUser():\n details=request.get_json()[\"userDetails\"]\n userName=details[\"userName\"]\n password=details[\"password\"]\n isParent=details[\"isParent\"]\n response, uName, mailId,stdId,role,designation =checkforLogin(userName,password,isParent)\n if(response==\"Teacher\"):\n return jsonify({\"message\": \"Congratualtion for Login\",\n \"uName\": uName,\n \"mailID\": mailId,\n \"role\": role,\n \"stdId\":\"none\",\n \"designation\": designation} )\n if(response==\"Parent\"):\n return jsonify({\"message\": \"Congratualtion for Login\",\n \"uName\": uName,\n \"stdId\": stdId,\n })\n\n# @app.route(\"/login\", methods=['POST'])\n# def loginTest():\n# details = request.get_json()[\"userDetails\"]\n# userName, uName, mailID, designation = userNameCheck(details[\"userName\"])\n# passwordN = passwordCheck(details[\"password\"])\n# role = fetchrole(mailID[0])\n# if userName == \"Success\":\n# if passwordN == \"Success\":\n# return jsonify({\"message\": \"Congratualtion for Login\",\n# \"uName\": uName[-1],\n# \"mailID\": mailID,\n# \"role\": role,\n# \"designation\": designation})\n# else:\n# return jsonify({\"message\": \"UserName or Password is Incorrect\",\n# \"uName\": \"Not Defined\",\n# \"mailID\": \"Not Defined\",\n# \"role\": \"Not Defined\"})\n# else:\n# return jsonify({\"message\": \"UserName or Password is Incorrect\",\n# \"uName\": \"Not Defined\",\n# \"mailID\": \"Not Defined\",\n# \"role\": \"Not Defined\"})\n\n\[email protected](\"/createuser\", methods=['POST'])\ndef insert_document():\n req_data = request.get_json()[\"UserDetails\"]\n config.userDetailsTemplate[\"primaryEmail\"] = req_data[\"email\"]\n 
config.userDetailsTemplate[\"name\"][\"givenName\"] = req_data[\"firstName\"]\n config.userDetailsTemplate[\"name\"][\"familyName\"] = req_data[\"lastName\"]\n config.userDetailsTemplate[\"password\"] = sha(req_data[\"password\"])\n config.userDetailsTemplate[\"addresses\"][0][\"streetAddress\"] = req_data[\"address\"]\n config.userDetailsTemplate[\"phones\"][0][\"value\"] = req_data[\"phone\"]\n config.userDetailsTemplate[\"organizations\"][0][\"title\"] = req_data[\"title\"]\n config.userDetailsTemplate[\"organizations\"][0][\"name\"] = req_data[\"college\"]\n\n # url = \"https://admin.googleapis.com/admin/directory/v1/users\"\n\n # payload = json.dumps(config.userDetailsTemplate)\n # headers = {\n # 'Authorization': 'Bearer <KEY>',\n # 'Content-Type': 'application/json'\n # }\n\n # response = requests.request(\"POST\", url, headers=headers, data=payload)\n\n config.collection.insert_one(config.userDetailsTemplate).inserted_id\n return jsonify({\"message\": \"Congratualtions user inserted Sucessfully...\"})\n\n\[email protected](\"/creattask\", methods=['POST'])\ndef task():\n req_data = request.get_json()\n config.collection1.insert_one(req_data).inserted_id\n message = createCalendarEvent(req_data)\n return jsonify({\"message\": \"Task created Sucessfully...\"})\n\[email protected](\"/createMeeting\", methods=['POST'])\ndef meeting():\n data= []\n req_data = request.get_json()\n print(req_data)\n req_data = req_data[\"data\"]\n for i in req_data[\"attendees\"]:\n data.append(getEmail(i))\n data1 = createMeeting(req_data,data)\n config.collectionMeetings.insert_one(data1).inserted_id\n return jsonify({\"message\": \"Meeting created Sucessfully...\"})\n\n\[email protected](\"/taskassign\", methods=['POST'])\ndef task_assign():\n assignee = request.get_json()\n activeTask, urgentTask, futureTask, completedTask, backlogTask, activeTaskID, urgentTaskID, futureTaskID, completedTaskID, backlogTaskID = task_assigned(assignee[\"assigned\"])\n data = {}\n populator = {}\n populator[\"activeTaskID\"]=[{'Nothing To Display':\"message\"}] if len(activeTaskID)==0 else activeTaskID\n populator[\"urgentTaskID\"]=[{'Nothing To Display':\"message\"}] if len(urgentTaskID)==0 else urgentTaskID\n populator[\"futureTaskID\"]=[{'Nothing To Display':\"message\"}] if len(futureTaskID)==0 else futureTaskID\n populator[\"completedTaskID\"]=[{'Nothing To Display':\"message\"}] if len(completedTaskID)==0 else completedTaskID\n populator[\"backlogTaskID\"]=[{'Nothing To Display':\"message\"}] if len(backlogTaskID)==0 else backlogTaskID\n return jsonify({\"message\": \"tasks are assigned\", \"data\": data, \"ass\":assignee[\"assigned\"], \"populator\":populator})\n\[email protected](\"/taskToBeApproved\", methods=['POST'])\ndef task_to_be_approved():\n assignee = request.get_json()\n toBeApprovedTasks, toBeApprovedTaskID= task_approved(assignee[\"assigned\"])\n data = {}\n populator = {}\n data[\"toBeApprovedTask\"]=[{'Nothing To Display':\"message\"}] if len(toBeApprovedTasks)==0 else toBeApprovedTasks\n populator[\"toBeApprovedTaskID\"]=[{'Nothing To Display':\"message\"}] if len(toBeApprovedTaskID)==0 else toBeApprovedTaskID\n return jsonify({\"message\": \"tasks are assigned\", \"data\": data, \"ass\":assignee[\"assigned\"], \"populator\":populator})\n\[email protected](\"/taskapprove\", methods=['POST'])\ndef taskapproved():\n approver = request.get_json()\n approve, data1 = task_approver(approver[\"assigned\"])\n if approve == \"Success\":\n return jsonify({\"message\": \"tasks are assigned\", \"data\": 
data1})\n else:\n return jsonify({\"message\": \"tasks are not assigned\"})\n\n\[email protected](\"/staffDetails\", methods=['POST'])\ndef getStaffDetails():\n staffType = request.get_json()[\"staffType\"]\n staffList = getStaffType(staffType)\n return jsonify({\"staffList\": staffList})\n\n\[email protected](\"/department\", methods=['POST'])\ndef getDepartment():\n department = request.get_json()[\"department\"]\n responsibilities = getResponsibilities(department)\n return jsonify({\"responsibilities\": responsibilities})\n\[email protected](\"/taskstatus\", methods=['POST'])\ndef updateTasks():\n taskID = request.get_json(\"taskID\")\n status, data = getstatus(ObjectId(taskID[\"taskID\"]))\n return jsonify({\"status\":status, \"data\":data})\n\[email protected](\"/getjson\", methods=['POST'])\ndef gets():\n objid = request.get_json(\"objid\")\n reqJson = getJson(ObjectId(objid[\"objid\"]))\n return jsonify({\"message\":\"json retrived\",\"json\":eval(str(reqJson))})\n\[email protected](\"/edit\", methods=[\"POST\"])\ndef ej():\n response = request.get_json()\n key = response[\"key\"]\n oid = response[\"objid\"]\n msg = response[\"message\"]\n print(oid)\n editj = editjson(oid, msg, key)\n jsoni = getJson(ObjectId(oid))\n return(jsonify({\"message\":\"task updated sucessfully\",\"json\":jsoni}))\n\[email protected](\"/delete_collec\", methods=['POST'])\ndef delete_collection():\n req_data = request.get_json()\n temp = deletecollection(req_data)\n return jsonify({\"message\":\"Collection Deleted\"})\n\n#task_Assign using Object id\[email protected](\"/taskassign1\", methods=['POST'])\ndef task_assign1():\n objid = request.get_json(\"obji\")\n jsoni = getJson(ObjectId(objid[\"obji\"]))\n print(jsoni)\n return jsonify({\"message\":\"json retrived\",\"json\":jsoni})\n\[email protected](\"/updateComments\", methods=['POST'])\ndef checkmailfortaskupdate():\n x = request.get_json()\n js = x[\"data\"]\n objid = x[\"objid\"]\n key1 = x[\"key\"]\n print(objid)\n temp = {}\n ct = str(datetime.datetime.now().date())\n js[\"timeStamp\"]=ct\n for a in config.collection1.find():\n if ObjectId(objid) == a[\"_id\"]:\n for key, value in a.items():\n if key not in [\"_id\"]:\n temp[key]=value\n old = copy.deepcopy(temp);\n new = copy.deepcopy(temp)\n new[key1].append(js)\n edit = config.collection1.replace_one(old,new)\n return (\"Success\")\n\n\[email protected](\"/classInfo\", methods=['GET'])\ndef class_info():\n classes = getClassInfo()\n subjects = getSubjectInfo()\n return jsonify({\"xx\":list(classes), \"yy\":list(subjects)})\n\[email protected](\"/sectionInfo\", methods=['POST'])\ndef section_info():\n x = request.get_json()\n cls = x[\"class\"]\n section = getSectionInfo(cls)\n return jsonify({\"xx\":section})\n\[email protected](\"/getTeachersList\", methods=['GET'])\ndef get_teachers_list():\n teachers, nonTeachers, test , test1= getTeachersList()\n return jsonify({\"teachers\":list(teachers), \"nonTeachers\":list(nonTeachers), \"test\":test, \"test1\": test1})\n\[email protected](\"/teacherRS\", methods=['POST'])\ndef teacherResponsibilitySubmission():\n req_data = request.get_json()\n config.collectionTeacherAssignments.insert_one(req_data).inserted_id\n return jsonify({\"message\": \"Task created Sucessfully...\"})\n\[email protected](\"/getComments\", methods=['POST'])\ndef getComments():\n req_data = request.get_json()\n req_data = req_data[\"id\"]\n comments = getAllComments(ObjectId(req_data))\n return jsonify({\"comments\": comments})\n\[email protected](\"/getProfileInfo\", 
methods=['POST'])\ndef getProfileInfo():\n req_data = request.get_json()\n req_data = req_data[\"mail\"]\n ct, subjects, repMgr, reprMgrName = getInfo(req_data)\n return jsonify({\"classTeacher\": ct, \"subjects\":subjects, \"reportingManagerEmail\":repMgr, \"reportingManagerName\":reprMgrName})\n\[email protected](\"/student_list\", methods=[\"POST\"])\ndef s_list():\n data = request.get_json()\n slist = student_list(data[\"data\"])\n return jsonify({\"message\":\"milgaya\", \"data\":slist})\n\[email protected](\"/qrcode\", methods=[\"POST\"])\ndef qr():\n data = request.get_json()\n getqr = qrsearch(data[\"data\"])\n return jsonify({\"data\":getqr})\n\[email protected](\"/attendace\",methods=[\"POST\"])\ndef attendace():\n req_data = request.get_json()\n c = req_data['class']\n if c == \"1a\" or c== \"1b\" or c== \"1c\" or c== \"1d\":\n config.db4.class1.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Class1 collection.\"})\n elif c== \"2a\" or c== \"2b\" or c==\"2c\" or c==\"2d\":\n config.db4.class2.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Class2 collection.\"})\n elif c== \"3a\" or c==\"3b\" or c==\"3c\" or c==\"3d\":\n config.db4.class3.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Class3 collection.\"})\n elif c== \"4a\" or c==\"4b\" or c==\"4c\" or c==\"4d\":\n config.db4.class4.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Class4 collection.\"})\n elif c== \"5a\" or c==\"5b\" or c==\"5c\" or c==\"5d\":\n config.db4.class5.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Class5 collection.\"})\n elif c== \"6a\" or c==\"6b\" or c==\"6c\" or c==\"6d\":\n config.db4.class6.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Class6 collection.\"})\n elif c== \"7a\" or c==\"7b\" or c==\"7c\" or c==\"7d\":\n config.db4.class7.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Class7 collection.\"})\n elif c== \"8a\" or c==\"8b\" or c==\"8c\" or c==\"8d\":\n config.db4.class8.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Class8 collection.\"})\n elif c== \"9a\" or c==\"9b\" or c==\"9c\" or c==\"9d\":\n config.db4.class9.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Class9 collection.\"})\n elif c== \"10a\" or c==\"10b\" or c==\"10c\" or c==\"10d\":\n config.db4.class10.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Class10 collection.\"})\n elif c== \"nursery\":\n config.db4.nursery.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into nursey collection.\"})\n elif c== \"L.K.G\":\n config.db4.lkg.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into L.K.G collection.\"})\n elif c== \"U.K.G\":\n config.db4.ukg.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into U.K.G collection.\"})\n else:\n config.db4.garbage.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Garbage collection.\"})\n\[email protected](\"/filter\", methods=[\"POST\"])\ndef a_filter():\n req_data = request.get_json()\n cc=req_data[\"data\"][\"cc\"]\n date=req_data[\"data\"][\"date\"]\n st=req_data[\"data\"][\"st\"]\n et=req_data[\"data\"][\"et\"]\n sub=req_data[\"data\"][\"sub\"]\n data1 = attendacne_filter(cc, date, st, et, sub)\n return 
jsonify({\"message\":\"Data Retrived Sucessfully\",\"data\" : repr(data1)})\n\[email protected](\"/teacherAttendance\",methods=[\"POST\"])\ndef teacherLogin():\n req_data = request.get_json()\n print(req_data)\n insertStatus=[]\n mail=req_data[\"data\"][\"mail\"]\n date=req_data[\"data\"][\"date\"]\n sub=req_data[\"data\"][\"sub\"]\n cls=req_data[\"data\"][\"cls\"]\n data1=getLoginJson(mail,date,sub,cls)\n jsoni=jsonify({\"message\":\"Data Insereted Sucessfully\",\"data\" : \"Success\"})\n print (data1)\n if data1==None:\n config.teachers.insert_one(req_data).inserted_id\n final=jsoni\n else:\n js = req_data[\"data\"][\"logoutTime\"]\n temp = {}\n for key, value in data1.items():\n if key not in [\"_id\"]:\n temp[key]=value\n old = copy.deepcopy(temp)\n new = copy.deepcopy(temp)\n new[\"data\"][\"logoutTime\"]=js\n config.teachers.replace_one(old,new)\n final=jsonify({\"message\":\"Logout Sucessfully\",\"data\" : \"Success\"})\n return final\n\[email protected](\"/teachersJson\", methods=[\"POST\"])\ndef teachersJson():\n req_data = request.get_json()\n mail=req_data[\"data\"][\"mail\"]\n date=req_data[\"data\"][\"date\"]\n sub=req_data[\"data\"][\"sub\"]\n cls=req_data[\"data\"][\"cls\"]\n data1 = getLoginJson(mail, date,sub,cls)\n print(data1)\n jsoni=jsonify({\"message\":\"Data Retrived Sucessfully\",\"data\" : \"hello\"})\n if data1==None:\n final=jsoni\n else:\n final=jsonify({\"message\":\"Data Retrived Sucessfully\",\"data\" : data1})\n return final\n\[email protected](\"/studyCentral\",methods=[\"POST\"])\ndef SC():\n req_data = request.get_json()\n config.collectionSC.insert_one(req_data).inserted_id\n return jsonify({\"message\":\"data inserted into Class1 collection.\"}) \n\[email protected](\"/broadcast\", methods=[\"POST\"])\ndef broadcast_notice():\n data = request.get_json()\n print(data)\n config.broadcast.insert_one(data).inserted_id\n return jsonify({\"message\":\"data inserted into broadcast collection.\",\"data\":repr(data)})\n\[email protected](\"/getBroadcast\", methods=[\"GET\"])\ndef get():\n l1 = []\n for i in config.broadcast.find():\n data = i\n data[\"_id\"] = str(data[\"_id\"])\n l1.append(data)\n return jsonify({\"message\":\"broadcast Data Retrived Successfully\",\"data\":l1})\n\[email protected](\"/employeeProfile\",methods=[\"POST\"])\ndef ep():\n data = request.get_json()\n print(data)\n mail,designation,staffType = taskProfile(data[\"obj\"])\n ct, subjects, repMgr, reprMgrName= getInfo(mail)\n return jsonify({\"message\":\"Employee Information Retrived\",\"MailID\":mail,\"Reporting Manager Name\":reprMgrName,\"Designation\":designation,\"Staff Type\":staffType})\n\[email protected](\"/filter2\", methods=[\"POST\"])\ndef a_filter2():\n req_data = request.get_json()\n cc=req_data[\"data\"][\"cc\"]\n date=req_data[\"data\"][\"date\"]\n sub=req_data[\"data\"][\"sub\"]\n data1 = attendacne_filter(cc, sub, date)\n print(data1)\n return jsonify({\"message\":\"Data Retrived Sucessfully\",\"data\" : data1})\n\n# @app.route(\"/marksfilter\", methods=[\"POST\"])\n# def m_filter():\n# req_data = request.get_json()\n# cc=req_data[\"data\"][\"cc\"]\n# date=req_data[\"data\"][\"date\"]\n# sub=req_data[\"data\"][\"sub\"]\n# data1 = marks_filter(cc, sub, date)\n# print(data1)\n# return jsonify({\"message\":\"Data Retrived Sucessfully\",\"data\" : data1})\n\[email protected](\"/marksfilter\", methods=[\"POST\"])\ndef m_filter():\n req_data = request.get_json()\n classes=req_data[\"class\"]\n exam_type=req_data[\"exam_type\"]\n data1 = marks_filter(classes,exam_type)\n 
return jsonify({\"message\":\"Data Retrived\",\"data\" : data1})\n\[email protected](\"/studentwork\", methods=[\"GET\",\"POST\"])\ndef studenttask():\n task_details = request.get_json()\n class_name = task_details[\"class_name\"]\n subject_name = task_details[\"subject_name\"]\n st = student_task(class_name,subject_name)\n print(st)\n return jsonify({\"message\":st})\[email protected](\"/insertmarks\" ,methods=[\"POST\"])\ndef IM():\n marks=request.get_json()\n config.collectionMark.insert_one(marks).inserted_id\n return jsonify({\"message\": \"Success\"})\napp.run(debug=True, port=5001, host=\"0.0.0.0\")", "id": "75096", "language": "Python", "matching_score": 6.896525859832764, "max_stars_count": 0, "path": "python/pohuAPI.py" }, { "content": "import config\nimport hashlib\nfrom googleapiclient.discovery import build\n# from google_auth_oauthlib.flow import InstalledAppFlow\n# from google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom bson import ObjectId\nimport requests\nimport json\nfrom uuid import uuid4\nimport numpy as np\nimport datetime\n\ndef checkforLogin(userName,password,isParent):\n uName=[]\n mailID=[]\n stdId=[]\n role=[]\n designation = []\n shaPass=sha(password)\n print(\"process started\")\n if(isParent!=\"1\"):\n for doc in config.collection.find():\n if(userName==doc[\"primaryEmail\"] and shaPass==doc[\"password\"]):\n uName.append(doc[\"name\"][\"givenName\"])\n mailID.append(doc[\"primaryEmail\"])\n role.append(doc[\"organizations\"][0][\"title\"])\n designation.append(doc[\"organizations\"][0][\"title\"])\n return(\"Teacher\",uName,mailID,None,role,designation)\n else:\n for doc in config.parentCollection.find():\n if(userName==doc[\"phoneno\"] and password==doc[\"password\"]):\n print(\"yess\")\n uName.append(doc[\"name\"])\n stdId.append(doc[\"studentid\"])\n return(\"Parent\",uName,None,stdId,None,None)\n# def userNameCheck(userName):\n# uName = []\n# mailID = []\n# designation = []\n# for doc in config.collection.find():\n# if userName == doc[\"primaryEmail\"]:\n# uName.append(doc[\"name\"][\"givenName\"])\n# mailID.append(doc[\"primaryEmail\"])\n# designation.append(doc[\"organizations\"][0][\"title\"])\n# return(\"Success\", uName, mailID, designation)\n\n\n# def passwordCheck(password):\n# for doc in config.collection.find():\n# if sha(password) == doc[\"password\"]:\n# return(\"Success\")\n\n\ndef task_assigned(assigned):\n dataActive = {}\n idActive = {}\n dataUrgent = {}\n idUrgent = {}\n dataComplete = {}\n idComplete = {}\n dataFuture = {}\n idFuture = {}\n dataBacklog = {}\n idBacklog = {}\n activeTask = []\n urgentTask = []\n futureTask = []\n completedTask = []\n backlogTask=[]\n activeTaskID = []\n urgentTaskID = []\n futureTaskID = []\n completedTaskID = []\n backlogTaskID =[]\n for doc in config.collection1.find():\n if assigned == doc[\"task assigned to\"] and \"Update Task Status\" == doc[\"task status\"]:\n dataActive[doc[\"task title\"]] = str(doc[\"_id\"])\n idActive[str(doc[\"_id\"])] = [doc[\"task priority\"], doc[\"task assigned by\"],\n doc[\"task assigned to\"], doc[\"task title\"]]\n activeTask.append(dataActive)\n activeTaskID.append(idActive)\n\n\n if assigned == doc[\"task assigned to\"] and doc[\"task status\"] ==\"Start Task\" :\n dataFuture[doc[\"task title\"]] = str(doc[\"_id\"])\n idFuture[str(doc[\"_id\"])] = [doc[\"task priority\"], doc[\"task assigned by\"],\n doc[\"task assigned to\"], doc[\"task title\"]]\n futureTask.append(dataFuture)\n futureTaskID.append(idFuture)\n\n if 
assigned == doc[\"task assigned to\"] and \"Task Completed Successfully\" == doc[\"task status\"]:\n dataComplete[doc[\"task title\"]] = str(doc[\"_id\"])\n idComplete[str(doc[\"_id\"])] = [doc[\"task priority\"], doc[\"task assigned by\"],\n doc[\"task assigned to\"], doc[\"task title\"]]\n completedTask.append(dataComplete)\n completedTaskID.append(idComplete)\n\n if assigned == doc[\"task assigned to\"] and \"high\" == doc[\"task priority\"] and doc[\"task status\"]!=\"Approved\":\n dataUrgent[doc[\"task title\"]] = str(doc[\"_id\"])\n idUrgent[str(doc[\"_id\"])] = [doc[\"task priority\"], doc[\"task assigned by\"],\n doc[\"task assigned to\"], doc[\"task title\"]]\n urgentTask.append(dataUrgent)\n urgentTaskID.append(idUrgent)\n\n if assigned == doc[\"task assigned to\"] and datetime.datetime.now().date() >= datetime.datetime.strptime(doc[\"task deadline\"], \"%Y-%m-%d\").date() and doc[\"task status\"] !=\"Approved\":\n dataBacklog[doc[\"task title\"]] = str(doc[\"_id\"])\n idBacklog[str(doc[\"_id\"])] = [doc[\"task priority\"], doc[\"task assigned by\"],\n doc[\"task assigned to\"], doc[\"task title\"]]\n backlogTask.append(dataBacklog)\n backlogTaskID.append(idBacklog)\n\n\n return (activeTask, urgentTask, futureTask, completedTask, backlogTask, activeTaskID, urgentTaskID, futureTaskID, completedTaskID, backlogTaskID)\n\ndef task_approved(assigned):\n dataActive = {}\n idActive = {}\n activeTask = []\n activeTaskID = []\n for doc in config.collection1.find():\n if assigned == doc[\"task assigned by\"] and \"Task Completed Successfully\" == doc[\"task status\"]:\n dataActive[doc[\"task description\"]] = str(doc[\"_id\"])\n idActive[str(doc[\"_id\"])] = [doc[\"task priority\"], doc[\"task assigned by\"],\n doc[\"task assigned to\"], doc[\"task description\"]]\n activeTask.append(dataActive)\n activeTaskID.append(idActive)\n\n\n return (activeTask, activeTaskID)\n\n\ndef task_approver(approver):\n data1 = []\n for doc in config.collection1.find():\n if approver == doc[\"task\"][\"Task Approval require to complete\"]:\n data1.append(doc[\"task\"])\n ok = \"Success\"\n return ok, data1\n\n\ndef getStaffType(staffType):\n staffList = []\n for doc in config.collectionStaff.find():\n if staffType == doc[\"staffType\"]:\n staffList = doc[\"staffDepartment\"]\n return(staffList)\n\n\ndef getResponsibilities(department):\n responsibilities = []\n for doc in config.collectionResponsibilities.find():\n if department == doc[\"department\"]:\n responsibilities = doc[\"responsibilities\"]\n return(responsibilities)\n\n\ndef createCalendarEvent(req_data):\n SCOPES = ['https://www.googleapis.com/auth/calendar']\n creds = None\n creds = Credentials.from_authorized_user_file(f\"token.json\", SCOPES)\n service = build('calendar', 'v3', credentials=creds)\n config.eventTemplate[\"description\"] = req_data[\"task description\"]\n config.eventTemplate[\"summary\"] = req_data[\"task description\"]\n config.eventTemplate[\"start\"][\"dateTime\"] = req_data[\"task deadline\"] + \\\n \"T09:00:00-07:00\"\n config.eventTemplate[\"end\"][\"dateTime\"] = req_data[\"task deadline\"] + \\\n \"T09:00:00-07:00\"\n event = service.events().insert(calendarId='primary',\n body=config.eventTemplate).execute()\n print('Event created: %s' % (event.get('htmlLink')))\n\n\ndef createMeeting(req_data,data):\n SCOPES = ['https://www.googleapis.com/auth/calendar']\n creds = None\n creds = Credentials.from_authorized_user_file(f\"token.json\", SCOPES)\n service = build('calendar', 'v3', credentials=creds)\n 
config.eventTemplate[\"description\"] = req_data[\"description\"]\n config.eventTemplate[\"summary\"] = req_data[\"summary\"]\n config.eventTemplate[\"start\"][\"dateTime\"] = req_data[\"date\"]+req_data[\"startTime\"]\n config.eventTemplate[\"start\"][\"timeZone\"] = \"Asia/Kolkata\"\n config.eventTemplate[\"end\"][\"dateTime\"] = req_data[\"date\"]+req_data[\"endTime\"]\n config.eventTemplate[\"end\"][\"timeZone\"] = \"Asia/Kolkata\"\n dict1=[]\n dict2 = {}\n for i in data:\n dict2[\"id\"] = np.random.randint(1,1000)\n dict2[\"email\"] = i \n dict1.append(dict2)\n config.eventTemplate[\"attendees\"] = dict1\n config.eventTemplate[\"conferenceData\"] = {\"createRequest\": {\"requestId\": f\"{uuid4().hex}\",\n \"conferenceSolutionKey\": {\"type\": \"hangoutsMeet\"}}}\n\n print(config.eventTemplate)\n\n event = service.events().insert(calendarId='primary',\n body=config.eventTemplate, sendNotifications=True, conferenceDataVersion=1).execute()\n print('Event created: %s' % (event.get('htmlLink')))\n return(config.eventTemplate)\n\n\ndef sha(password):\n result = hashlib.sha1(password.encode())\n return result.hexdigest()\n\n\ndef fetchrole(email):\n for i in config.collection.find():\n if email == i[\"primaryEmail\"]:\n user = (i[\"organizations\"][0][\"title\"])\n for j in config.collection2.find():\n if user == j[\"users\"][0]:\n role = (j[\"role\"])\n return(role)\n\n\ndef getstatus(taskID):\n data = {}\n for i in config.collection1.find():\n if taskID == i[\"_id\"]:\n status = i[\"task status\"]\n break\n for j in config.collectionStatus.find():\n if status == j[\"title\"]:\n data[\"componentsInput\"] = j[\"componentsInput\"]\n data[\"title\"] = j[\"title\"]\n data[\"componentsButtons\"] = j[\"componentsButtons\"]\n data[\"componentsUpload\"] = j[\"componentsUpload\"]\n data[\"message\"] = j[\"message\"]\n data[\"buttonValue\"] = j[\"buttonValue\"]\n data[\"view\"] = j[\"view\"]\n break\n return status, data\n\n\ndef editjson(obj, newMsg, key):\n url = \"http://34.136.41.197:5000/getjson\"\n payload = json.dumps({\n \"objid\": obj\n })\n headers = {\n 'Content-Type': 'application/json'\n }\n\n response = requests.request(\"POST\", url, headers=headers, data=payload)\n old = response.json()[\"json\"]\n new = response.json()[\"json\"]\n\n new[key] = newMsg\n for i in config.collection1.find():\n if ObjectId(obj) == i[\"_id\"]:\n edit = config.collection1.replace_one(old, new)\n return edit\n\n\ndef getJson(objid):\n data = {}\n for i in config.collection1.find():\n if objid == i[\"_id\"]:\n for key, value in i.items():\n if key not in [\"_id\"]:\n data[key] = value\n return data\n\n\ndef deletecollection(obj):\n print(obj[\"obj\"])\n for j in config.collection1.find():\n if ObjectId(obj[\"obj\"]) == j[\"_id\"]:\n print(\"Success\")\n config.collection1.delete_one({\"_id\": ObjectId(obj[\"obj\"])})\n return(\"Success\")\n\n\ndef task_assigned1(obji):\n data = []\n for x in config.collection1.find():\n if ObjectId(obji) == x[\"_id\"]:\n data.append(x)\n return data\n\n\ndef getClassInfo():\n for x in config.collectionClassInfo.find():\n classes = x[\"classInformation\"].keys()\n return list(classes)\n\n\ndef getSectionInfo(cls):\n for x in config.collectionClassInfo.find():\n if cls in x[\"classInformation\"].keys():\n section = x[\"classInformation\"][cls]\n break\n return section\n\n\ndef getSubjectInfo():\n for x in config.collectionClassInfo.find():\n subjects = x[\"subjectInformation\"]\n return list(subjects)\n\n\ndef getTeachersList():\n names = []\n test = {}\n nonTeachers = []\n 
test1 = {}\n for x in config.collection.find():\n if x[\"organizations\"][0][\"title\"] == \"teacher\" or x[\"organizations\"][0][\"title\"][0] == \"teacher\":\n names.append(x[\"name\"][\"givenName\"]+\" \"+x[\"name\"][\"familyName\"])\n test[str(x[\"_id\"])] = x[\"name\"][\"givenName\"] + \\\n \" \"+x[\"name\"][\"familyName\"]\n else:\n nonTeachers.append(x[\"name\"][\"givenName\"] +\n \" \"+x[\"name\"][\"familyName\"])\n test1[str(x[\"_id\"])] = x[\"name\"][\"givenName\"] + \\\n \" \"+x[\"name\"][\"familyName\"]\n return names, nonTeachers, test, test1\n\n\ndef getAllComments(id):\n for x in config.collection1.find():\n if id == x[\"_id\"]:\n comments = x[\"task updates\"]\n break\n return comments\n\ndef taskProfile(obj):\n for i in config.collection.find():\n if ObjectId(obj) == i[\"_id\"]:\n data = i\n data[\"_id\"]= str(data[\"_id\"])\n mail = data[\"primaryEmail\"]\n print(mail)\n designation = data[\"organizations\"][0][\"title\"]\n if designation in [\"classTeacher\",\"teacher\",\"teacherCoordinator\", \"subCoordinator\"]:\n staffType = \"Teaching\"\n else:\n staffType = \"Non-Teaching\"\n return mail,designation,staffType\n\n\ndef getInfo(mail):\n for x in config.collection.find():\n if x[\"primaryEmail\"] == mail:\n id = x[\"_id\"]\n for y in config.collectionTeacherAssignments.find():\n if ObjectId(y[\"id\"]) == id:\n ct = y[\"classTeacher\"]\n subjects = y[\"subjectTeacher\"]\n rm = y[\"reportingManager\"]\n for x in config.collection.find():\n if x[\"_id\"] == ObjectId(rm):\n repMgr = x[\"primaryEmail\"]\n reprMgrName = x[\"name\"][\"givenName\"] + \" \"+ x[\"name\"][\"familyName\"]\n return ct, subjects, repMgr, reprMgrName\n\n\ndef getEmail(obji):\n data = []\n for x in config.collection.find():\n if ObjectId(obji) == x[\"_id\"]:\n data.append(x[\"primaryEmail\"])\n return data\n\ndef student_list(classn):\n collection = config.db2.collection_names(include_system_collections=False)\n dict2=[]\n for collect in collection:\n if classn == collect:\n for j in config.db2[collect].find():\n dict1 = j\n dict1[\"_id\"] = repr(dict1[\"_id\"])\n dict1[\"checked\"]=False\n dict2.append(dict1)\n return dict2\n\ndef qrsearch(test):\n for i in config.collectionQR.find():\n if test == i[\"QR\"][\"qrid\"]:\n return i[\"QR\"]\n\n# def attendacne_filter(cc, date, st, et, sub):\n# class_collection = []\n# class_collection.append([config.class1,config.class2,config.class3,config.class4,config.class5,\n# config.class6,config.class7,config.class8,config.class9,config.class10,config.nursery,config.lkg,config.ukg])\n# for i in class_collection[0]:\n# for j in i.find():\n# if cc == j['class'] and sub == j['subject'] and date == j['Date'] and st == j[\"Time\"][\"starttime\"] and et == j[\"Time\"][\"endtime\"]:\n# data = list(i.find())\n# print (data)\n# return data\n# else:\n# print(\"NO records available\")]\n\ndef attendacne_filter(cc, sub,date):\n class_collection = []\n class_collection.append([config.class1,config.class2,config.class3,config.class4,config.class5,config.class6,config.class7,config.class8,config.class9,config.class10,config.lkg])\n for i in class_collection[0]:\n for j in i.find():\n if cc == j['class'] and sub == j['subject'] and date == j['Date']:\n data=j\n data[\"_id\"]=str(data[\"_id\"])\n return data[\"attendance\"]\n\n# def marks_filter(cc, sub,date):\n# class_collection1 = []\n# class_collection1.append([config.class1M,config.class2M,config.class3M,config.class4M,config.class5M,config.class6M,config.class7M,config.class8M,config.class9M,config.class10M,config.lkgM]) \n# 
for i in class_collection1[0]:\n# for j in i.find():\n# if cc == j['class'] and sub == j['subject'] and date == j['Date']:\n# data=j\n# data[\"_id\"]=str(data[\"_id\"])\n# return data[\"attendance\"]\n\ndef marks_filter(classes,exam_type):\n class_collection1 = []\n class_collection1.append([config.class1M,config.class2M,config.class3M,config.class4M,config.class5M,config.class6M,config.class7M,config.class8M,config.class9M,config.class10M,config.lkgM])\n dat=[]\n for i in class_collection1[0]:\n for j in i.find():\n if classes == j['class'] and exam_type == j['exam_type']:\n data=j\n data[\"_id\"]=str(data[\"_id\"])\n dat.append(data)\n info = info1(dat)\n return info\n\ndef info1(dat):\n info={}\n marks1 = {}\n students = []\n mark = []\n subjects = []\n for i in dat:\n subjects.append(i[\"subject\"])\n mks= list(i[\"marks\"])\n students.append(mks)\n stu = students[0]\n m = []\n for z in i[\"marks\"].values():\n m.append(z)\n mark.append(m)\n marks = np.transpose(mark)\n marks_final = marks.tolist()\n for a in students[0]:\n for b in marks_final:\n print(b)\n marks1[a]= b\n info[\"students\"]=stu\n info[\"subjects\"]=subjects\n return {\"info\":info,\"marks\":marks1}\n\n\ndef getLoginJson(mail,date,sub,cls):\n for j in config.teachers.find():\n if mail == j[\"data\"][\"mail\"] and date == j[\"data\"][\"date\"] and sub == j[\"data\"][\"sub\"] and cls==j[\"data\"][\"cls\"]:\n data = j\n data[\"_id\"]=str(data[\"_id\"])\n return data\n\ndef student_task(class_name,subject_name):\n print(class_name)\n print(subject_name)\n for i in config.student_tasks.find():\n print(\"hellooooo\")\n if class_name == i[\"assignedClass\"] and subject_name == i[\"assignedSubject\"]:\n data = i\n data[\"_id\"]=str(data[\"_id\"])\n return data\n", "id": "5873770", "language": "Python", "matching_score": 5.960269451141357, "max_stars_count": 0, "path": "python/utils/utils.py" }, { "content": "import json\nfrom pymongo import MongoClient\n\n\nclient = MongoClient(\"mongodb+srv://himanshu:[email protected]/myFirstDatabase?authSource=admin&replicaSet=atlas-13hnoa-shard-0&w=majority&readPreference=primary&appname=MongoDB%20Compass&retryWrites=true&ssl=true\",\n tlsAllowInvalidCertificates=True, tls=True)\ndb = client.AtmPohu\ndb2 = client.students\ndb3 = client.attendaceqr\ndb4 = client.attendance\ndb5=client.studentmarks\nstudenttask = client.StudentTask\ncollectionSC=db.studyCentralInsert\nparentCollection=db.ParentsList\nstudent_tasks = studenttask.student_task\nclass1 = db4.class1\nclass2 = db4.class2\nclass3 = db4.class3\nclass4 = db4.class4\nclass5 = db4.class5\nclass6 = db4.class6\nclass7 = db4.class7\nclass8 = db4.class8\nclass9 = db4.class9\nclass10 = db4.class10\nnursery = db4.nursery\nteachers=db4.teacherattendance\nlkg = db4.L.K.G\nukg = db4.U.K.G\ngarbage = db4.garbage\ncollectionQR = db3.qr\ncollection = db.Users\ncollection1 = db.Task\ncollectionStaff = db.Staff\ncollectionResponsibilities = db.Responsibilities\ncollection2 = db.Roles\ncollectionStatus = db.TaskStatus\ncollectionClassInfo = db.ClassInfo\ncollectionTeacherAssignments = db.teacherAssignments\ncollectionMeetings = db.meetings\ncollectionMark=db5.studentmark\nbroadcast = db.broadcast\nreport = client.attendance\nclass1 = report.class1\nclass2 = report.class2\nclass3 = report.class3\nclass4 = report.class4\nclass5 = report.class5\nclass6 = report.class6\nclass7 = report.class7\nclass8 = report.class8\nclass9 = report.class9\nclass10 = report.class10\nlkg = report.L.K.G\nmarks = client.marksreport\nclass1M = marks.class1\nclass2M = 
marks.class2\nclass3M = marks.class3\nclass4M = marks.class4\nclass5M = marks.class5\nclass6M = marks.class6\nclass7M = marks.class7\nclass8M = marks.class8\nclass9M = marks.class9\nclass10M = marks.class10\nlkgM = marks.L.K.G\n\n\neventTemplate = {\n 'summary': 'Google I/O 2015',\n 'location': '800 Howard St., San Francisco, CA 94103',\n 'description': 'A chance to hear more about Google\\'s developer products.',\n 'start': {\n 'dateTime': '2021-05-28T09:00:00-07:00',\n 'timeZone': 'Asia/Kolkata',\n },\n 'end': {\n 'dateTime': '2021-05-28T17:00:00-07:00',\n 'timeZone': 'Asia/Kolkata',\n },\n 'recurrence': [\n 'RRULE:FREQ=DAILY;COUNT=1'\n ],\n 'attendees': [\n {'email': '<EMAIL>'}\n ],\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n\n\nuserDetailsTemplate = {\n \"primaryEmail\": \"<EMAIL>\",\n \"name\": {\n \"givenName\": \"Test\",\n \"familyName\": \"User\"\n },\n \"suspended\": False,\n \"password\": \"<PASSWORD>\",\n \"hashFunction\": \"SHA-1\",\n \"changePasswordAtNextLogin\": True,\n \"ipWhitelisted\": False,\n \"ims\": [\n {\n \"type\": \"work\",\n \"protocol\": \"gtalk\",\n \"im\": \"<EMAIL>\",\n \"primary\": True\n }\n ],\n \"emails\": [\n {\n \"address\": \"<EMAIL>\",\n \"type\": \"home\",\n \"customType\": \"\",\n \"primary\": True\n }\n ],\n \"addresses\": [\n {\n \"type\": \"work\",\n \"customType\": \"\",\n \"streetAddress\": \"1600 Amphitheatre Parkway\",\n \"locality\": \"Mountain View\",\n \"region\": \"CA\",\n \"postalCode\": \"94043\"\n }\n ],\n \"externalIds\": [\n {\n \"value\": \"12345\",\n \"type\": \"custom\",\n \"customType\": \"employee\"\n }\n ],\n \"relations\": [\n {\n \"value\": \"Mom\",\n \"type\": \"mother\",\n \"customType\": \"\"\n },\n {\n \"value\": \"manager\",\n \"type\": \"referred_by\",\n \"customType\": \"\"\n }\n ],\n \"organizations\": [\n {\n \"name\": \"Google Inc.\",\n \"title\": \"SWE\",\n \"primary\": True,\n \"type\": \"work\",\n \"description\": \"Software engineer\"\n }\n ],\n \"phones\": [\n {\n \"value\": \"+1 nnn nnn nnnn\",\n \"type\": \"work\"\n }\n ],\n \"includeInGlobalAddressList\": True\n}", "id": "11808328", "language": "Python", "matching_score": 0.9433717727661133, "max_stars_count": 0, "path": "python/config.py" }, { "content": "import urllib3 as urllib2\nimport json\n\ndef get_auth_token():\n \"\"\"\n get an auth token\n \"\"\"\n req=urllib2.Request(\"https://accounts.google.com/o/oauth2/token\")\n response=urllib2.urlopen(req)\n html=response.read()\n json_obj=json.loads(html)\n token_string=json_obj[\"token\"].encode(\"ascii\",\"ignore\")\n return token_string\n\ndef get_response_json_object(url, auth_token):\n \"\"\"\n returns json object with info\n \"\"\"\n auth_token=get_auth_token()\n req=urllib2.Request(url, None, {\"Authorization\": \"Bearer %s\" %auth_token})\n response=urllib2.urlopen(req)\n html=response.read()\n json_obj=json.loads(html)\n return json_obj\n\nauth_token = get_auth_token()\njs = get_response_json_object(auth_token, url=\"https://accounts.google.com/o/oauth2/auth\")", "id": "6590951", "language": "Python", "matching_score": 0.10970672219991684, "max_stars_count": 0, "path": "python/test.py" } ]
1.998434
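Note: the helpers embedded above in python/utils.py (e.g. checkforLogin, getstatus, getJson) iterate over every document via collection.find() and filter in Python. A minimal pymongo sketch of the same lookup done server-side is shown below; the database and collection names (AtmPohu, Users) come from the repo's config.py, while the connection string and function name are illustrative assumptions, not part of the source.

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # hypothetical connection string
users = client.AtmPohu.Users  # same database/collection names as config.py

def find_user(primary_email, hashed_password):
    # MongoDB applies the filter itself instead of Python scanning every document.
    return users.find_one({"primaryEmail": primary_email, "password": hashed_password})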
DH4CK1
[ { "content": "# CodedBotBy: <NAME>\r\n#-*- coding: utf-8 -*-\r\nimport telebot\r\nimport requests\r\nimport os\r\nimport re\r\nimport json\r\nfrom requests.exceptions import *\r\nfrom bs4 import BeautifulSoup as bs\r\nfrom flask import Flask,request\r\nfrom lib import *\r\n\r\nbot = telebot.TeleBot(token)\r\nr = requests.Session()\r\n\r\napp = Flask(__name__)\r\n\r\[email protected]_handler(commands=['start'])\r\ndef start(message):\r\n bot.reply_to(message,\"📌 Press /help to show information!\")\r\n\r\[email protected]_handler(commands=['help'])\r\ndef help(message):\r\n bot.reply_to(message,'''----{>🤖 Manga Bot 🤖<}----\r\n\r\nUse command:\r\n\r\n/help - ❓information\r\n/search <mangaName> - 🔎 manga\r\n/download <mangaName> <Chapter> - ⬇️ manga''')\r\n\r\[email protected]_handler(commands=['search'])\r\ndef search(message):\r\n\ttext = message.text\r\n\tid = re.findall(\"'id': (.*?),\",str(message))\r\n\tid = id[len(id)-1]\r\n\treply = False\r\n\tif len(text.split()) == 1:\r\n\t\tbot.reply_to(message,\"❌ Please enter manga name! Use: /search <mangaName>\")\r\n\t\treturn \"salah!\"\r\n\ttry:\r\n\t\tnime = text.split(\" \")\r\n\t\tdel nime[0]\r\n\t\tnim = \" \".join(nime)\r\n\texcept:\r\n\t\treply = \"❌ Please enter manga name! Use: /search <mangaName>\"\r\n\tdata = search_manga(nim)\r\n\tif data:\r\n\t\tname = data['name']\r\n\t\tchapter = data['chapter']\r\n\t\tdesc = data['desc']\r\n\t\timg = data['img']\r\n\t\tcapt = \"✅ manga is found!\\n\\n\"+\"MangaName: \"+name+\"\\nAll_chapter: \"+chapter+\"\\n\\n-------<|Description|>-------\\n\\n\"+desc\r\n\t\tsend_Img(id, img, capt=capt)\r\n\telse:\r\n\t\treply = \"❌ Maybe manga not found!\"\r\n\tif reply:\r\n\t\tbot.reply_to(message,reply)\r\n\r\[email protected]_handler(commands=['download'])\r\ndef down(message):\r\n\ttext = message.text\r\n\tid = re.findall(\"'id': (.*?),\",str(message))\r\n\tid = id[len(id)-1]\r\n\treply = False\r\n\tif len(text.split()) == 1:\r\n\t\tbot.reply_to(message,\"❌ Please enter manga name! Use: /download <manaName> <chapter>\")\r\n\t\treturn \"salah!\"\r\n\ttry:\r\n\t\tbot.reply_to(message, \"⏱ Please wait........\")\r\n\t\targ = text.split()\r\n\t\tend = len(arg)-1\r\n\t\tchapter = arg[end]\r\n\t\tdel arg[end]\r\n\t\tdel arg[0]\r\n\texcept:\r\n\t\treply = \"❌ Please enter manga name! Use: /download <manaName> <chapter>\"\r\n\r\n\tnimex = \" \".join(arg)\r\n\tnimexx = search_manga(nimex)\r\n\tmanga = nimexx['name']\r\n\tif nimexx:\r\n\t\tpdf = download(nimexx[\"name\"], chapter)\r\n\t\tsend_File(id, pdf, capt=\"✅ Success Downloading!\\n\\nManga: \"+manga+\"\\nChapter: \"+chapter)\r\n\telse:\r\n\t\treply =\"❌ Maybe manga/chapter not found!\"\r\n\tif reply:\r\n\t\tsend_Msg(id, reply)\r\n\r\n#####--Uses For Loong Pooling method --#####\r\n#bot.remove_webhook()\r\n#print('bot aktif')\r\n#bot.polling()\r\n############################################\r\n\r\[email protected]('/' + token, methods=['POST'])\r\ndef getMessage():\r\n bot.process_new_updates([telebot.types.Update.de_json(request.stream.read().decode(\"utf-8\"))])\r\n return \"oke\", 200\r\n\r\[email protected]('/')\r\ndef webhook():\r\n bot.remove_webhook()\r\n bot.set_webhook(url='https://yourbotdomain.com/'+api)\r\n return 'oke',200\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='0.0.0.0',port=int(os.environ.get('PORT','5000')),debug=True)\r\n\r\n", "id": "11676324", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "bot.py" } ]
0
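Note: bot.py above recovers the chat id by regex-matching str(message); pyTelegramBotAPI exposes it directly on the handler argument as message.chat.id. A minimal sketch in the same telebot handler style follows; the token and reply text are placeholders, not values from the repo.

import telebot

bot = telebot.TeleBot("YOUR_TOKEN")  # placeholder token

@bot.message_handler(commands=['search'])
def search(message):
    chat_id = message.chat.id  # direct attribute access, no regex over str(message)
    bot.send_message(chat_id, "searching...")  # placeholder reply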
swolchok
[ { "content": "import torch\nimport nestedtensor\nimport utils\nfrom torch.nn import functional as F\n\nimport random\n\n\nclass DETRNestedTensor(object):\n def __init__(self, tensors, mask):\n self.tensors = tensors\n self.mask = mask\n\n def to(self, *args, **kwargs):\n cast_tensor = self.tensors.to(*args, **kwargs)\n cast_mask = self.mask.to(\n *args, **kwargs) if self.mask is not None else None\n return type(self)(cast_tensor, cast_mask)\n\n def decompose(self):\n return self.tensors, self.mask\n\n @classmethod\n def from_tensor_list(cls, tensor_list):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n # TODO make it support different-sized images\n max_size = tuple(max(s)\n for s in zip(*[img.shape for img in tensor_list]))\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = (len(tensor_list),) + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1],\n : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return cls(tensor, mask)\n\n def __repr__(self):\n return repr(self.tensors)\n", "id": "4843961", "language": "Python", "matching_score": 1.450083613395691, "max_stars_count": 229, "path": "test/detr_nestedtensor.py" }, { "content": "import traceback\nimport functools\nimport pdb\nimport sys\nimport torch\nimport nestedtensor\nimport unittest\nfrom utils_test_case import TestCase\nimport random\nimport utils\nfrom torch import nn\nimport math\n\nclass Joiner(nn.Sequential):\n def __init__(self, backbone, position_embedding):\n super().__init__(backbone, position_embedding)\n\n def forward(self, tensor_list: nestedtensor.NestedTensor):\n xs = self[0](tensor_list)\n out = []\n pos = []\n for _, x in xs.items():\n out.append(x)\n pos.append(self[1](x))\n\n return out, pos\n", "id": "10502987", "language": "Python", "matching_score": 0.46153923869132996, "max_stars_count": 229, "path": "test/joiner.py" }, { "content": "import torch\nimport nestedtensor\nimport nestedtensor\nimport utils\n\n\ndef vmap(fn):\n def decorator(arg):\n if torch.is_tensor(arg):\n return fn(arg)\n else:\n def asd(x):\n return fn(x)\n return arg.jit_apply(torch.jit.script(asd))\n return decorator\n\n\[email protected]\ndef my_fun(x):\n x = x + 1\n y = x.abs()\n return y\n\n# print(e)\n\n\ndef gen_current():\n n = nestedtensor.as_nested_tensor(\n [torch.randn(256, 128).to(device='cuda') for _ in range(128)])\n\n def _algorithm():\n n1 = n + 1\n n1.abs()\n\n return _algorithm\n\n\ndef gen_jit():\n\n n = nestedtensor._C._ListNestedTensor(\n [torch.randn(256, 128).to(device='cuda') for _ in range(128)])\n\n def gen_my_fun(scalar, tensor):\n @torch.jit.ignore\n def get_scalar():\n return scalar\n\n @torch.jit.ignore\n def get_tensor():\n return tensor\n\n @torch.jit.script\n def my_fun(x, y):\n x = x + get_scalar()\n x = x + get_tensor()\n y = y + x.abs()\n return y\n return my_fun\n my_fun = gen_my_fun(3.0, torch.randn(1).to(device='cuda'))\n\n def _algorithm_jit():\n nestedtensor._C.jit_apply_function((n, n), my_fun)\n\n return _algorithm_jit\n\n\nif __name__ == \"__main__\":\n # print(utils.benchmark_fn(alg, use_cprofile=True))\n # alg = gen_list_nested_tensor_construction()\n # print(utils.benchmark_fn(alg))\n alg1 = 
gen_current()\n print(utils.benchmark_fn(alg1))\n alg2 = gen_jit()\n print(utils.benchmark_fn(alg2))\n", "id": "9506385", "language": "Python", "matching_score": 3.6015944480895996, "max_stars_count": 229, "path": "benchmarks/jit_apply.py" }, { "content": "from nestedtensor import torch\nimport utils\n\nimport random\n\n\ndef gen_list_nested_tensor_construction():\n tensors = [torch.rand(random.randint(500, 1500), 25600) for _ in range(20)]\n def _algorithm():\n torch._ListNestedTensor(tensors)\n return _algorithm\n\ndef gen_list_nested_tensor_unbind():\n nested_tensor = torch._ListNestedTensor([torch.rand(random.randint(500, 1500), 25600) for _ in range(20)])\n def _algorithm():\n nested_tensor.unbind()\n return _algorithm\n\nif __name__ == \"__main__\":\n # print(utils.benchmark_fn(alg, use_cprofile=True))\n # alg = gen_list_nested_tensor_construction()\n # print(utils.benchmark_fn(alg))\n alg = gen_list_nested_tensor_unbind()\n print(utils.benchmark_fn(alg))\n", "id": "6840660", "language": "Python", "matching_score": 0.8319302797317505, "max_stars_count": 229, "path": "benchmarks/basic.py" }, { "content": "import torch\nimport nestedtensor\nimport utils\n\nimport random\n\n# Performance tanks hard for lots of small Tensors as expected\nRAND_INTS = [random.randint(10, 30) for _ in range(2000)]\nRAND_INTS = [random.randint(100, 300) for _ in range(20)]\nDEVICE = torch.device('cpu')\n\n\ndef gen_t_mul():\n tensor = torch.cat([torch.rand(i, 2560).reshape(-1) for i in RAND_INTS])\n tensor1 = tensor.to(DEVICE)\n tensor2 = tensor.to(DEVICE).clone()\n\n def t():\n tensor1.mul(tensor2)\n return t\n\n\ndef gen_t_loop_mul():\n tensors1 = [torch.rand(i, 2560).to(DEVICE) for i in RAND_INTS]\n tensors2 = [torch.rand(i, 2560).to(DEVICE) for i in RAND_INTS]\n\n def t_loop():\n for t1, t2 in zip(tensors1, tensors2):\n t1.mul(t2)\n return t_loop\n\n\ndef gen_nt_mul():\n nested_tensor1 = nestedtensor.nested_tensor(\n [torch.rand(i, 2560).to(DEVICE) for i in RAND_INTS])\n nested_tensor2 = nestedtensor.nested_tensor(\n [torch.rand(i, 2560).to(DEVICE) for i in RAND_INTS])\n\n def nt():\n nested_tensor1.mul(nested_tensor2)\n return nt\n\ndef gen_nt_sum():\n nested_tensor1 = nestedtensor.nested_tensor(\n [torch.rand(i, 2560).to(DEVICE) for i in RAND_INTS], requires_grad=True)\n\n def nt():\n nested_tensor1 #.sum().backward()\n return nt\n\n\nif __name__ == \"__main__\":\n # print(utils.benchmark_fn(gen_t_mul()))\n # print(utils.benchmark_fn(gen_t_loop_mul()))\n print(utils.benchmark_fn(gen_nt_mul(), 1.0))\n # print(utils.benchmark_fn(gen_nt_sum()))\n", "id": "1327398", "language": "Python", "matching_score": 3.677332878112793, "max_stars_count": 229, "path": "benchmarks/binary.py" }, { "content": "import torch\nimport nestedtensor\nimport utils\n\nimport random\n\n# Performance tanks hard for lots of small Tensors as expected\nRAND_INTS = [random.randint(10, 30) for _ in range(2000)]\nRAND_INTS = [random.randint(1000, 3000) for _ in range(20)]\n\n\ndef gen_t_cos():\n tensor = torch.cat([torch.rand(i, 2560).reshape(-1) for i in RAND_INTS])\n tensor = tensor.cuda()\n\n def t():\n tensor.cos().sum().backward()\n return t\n\n\ndef gen_t_loop_cos():\n tensors = [torch.rand(i, 2560).cuda() for i in RAND_INTS]\n\n def t_loop():\n for t in tensors:\n t.cos().sum().backward()\n return t_loop\n\n\ndef gen_nt_cos():\n nested_tensor = nestedtensor.nested_tensor(\n [torch.rand(i, 2560) for i in RAND_INTS], device=torch.device('cuda'), dtype=torch.float)\n\n def nt():\n nested_tensor.cos().sum().backward()\n return 
nt\n\n\nif __name__ == \"__main__\":\n print(utils.benchmark_fn(gen_t_cos()))\n print(utils.benchmark_fn(gen_t_loop_cos()))\n print(utils.benchmark_fn(gen_nt_cos()))\n", "id": "2885941", "language": "Python", "matching_score": 2.9635775089263916, "max_stars_count": 229, "path": "benchmarks/unary.py" }, { "content": "import torch\nimport nestedtensor\nimport utils\nimport torchvision\n\nimport random\n\n# Performance tanks hard for lots of small Tensors as expected\nRAND_INTS = [random.randint(10, 30) for _ in range(2000)]\nRAND_INTS = [random.randint(100, 300) for _ in range(20)]\n\nbackbone = torchvision.models.resnet.__dict__['resnet50'](\n pretrained=True,\n replace_stride_with_dilation=[False, True, True])\n\nreturn_layers = {'layer4': 'out'}\nMODEL = torchvision.models._utils.IntermediateLayerGetter(\n backbone, return_layers=return_layers).cuda()\n\n\ndef gen_t_loop_segmentation():\n tensors = [torch.rand(1, 3, i, 256).cuda() for i in RAND_INTS]\n\n def t_loop():\n for t in tensors:\n MODEL(t)['out'].sum().backward()\n return t_loop\n\n\ndef gen_nt_segmentation():\n nested_tensor = nestedtensor.nested_tensor(\n [torch.rand(3, i, 256) for i in RAND_INTS], device=torch.device('cuda'), dtype=torch.float)\n\n def nt():\n MODEL(nested_tensor)['out'].sum().backward()\n return nt\n\n\nif __name__ == \"__main__\":\n # print(utils.benchmark_fn(gen_t_loop_segmentation(), 10.0))\n print(utils.benchmark_fn(gen_nt_segmentation(), 2.0))\n", "id": "6549455", "language": "Python", "matching_score": 2.3945152759552, "max_stars_count": 229, "path": "benchmarks/segmentation.py" }, { "content": "import torch\nimport nestedtensor\nimport utils\n\nimport random\nrandom.seed(1010)\n\nBDIM=10\n\n# Performance tanks hard for lots of small Tensors as expected\nRAND_INTS = [random.randint(10, 30) for _ in range(2000)]\n\nOUTDIM=256\n\nTENSORS0 = [torch.rand(i, OUTDIM).cuda() for i in RAND_INTS]\n\ndef gen_t_matmul():\n nt0 = nestedtensor.nested_tensor(TENSORS0, device=torch.device('cuda'), dtype=torch.float)\n data, _ = nt0.to_tensor_mask()\n t1 = torch.randn(OUTDIM, 512).cuda()\n\n def t():\n torch.matmul(data, t1)\n return t\n\n\[email protected]_mode()\ndef gen_nt_matmul():\n nt0 = nestedtensor.nested_tensor(TENSORS0, device=torch.device('cuda'), dtype=torch.float)\n t1 = torch.randn(OUTDIM, 512).cuda()\n\n def nt():\n torch.matmul(nt0, t1)\n return nt\n\n\nif __name__ == \"__main__\":\n print(utils.benchmark_fn(gen_t_matmul()))\n print(utils.benchmark_fn(gen_nt_matmul()))\n", "id": "10818566", "language": "Python", "matching_score": 3.127362012863159, "max_stars_count": 229, "path": "benchmarks/matmul.py" }, { "content": "import torch\nimport nestedtensor\nimport utils\n\nimport random\n\n# Performance tanks hard for lots of small Tensors as expected\nRAND_INTS = [random.randint(10, 30) for _ in range(2000)]\n\n\ndef gen_nt_unbind():\n nested_tensor = nestedtensor.nested_tensor(\n [torch.rand(i, 2560) for i in RAND_INTS])\n\n def nt():\n nested_tensor.unbind()\n return nt\n\n\ndef gen_ant_unbind():\n nested_tensor = nestedtensor.as_nested_tensor(\n [torch.rand(i, 2560) for i in RAND_INTS])\n\n def ant():\n nested_tensor.unbind()\n return ant\n\n\ndef gen_nt_unbind_2():\n nested_tensor = nestedtensor.nested_tensor(\n [[torch.rand(i, 25) for i in RAND_INTS] for j in range(100)])\n\n def nt_2():\n [t.unbind() for t in nested_tensor.unbind()]\n return nt_2\n\n\ndef gen_ant_unbind_2():\n nested_tensor = nestedtensor.as_nested_tensor(\n [[torch.rand(i, 25) for i in RAND_INTS] for j in range(100)])\n\n def 
ant_2():\n [t.unbind() for t in nested_tensor.unbind()]\n return ant_2\n\n\nif __name__ == \"__main__\":\n print(utils.benchmark_fn(gen_nt_unbind()))\n print(utils.benchmark_fn(gen_ant_unbind()))\n print(utils.benchmark_fn(gen_nt_unbind_2()))\n print(utils.benchmark_fn(gen_ant_unbind_2()))\n", "id": "8507516", "language": "Python", "matching_score": 0.41253364086151123, "max_stars_count": 229, "path": "benchmarks/unbind.py" }, { "content": "import torch\nimport time\nimport nestedtensor\n\n\[email protected]_mode()\ndef benchmark_torch_function(iters, f, *args, **kwargs):\n f(*args, **kwargs)\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n start_event.record()\n else:\n t0 = time.time()\n for _ in range(iters):\n f(*args, **kwargs)\n if torch.cuda.is_available():\n end_event.record()\n torch.cuda.synchronize()\n return start_event.elapsed_time(end_event) * 1e3\n else:\n return (time.time() - t0) * 1e6\n\n\ndef run(bdim, embedding_dim, nhead, min_t, max_t, iters, device):\n import random\n random.seed(1010)\n\n # The following is meant to emulate the lenghts of randomly sampled tokenized sentences\n lengths = [random.randint(min_t, max_t) for _ in range(bdim)]\n lengths_mean = torch.tensor(lengths, dtype=torch.float).mean().item()\n lengths_std = torch.tensor(lengths, dtype=torch.float).std().item()\n\n # List of sentence embeddings\n tensors = [torch.rand(i, embedding_dim) for i in lengths]\n # Create packed NestedTensor\n nt = nestedtensor.nested_tensor(tensors, device=device, dtype=torch.float)\n\n # Create MHA with self-attention in mind\n mha = torch.nn.MultiheadAttention(embedding_dim, nhead).to(device).eval()\n\n # Create regular padded Tensor with corresponding mask\n data, mask = nt.to_tensor_mask(mask_dim=2)\n # Prepare input for torch.nn.MHA, which is batch second for Tensor input\n data = data.transpose(0, 1)\n not_mask = torch.logical_not(mask)\n\n # Comparison test to show correctness and API differences\n with torch.inference_mode():\n nt_output, _ = mha(nt, nt, nt, need_weights=False)\n t_output, _ = mha(data, data, data, key_padding_mask=not_mask, need_weights=False)\n nt_output_padded = nt_output.to_padded_tensor(padding=0)\n t_output = t_output.transpose(0, 1)\n # Fill in zero for masked-out values to enable comparison\n t_output = t_output * mask.unsqueeze(-1)\n # Tolerances taken from torch/testing/_core.py\n assert torch.isclose(nt_output_padded, t_output, rtol=1e-4, atol=1e-5).all().item()\n\n # Time NT version\n nt_time = benchmark_torch_function(iters, mha, nt, nt, nt, need_weights=False)\n\n # Amount of storage used for padding only\n percentage_padded = 100 * (data.numel() - nt.numel()) / data.numel()\n\n # Time Tensor version\n t_time = benchmark_torch_function(iters, mha, data, data, data, key_padding_mask=not_mask, need_weights=False)\n\n print(f\"batch size: {bdim:4.0f}, embedding dim: {embedding_dim}, nhead: {nhead}, T mean:{lengths_mean:5.0f}, T std: {lengths_std:4.0f}\", end='')\n print(f\", padding: {percentage_padded:3.0f}%, NT: {nt_time/iters:4.0f}us, T: {t_time/iters:4.0f}us, Speedup: {t_time/nt_time:3.2f}x\")\n\n\ndevice = torch.device('cpu')\nif torch.cuda.is_available():\n print(\"CUDA device: \", torch.cuda.get_device_name(0))\n device = torch.device('cuda')\niters = 10\nfor nhead in [2, 4, 8]:\n print(\"\")\n for embed_dim in [1024, 512, 256, 128]:\n print(\"\")\n for min_t, max_t in [(16, 128), (32, 128), (64, 128), (128, 128)]:\n 
run(256, embed_dim, nhead, min_t, max_t, iters, device)\n", "id": "8265449", "language": "Python", "matching_score": 5.37381649017334, "max_stars_count": 229, "path": "benchmarks/mha_cuda.py" }, { "content": "import torch\nimport time\nimport nestedtensor\n\n\[email protected]_mode()\ndef benchmark_torch_function(iters, f, *args):\n f(*args)\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n start_event.record()\n else:\n t0 = time.time()\n for _ in range(iters):\n f(*args)\n if torch.cuda.is_available():\n end_event.record()\n torch.cuda.synchronize()\n return start_event.elapsed_time(end_event)\n else:\n return (time.time() - t0) * 1e3\n\n\n# def run(bdim, embedding_dim, out_dim, min_t, max_t, iters, device):\ndef run(bdim, nchannel, min_t, max_t, iters, device):\n import random\n random.seed(1010)\n\n # The following is meant to emulate the lenghts of randomly sampled tokenized sentences\n lengths1 = [random.randint(min_t, max_t) for _ in range(bdim)]\n lengths2 = [random.randint(min_t, max_t) for _ in range(bdim)]\n\n # List of sentence embeddings\n tensors = [torch.rand(nchannel, l1, l2).to(device=device, dtype=torch.float) for (l1, l2) in zip(lengths1, lengths2)]\n # Create packed NestedTensor\n nt = nestedtensor.nested_tensor(tensors, device=device, dtype=torch.float)\n\n lin = torch.nn.Conv2d(nchannel, nchannel, (1, 1), bias=False).to(device)\n\n def _loop(tensors):\n result = []\n for t in tensors:\n result.append(lin(t.unsqueeze(0)).squeeze(0))\n return result\n\n nt_time = benchmark_torch_function(iters, lin, nt)\n t_time = benchmark_torch_function(iters, _loop, tensors)\n\n # print(f\"batch size: {bdim:4.0f}, embedding dim: {embedding_dim}, out_dim: {out_dim}, T mean:{lengths_mean:5.0f}, T std: {lengths_std:4.0f}\", end='')\n print(f\"batch size: {bdim:4.0f}, nchannel: {nchannel:4.0f}\", end='')\n # print(f\", padding: {percentage_padded:3.0f}%, NT: {nt_time/iters:4.0f}ms, T: {t_time/iters:4.0f}ms, Speedup: {t_time/nt_time:3.2f}x\")\n print(f\", NT: {nt_time/iters:4.0f}ms, T: {t_time/iters:4.0f}ms, Speedup: {t_time/nt_time:3.2f}x\")\n\n\nif torch.cuda.is_available():\n print(\"CUDA device: \", torch.cuda.get_device_name(0))\niters = 10\nfor nchannel in [3, 128, 256, 512]:\n for min_t, max_t in [(16, 128), (32, 128), (64, 128), (128, 128)]:\n run(256, nchannel, min_t, max_t, iters, torch.device('cuda'))\n break\n", "id": "2556925", "language": "Python", "matching_score": 5.894616603851318, "max_stars_count": 229, "path": "benchmarks/conv2d.py" }, { "content": "import torch\nimport time\nimport nestedtensor\n\n\[email protected]_mode()\ndef benchmark_torch_function(iters, f, *args):\n f(*args)\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n start_event.record()\n else:\n t0 = time.time()\n for _ in range(iters):\n f(*args)\n if torch.cuda.is_available():\n end_event.record()\n torch.cuda.synchronize()\n return start_event.elapsed_time(end_event)\n else:\n return (time.time() - t0) * 1e3\n\n\ndef run(bdim, embedding_dim, out_dim, min_t, max_t, iters, device):\n import random\n random.seed(1010)\n\n # The following is meant to emulate the lenghts of randomly sampled tokenized sentences\n lengths = [random.randint(min_t, max_t) for _ in range(bdim)]\n lengths_mean = torch.tensor(lengths, dtype=torch.float).mean().item()\n lengths_std = 
torch.tensor(lengths, dtype=torch.float).std().item()\n\n # List of sentence embeddings\n tensors = [torch.rand(i, embedding_dim) for i in lengths]\n # Create packed NestedTensor\n nt = nestedtensor.nested_tensor(tensors, device=device, dtype=torch.float)\n # Created regular padded Tensor\n data = nt.to_padded_tensor(padding=0)\n # Amount of storage used for padding only\n percentage_padded = 100 * (data.numel() - nt.numel()) / data.numel()\n\n # Projects embeddings into another space\n lin = torch.nn.Linear(embedding_dim, out_dim).to(device)\n nt_time = benchmark_torch_function(iters, lin, nt)\n t_time = benchmark_torch_function(iters, lin, data)\n\n print(f\"batch size: {bdim:4.0f}, embedding dim: {embedding_dim}, out_dim: {out_dim}, T mean:{lengths_mean:5.0f}, T std: {lengths_std:4.0f}\", end='')\n print(f\", padding: {percentage_padded:3.0f}%, NT: {nt_time/iters:4.0f}ms, T: {t_time/iters:4.0f}ms, Speedup: {t_time/nt_time:3.2f}x\")\n\n\nif torch.cuda.is_available():\n print(\"CUDA device: \", torch.cuda.get_device_name(0))\niters = 10\nfor out_dim in [4096, 2048, 1024, 512, 256]:\n print(\"\")\n for embed_dim in [4096, 2048, 1024, 512, 256]:\n print(\"\")\n for min_t, max_t in [(16, 128), (32, 128), (64, 128), (128, 128)]:\n run(256, embed_dim, out_dim, min_t, max_t, iters, torch.device('cuda'))\n", "id": "7405582", "language": "Python", "matching_score": 2.6217963695526123, "max_stars_count": 229, "path": "benchmarks/linear.py" }, { "content": "import torch\nimport numpy as np\nimport time\nimport random\nimport nestedtensor\nfrom classy_vision.models import build_model\n\n\[email protected]_mode()\ndef benchmark_torch_function(iters, f, *args, **kwargs):\n f(*args, **kwargs)\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n start_event.record()\n else:\n t0 = time.time()\n for _ in range(iters):\n f(*args, **kwargs)\n if torch.cuda.is_available():\n end_event.record()\n torch.cuda.synchronize()\n return start_event.elapsed_time(end_event)\n else:\n return (time.time() - t0)\n\n\[email protected]_mode()\ndef run_benchmark(iters, shapes, model, model_name, bsz):\n ts = []\n for s in shapes:\n inp = torch.randn(*s, dtype=torch.half).cuda()\n ts.append(inp)\n ts_nt = nestedtensor.nested_tensor([t.squeeze(0) for t in ts], device=torch.device('cuda'), dtype=torch.half)\n ts_padded = ts_nt.to_padded_tensor()\n ts_nt = nestedtensor.nested_tensor([t.squeeze(0) for t in ts], device=torch.device('cuda'), dtype=torch.half, channels_last=True)\n\n def _loop():\n model_outputs = []\n for inp in ts:\n model_outputs.append(model(inp))\n return model_outputs\n\n def _padded():\n return model(ts_padded)\n\n # Test\n outputs_nt = model(ts_nt)\n # import time; time.sleep(1)\n # outputs_nt = model(ts_nt)\n # import sys; sys.exit(1)\n model_outputs = _loop()\n for mo, ntmo in zip(model_outputs, outputs_nt.unbind()):\n # Using float16 tolerances from torch/testing/_core.yp\n assert torch.allclose(mo.squeeze(0), ntmo, rtol=1e-3, atol=1e-3)\n\n loop_time = benchmark_torch_function(iters, _loop)\n padded_time = benchmark_torch_function(iters, _padded)\n nt_time = benchmark_torch_function(iters, lambda: model(ts_nt))\n\n shapes_2_array = np.array([s[2] for s in shapes])\n shapes_3_array = np.array([s[3] for s in shapes])\n print(f\"model_name: {model_name.rjust(18)},\", end='')\n print(f\" bsz: {bsz:3.0f},\", end='')\n print(f\" mean±std shapes[2]: 
{shapes_2_array.mean():.2f}±{shapes_2_array.std():.2f},\", end='')\n print(f\" mean±std shapes[3]: {shapes_3_array.mean():.2f}±{shapes_3_array.std():.2f},\", end='')\n print(f\" padded_size: {tuple(ts_padded.size())},\", end='')\n print(f\" loop: {loop_time / iters:7.2f}ms, nt: {nt_time / iters:7.2f}ms, padded: {padded_time / iters:7.2f}ms, speedup: {loop_time / nt_time:.2f}x\")\n\nif __name__ == \"__main__\":\n iters = 10\n\n def _benchmark(model_name, bsz):\n model = build_model({\"name\": model_name})\n model = model.cuda().half().eval()\n random.seed(123)\n shapes = [(1, 3, random.randint(100, 600), random.randint(100, 600)) for _ in range(bsz)]\n run_benchmark(iters, shapes, model, model_name, bsz)\n\n for bsz in [16, 32, 64, 128]:\n _benchmark(\"resnext101_32x4d\", bsz)\n\n for bsz in [16, 32]:\n _benchmark(\"regnet_y_128gf\", bsz)\n", "id": "998400", "language": "Python", "matching_score": 2.104414224624634, "max_stars_count": 229, "path": "benchmarks/classy.py" }, { "content": "import torch\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GATConv\nimport random\nimport time\nimport nestedtensor\nfrom nestedtensor import nested_tensor as ntnt\n\[email protected]_mode()\ndef benchmark_torch_function(iters, f, *args, **kwargs):\n f(*args, **kwargs)\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n start_event.record()\n else:\n t0 = time.time()\n for _ in range(iters):\n f(*args, **kwargs)\n if torch.cuda.is_available():\n end_event.record()\n torch.cuda.synchronize()\n return start_event.elapsed_time(end_event)\n else:\n return (time.time() - t0)\n\n\nnum_features = 1433\nnum_classes = 7\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = GATConv(num_features, 8, heads=8,\n dropout=0.6)\n\n self.conv2 = GATConv(64, num_classes, heads=1, concat=True,\n dropout=0.6)\n\n def forward(self, x, edge_index):\n x = F.dropout(x, p=0.6, training=self.training)\n x = F.elu(self.conv1(x, edge_index))\n x = F.dropout(x, p=0.6, training=self.training)\n x = self.conv2(x, edge_index)\n return F.log_softmax(x, dim=1)\n\n\nclass NTNet(torch.nn.Module):\n def __init__(self):\n super(NTNet, self).__init__()\n self.conv1 = GATConv(num_features, 8, heads=8,\n dropout=0.6)\n\n self.conv2 = GATConv(64, num_classes, heads=1, concat=True,\n dropout=0.6)\n\n def forward(self, x, edge_index):\n x = F.dropout(x, p=0.6, training=self.training)\n x = ntnt([self.conv1(xi, edge_index_i) for (xi, edge_index_i) in zip(x.unbind(), edge_index.unbind())], dtype=x.dtype, device=x.device)\n x = F.elu(x)\n x = F.dropout(x, p=0.6, training=self.training)\n x = ntnt([self.conv2(xi, edge_index_i) for (xi, edge_index_i) in zip(x.unbind(), edge_index.unbind())], dtype=x.dtype, device=x.device)\n return F.log_softmax(x, dim=1)\n\n\ndef create_models(device):\n model = Net().to(device).eval()\n nt_model = NTNet().to(device).eval()\n return model, nt_model\n\ndef create_tensors():\n random.seed(1010)\n nnodes_list = []\n nedges_list = []\n for i in range(50):\n nnodes_list.append(random.randint(100, 4000))\n nedges_list.append(random.randint(8000, 15000))\n \n tensors_x = []\n tensors_edge_index = []\n for nnodes, nedges in zip(nnodes_list, nedges_list):\n x = torch.normal(-10, 4, (nnodes, 1433))\n x[x < 0] = 0.\n x[x > 1] = 1.\n edge_index = torch.randint(0, nnodes, (2, nedges), dtype=torch.int64)\n tensors_x.append(x)\n 
tensors_edge_index.append(edge_index)\n return tensors_x, tensors_edge_index\n\n\[email protected]_mode()\ndef loop(model, tensors_x, tensors_edge_index):\n for x, edge_index in zip(tensors_x, tensors_edge_index):\n model(x, edge_index)\n\n\[email protected]_mode()\ndef nt(nt_model, nt_x, nt_edge_index):\n nt_model(nt_x, nt_edge_index)\n\nif __name__ == \"__main__\":\n device = torch.device('cuda')\n model, nt_model = create_models(device)\n tensors_x, tensors_edge_index = create_tensors()\n print(benchmark_torch_function(10, loop, model, tensors_x, tensors_edge_index))\n nt_x = ntnt(tensors_x, device=device)\n nt_edge_index = ntnt(tensors_edge_index, device=device, dtype=torch.int64)\n print(benchmark_torch_function(10, nt, nt_model, nt_x, nt_edge_index))\n", "id": "560304", "language": "Python", "matching_score": 2.341916084289551, "max_stars_count": 229, "path": "benchmarks/gat.py" }, { "content": "import traceback\nimport functools\nimport pdb\nimport sys\nimport torch\nimport nestedtensor\nimport urllib\n\ndef debug_on(*exceptions):\n if not exceptions:\n exceptions = (BaseException,)\n\n def decorator(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except exceptions:\n info = sys.exc_info()\n traceback.print_exception(*info)\n pdb.post_mortem(info[2])\n\n return wrapper\n\n return decorator\n\n\ndef internet_on():\n try:\n urllib.request.urlopen(\"http://www.google.com\", timeout=1)\n return True\n except urllib.error.URLError as err:\n return False\n\n\ndef _shape_prod(shape_):\n shape = tuple(shape_)\n start = 1\n for s in shape:\n start = start * s\n return start\n\n\ndef random_float_tensor(seed, size, a=22695477, c=1, m=2 ** 32, requires_grad=False):\n \"\"\" Generates random tensors given a seed and size\n https://en.wikipedia.org/wiki/Linear_congruential_generator\n X_{n + 1} = (a * X_n + c) % m\n Using Borland C/C++ values\n The tensor will have values between [0,1)\n Inputs:\n seed (int): an int\n size (Tuple[int]): the size of the output tensor\n a (int): the multiplier constant to the generator\n c (int): the additive constant to the generator\n m (int): the modulus constant to the generator\n \"\"\"\n num_elements = 1\n for s in size:\n num_elements *= s\n\n arr = [(a * seed + c) % m]\n for i in range(num_elements - 1):\n arr.append((a * arr[i] + c) % m)\n\n return torch.tensor(arr, requires_grad=requires_grad).float().view(size) / m\n\n\ndef random_int_tensor(seed, size, low=0, high=2 ** 32, a=22695477, c=1, m=2 ** 32):\n \"\"\" Same as random_float_tensor but integers between [low, high)\n \"\"\"\n return (\n torch.floor(random_float_tensor(seed, size, a, c, m) * (high - low)) + low\n ).to(torch.int64)\n\n\ndef gen_float_tensor(seed, shape, requires_grad=False):\n return random_float_tensor(seed, shape, requires_grad=requires_grad)\n\n\ndef gen_random_int(seed, low=0, high=2 ** 32):\n \"\"\" Returns random integer in [low, high)\n \"\"\"\n return int(random_int_tensor(seed, (), low=low, high=high))\n\n\n# TODO: Something occasionally causes a NaN here...\ndef gen_nested_list(seed, nested_dim, tensor_dim, size_low=1, size_high=10):\n tensors = []\n num_tensors = gen_random_int(\n (seed * nested_dim + seed) * 1024, low=size_low, high=size_high\n )\n assert nested_dim > 0\n if nested_dim == 1:\n for i in range(num_tensors):\n ran = gen_random_int(\n (seed * nested_dim + seed) * (1024 * i), low=size_low, high=size_high\n )\n ran_size = ()\n for _ in range(tensor_dim):\n ran = gen_random_int(ran * 1024, low=size_low, 
high=size_high)\n ran_size = ran_size + (ran,)\n\n tensors.append(gen_float_tensor(ran, ran_size))\n else:\n for _ in range(num_tensors):\n tensors.append(\n gen_nested_list(\n num_tensors * seed,\n nested_dim - 1,\n tensor_dim,\n size_low=size_low,\n size_high=size_high,\n )\n )\n return tensors\n\n\ndef nested_map(fn, data):\n if isinstance(data, list):\n return [nested_map(fn, d) for d in data]\n else:\n return fn(data)\n\n\ndef gen_nested_tensor(\n seed, nested_dim, tensor_dim, size_low=1, size_high=10, constructor=None\n):\n if constructor is None:\n constructor = nestedtensor.as_nested_tensor\n return constructor(\n gen_nested_list(\n seed, nested_dim, tensor_dim, size_low=size_low, size_high=size_high\n )\n )\n\n\ndef get_first_tensor(nested_list):\n if isinstance(nested_list, list):\n return get_first_tensor(nested_list[0])\n else:\n return nested_list\n\ndef get_nn_C_functions():\n return [\n \"relu\",\n \"relu_\",\n \"dropout\",\n \"conv2d\",\n \"max_pool2d\",\n \"batch_norm\",\n \"cross_entropy\",\n \"interpolate\",\n ]\n\ndef get_unary_C_functions():\n return [\n \"abs\",\n \"acos\",\n \"angle\",\n \"asin\",\n \"atan\",\n \"bitwise_not\",\n \"ceil\",\n \"conj\",\n \"cos\",\n \"cosh\",\n \"digamma\",\n \"erf\",\n \"erfc\",\n \"erfinv\",\n \"exp\",\n \"expm1\",\n \"floor\",\n \"frac\",\n \"imag\",\n \"inverse\",\n \"lgamma\",\n \"log\",\n \"log10\",\n \"log1p\",\n \"log2\",\n \"logical_not\",\n \"neg\",\n \"nonzero\",\n \"real\",\n \"reciprocal\",\n # \"round\",\n \"rsqrt\",\n \"sigmoid\",\n \"sign\",\n \"sin\",\n \"sinh\",\n \"sqrt\",\n \"tan\",\n \"tanh\",\n \"trunc\",\n ]\n\n\ndef get_unary_functions():\n return [\n 'abs',\n 'acos',\n 'asin',\n 'atan',\n 'ceil',\n 'clamp', # Requires extra kwargs\n 'clamp_min', # Undocumented\n 'clamp_max', # Undocumented\n 'cos',\n 'cosh',\n 'digamma',\n 'erf',\n 'erfc',\n 'erfinv',\n 'exp',\n 'expm1',\n 'floor',\n # 'fill', Not a unary op\n # 'fmod', # Requires extra kwargs\n 'frac',\n # 'hardshrink', # TODO: Not part of aten\n 'lgamma',\n 'log',\n 'log10',\n 'log1p',\n 'log2',\n 'mvlgamma',\n 'neg',\n # 'nonzero', # TODO: Special case because it modifies dtype - no inplace\n # 'polygamma', # TODO: Undocumented and first argument not Tensor\n # polygamma NOTE: Should change to dispatch on first tensor argument not argument - but then raises questions of mixed tensor vs. 
nestedtensor etc.\n # 'prelu', # TODO: no prelu_out in aten\n 'reciprocal',\n # 'relu', # TODO: no relu_out in aten\n # 'renorm', # TODO: Requires extra kwargs\n # 'round',\n 'rsqrt',\n 'sigmoid',\n 'sign',\n 'sin',\n 'sinh',\n 'sqrt',\n 'tan',\n 'tanh',\n 'trunc']\n\n\ndef get_binary_functions():\n return [\n 'add',\n 'mul',\n 'sub',\n 'div',\n 'pow',\n 'atan2',\n 'remainder',\n 'floor_divide',\n ]\n\n\ndef get_python_rich_comparison_functions():\n return [\n \"lt\",\n \"le\",\n \"eq\",\n \"ne\",\n \"gt\",\n \"ge\",\n ]\n\n\ndef get_pointwise_functions():\n funcs = []\n funcs += get_unary_functions()\n funcs += get_binary_functions()\n funcs += get_python_rich_comparison_functions()\n return funcs\n\n\ndef get_python_binary_arithmetic_operations():\n funcs = [\n \"add\",\n \"sub\",\n \"mul\",\n \"matmul\",\n \"truediv\",\n \"floordiv\",\n \"mod\",\n \"divmod\",\n \"pow\",\n \"lshift\",\n \"rshift\",\n \"and\",\n \"xor\",\n \"or\",\n ]\n return funcs\n\n\ndef get_complete_reductions():\n funcs = [\n 'all',\n 'any',\n 'mean',\n 'prod',\n 'sum',\n ]\n return funcs\n\n\ndef get_random_sampling_operations():\n funcs = [\n \"bernoulli\",\n \"cauchy\",\n \"exponential\",\n \"geometric\",\n \"log_normal\",\n \"normal\",\n \"random\",\n \"uniform\",\n ]\n return funcs\n\n\ndef get_tensorwise_reductions():\n # Only support per-tensor or full reductions.\n funcs = [\n 'argmax',\n 'argmin',\n 'argsort',\n 'cumprod',\n 'cumsum',\n 'std',\n 'var',\n 'max', # may return tuple\n 'median', # may return tuple\n 'min', # may return tuple\n 'mode', # returns tuple\n ]\n return funcs\n\n\ndef get_conversion_functions():\n # Convenience functions for to(torch.float) and such\n funcs = [\n \"bfloat16\",\n \"bool\",\n \"byte\",\n \"char\",\n \"cpu\",\n \"cuda\",\n \"double\",\n \"float\",\n \"half\",\n \"int\",\n \"long\",\n \"short\",\n \"to_dense\",\n \"to_mkldnn\",\n \"to_sparse\",\n ]\n return funcs\n\n\ndef get_fft_ops():\n funcs = [\n \"fft\",\n \"ifft\",\n \"rfft\",\n \"irfft\",\n ]\n return funcs\n\n\ndef get_stft_ops():\n funcs = [\n \"stft\",\n ]\n return funcs\n\n\ndef get_blas_lapack_ops():\n \"\"\"\n These functions all have fixed dimension inputs,\n which makes it easy to think about for NestedTensors\n \"\"\"\n funcs = [\n # BLAS and LAPACK functions\n \"addbmm\",\n \"addmm\",\n \"addmv\",\n \"addr\",\n \"baddbmm\",\n \"bmm\",\n \"chain_matmul\",\n \"cholesky\",\n \"cholesky_inverse\",\n \"cholesky_solve\",\n \"dot\",\n \"eig\",\n \"geqrf\",\n \"ger\",\n \"inverse\",\n \"det\",\n \"logdet\",\n \"slogdet\",\n \"lstsq\",\n \"lu\",\n \"lu_solve\",\n \"lu_unpack\",\n \"matmul\",\n \"matrix_power\",\n \"matrix_rank\",\n \"mm\",\n \"mv\",\n \"orgqr\",\n \"ormqr\",\n \"pinverse\",\n \"qr\",\n \"solve\",\n \"svd\",\n \"symeig\",\n \"trapz\",\n \"triangular_solve\",\n ]\n return funcs\n\n\ndef get_other_ops():\n \"\"\"\n Misc functions based on other classification in torch docs.\n \"\"\"\n funcs = [\n \"bincount\",\n \"broadcast_tensors\",\n \"cartesian_prod\",\n \"cdist\",\n \"combinations\",\n \"cross\",\n \"diag\",\n \"diag_embed\",\n \"diagflat\",\n \"diagonal\",\n \"einsum\",\n \"flatten\",\n \"flip\",\n \"rot90\",\n \"histc\",\n \"meshgrid\",\n \"renorm\",\n \"repeat_interleave\",\n \"roll\",\n \"tensordot\",\n \"trace\",\n \"tril\",\n \"tril_indices\",\n \"triu\",\n \"triu_indices\",\n ]\n return funcs\n\n\ndef get_functionals():\n funcs = [\n \"adaptive_avg_pool2d\",\n \"adaptive_avg_pool3d\",\n \"adaptive_max_pool1d_with_indices\",\n \"adaptive_max_pool2d_with_indices\",\n 
\"adaptive_max_pool3d_with_indices\",\n \"affine_grid\",\n \"alpha_dropout\",\n \"assert_int_or_pair\",\n #\"batch_norm\",\n \"bilinear\",\n \"binary_cross_entropy\",\n \"binary_cross_entropy_with_logits\",\n \"celu\",\n \"cosine_embedding_loss\",\n #\"cross_entropy\",\n \"ctc_loss\",\n #\"dropout\",\n \"dropout2d\",\n \"dropout3d\",\n \"elu\",\n \"embedding\",\n \"embedding_bag\",\n \"feature_alpha_dropout\",\n \"fold\",\n \"fractional_max_pool2d_with_indices\",\n \"fractional_max_pool3d_with_indices\",\n \"gelu\",\n \"glu\",\n \"grid_sample\",\n \"group_norm\",\n \"gumbel_softmax\",\n \"hardshrink\",\n \"hardtanh\",\n \"hinge_embedding_loss\",\n \"instance_norm\",\n #\"interpolate\",\n \"kl_div\",\n \"l1_loss\",\n \"layer_norm\",\n \"leaky_relu\",\n \"linear\",\n \"local_response_norm\",\n \"log_softmax\",\n \"lp_pool1d\",\n \"lp_pool2d\",\n \"max_pool1d\",\n #\"max_pool2d\",\n \"max_pool3d\",\n \"margin_ranking_loss\",\n \"max_pool1d_with_indices\",\n \"max_pool2d_with_indices\",\n \"max_pool3d_with_indices\",\n \"max_unpool1d\",\n \"max_unpool2d\",\n \"max_unpool3d\",\n \"mse_loss\",\n \"multi_head_attention_forward\",\n \"multilabel_margin_loss\",\n \"multilabel_soft_margin_loss\",\n \"multi_margin_loss\",\n \"nll_loss\",\n \"normalize\",\n \"pad\",\n \"pairwise_distance\",\n \"poisson_nll_loss\",\n \"prelu\",\n #\"relu\",\n #\"relu_\",\n \"relu6\",\n \"rrelu\",\n \"selu\",\n \"sigmoid\",\n \"smooth_l1_loss\",\n \"soft_margin_loss\",\n \"softmax\",\n \"softmin\",\n \"softsign\",\n \"tanh\",\n \"tanhshrink\",\n \"threshold\",\n \"triplet_margin_loss\",\n \"unfold\",\n \"upsample\",\n \"upsample_bilinear\",\n \"upsample_nearest\",\n ]\n return funcs\n\ndef cuda_benchmark_torch_function(iters, f, *args):\n f(*args)\n torch.cuda.synchronize()\n start_event = torch.cuda.Event(enable_timing=True)\n end_event = torch.cuda.Event(enable_timing=True)\n start_event.record()\n for _ in range(iters):\n f(*args)\n end_event.record()\n torch.cuda.synchronize()\n return (start_event.elapsed_time(end_event) * 1.0e-3) / iters\n", "id": "8109386", "language": "Python", "matching_score": 3.4372360706329346, "max_stars_count": 0, "path": "test/utils.py" }, { "content": "import torch\nimport nestedtensor\nimport utils\nimport torch.nn.functional as F\nimport sys\nimport random\nimport argparse\nimport itertools\nimport re\nimport csv\n\n\nBenchmarks = {}\n\ndef register_benchmark(fn):\n Benchmarks[fn.__name__] = fn\n\n#\n# relu\n#\n@register_benchmark\ndef relu__tensor_iter(self):\n def _relu_tensor_iter():\n for t in self.inputs:\n torch.nn.functional.relu_(t)\n\n return _relu_tensor_iter\n\n@register_benchmark\ndef relu__tensor_pad(self):\n tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()\n\n def _relu_tensor_pad():\n torch.nn.functional.relu_(tensor)\n\n return _relu_tensor_pad\n\n@register_benchmark\ndef relu__nt(self):\n nt = nestedtensor.nested_tensor(self.inputs)\n\n def _relu_nt():\n torch.nn.functional.relu_(nt)\n\n return _relu_nt\n\n@register_benchmark\ndef relu_tensor_iter(self):\n def _relu_tensor_iter():\n for t in self.inputs:\n torch.nn.functional.relu(t)\n\n return _relu_tensor_iter\n\n@register_benchmark\ndef relu_tensor_pad(self):\n tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()\n\n def _relu_tensor_pad():\n torch.nn.functional.relu(tensor)\n\n return _relu_tensor_pad\n\n@register_benchmark\ndef relu_nt(self):\n nt = nestedtensor.nested_tensor(self.inputs)\n\n def _relu_nt():\n torch.nn.functional.relu(nt)\n\n return _relu_nt\n\n#\n# 
conv2d\n#\n@register_benchmark\ndef conv2d_iter(self, module):\n def _conv2d_tensor_iter():\n for t in self.inputs:\n module(t.unsqueeze(0)).squeeze(0)\n\n return _conv2d_tensor_iter\n\n@register_benchmark\ndef conv2d_pad(self, module):\n tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()\n\n def _conv2d_tensor():\n module(tensor)\n\n return _conv2d_tensor\n\n@register_benchmark\ndef conv2d_nt(self, module):\n nt = nestedtensor.nested_tensor(self.inputs)\n\n def _conv2d():\n module(nt)\n\n return _conv2d\n\n#\n# batch_norm\n#\n@register_benchmark\ndef batch_norm_tensor_iter(self, module):\n def _batch_norm_tensor_iter():\n for t in self.inputs:\n module(t.unsqueeze(0)).squeeze(0)\n\n return _batch_norm_tensor_iter\n\n@register_benchmark\ndef batch_norm_tensor_pad(self, module):\n tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()\n\n def _batch_norm_tensor_pad():\n module(tensor)\n\n return _batch_norm_tensor_pad\n\n@register_benchmark\ndef batch_norm_nt(self, module):\n nt = nestedtensor.nested_tensor(self.inputs)\n\n def _batch_norm_nt():\n module(nt)\n\n return _batch_norm_nt\n\n#\n# max_pool2d\n#\n@register_benchmark\ndef max_pool2d_tensor_iter(self, module):\n def _max_pool2d_tensor_iter():\n for t in self.inputs:\n module(t.unsqueeze(0)).squeeze(0)\n\n return _max_pool2d_tensor_iter\n\n@register_benchmark\ndef max_pool2d_tensor_pad(self, module):\n tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()\n\n def _max_pool2d_tensor_pad():\n module(tensor)\n\n return _max_pool2d_tensor_pad\n\n@register_benchmark\ndef max_pool2d_nt(self, module):\n nt = nestedtensor.nested_tensor(self.inputs)\n\n def _max_pool2d_nt():\n module(nt)\n\n return _max_pool2d_nt\n\n#\n# cross_entropy\n#\n@register_benchmark\ndef cross_entropy_tensor_iter(self):\n def _cross_entropy_tensor_iter():\n for a, b in zip(self.inputs, self.targets):\n torch.nn.functional.cross_entropy(\n a.unsqueeze(0), b.unsqueeze(0)\n ).squeeze(0)\n\n return _cross_entropy_tensor_iter\n\n@register_benchmark\ndef cross_entropy_tensor_pad(self):\n tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()\n targets, _ = nestedtensor.nested_tensor(self.targets).to_tensor_mask()\n\n def _cross_entropy_tensor_pad():\n torch.nn.functional.cross_entropy(tensor, targets)\n\n return _cross_entropy_tensor_pad\n\n@register_benchmark\ndef cross_entropy_nt(self):\n nt_input = nestedtensor.nested_tensor(self.inputs)\n nt_targets = nestedtensor.nested_tensor(self.targets)\n\n def _cross_entropy_nt():\n torch.nn.functional.cross_entropy(nt_input, nt_targets)\n\n return _cross_entropy_nt\n\n#\n# dropout\n#\n@register_benchmark\ndef dropout_tensor_iter(self):\n def _dropout_tensor_iter():\n for t in self.inputs:\n torch.nn.functional.dropout(t.unsqueeze(0)).squeeze(0)\n\n return _dropout_tensor_iter\n\n@register_benchmark\ndef dropout_tensor_pad(self):\n tensor, _ = nestedtensor.nested_tensor(self.inputs).to_tensor_mask()\n\n def _dropout_tensor_pad():\n torch.nn.functional.dropout(tensor)\n\n return _dropout_tensor_pad\n\n@register_benchmark\ndef dropout_nt(self):\n nt = nestedtensor.nested_tensor(self.inputs)\n\n def _dropout_nt():\n torch.nn.functional.dropout(nt)\n\n return _dropout_nt\n\n#\n# interpolate\n#\n@register_benchmark\ndef interpolate_tensor_iter(self):\n def _interpolate_tensor_iter():\n for t in self.inputs:\n torch.nn.functional.interpolate(t, t.unsqueeze(0).shape[-2])\n\n return _interpolate_tensor_iter\n\n@register_benchmark\ndef interpolate_tensor_pad(self):\n tensor, _ = 
nestedtensor.nested_tensor(self.inputs).to_tensor_mask()\n\n def _interpolate_tensor_pad():\n torch.nn.functional.interpolate(tensor, tensor[0].unsqueeze(0).shape[-2])\n\n return _interpolate_tensor_pad\n\n@register_benchmark\ndef interpolate_nt(self):\n nt = nestedtensor.nested_tensor(self.inputs)\n input_shape = [y[-2:] for y in nt.nested_size().unbind()]\n def _interpolate_nt():\n torch.nn.functional.interpolate(nt, input_shape)\n\n return _interpolate_nt\n\nclass SegLayersBenchMark(object):\n def __init__(self, args):\n self.args = args\n self.layers = {}\n\n def get_benchmark(self, channels, name, cuda):\n layer = None\n if name.startswith(\"conv2d\"):\n m = re.match(r\"conv2d_([a-z]+)_(\\d+)x(\\d+)\", name)\n if m is None:\n raise ValueError(\"Unsupported parameterization for conv2d layer {}\".format(name))\n benchmark_kind = m.group(1)\n k0 = int(m.group(2))\n k1 = int(m.group(3))\n # Parameters chosen based on dominant settings in\n # https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/segmentation.py#L19\n layer = self.layers.setdefault(\n (name, channels, cuda), torch.nn.Conv2d(channels, channels, kernel_size=(k0, k1), dilation=2, bias=False)\n )\n name = \"conv2d_\" + benchmark_kind\n if name.startswith(\"batch_norm\"):\n layer = self.layers.setdefault(\n (name, cuda), torch.nn.BatchNorm2d(channels, 1e-05, 0.1).eval()\n )\n if name.startswith(\"max_pool2d\"):\n layer = self.layers.setdefault(\n (name, cuda),\n torch.nn.MaxPool2d(\n kernel_size=(2, 2), stride=(2, 2), padding=(0, 0), dilation=(1, 1)\n ),\n )\n try:\n if cuda and layer is not None:\n layer.cuda()\n return Benchmarks[name](self) if layer is None else Benchmarks[name](self, layer)\n except KeyError:\n raise ValueError(\"Benchmark {} is not supported. 
Available benchmarks are\\n{}.\".format(layer,\n \"\\n\".join(sorted(Benchmarks.keys()))))\n\n def run(self):\n params = itertools.product(\n self.args.cuda,\n self.args.N,\n self.args.C,\n self.args.H,\n self.args.W,\n self.args.seed,\n )\n if self.args.V:\n var_params = [(v, v) for v in self.args.V]\n else:\n var_params = itertools.product(self.args.HV, self.args.WV)\n params = [[p + v for v in var_params] for p in params]\n params = sum(params, [])\n\n writer = None\n i = 0\n for cuda, n, c, h, w, seed, h_var, w_var in params:\n # generate inputs before iterating layers to have the same imput per layer\n self.inputs, self.targets = self.get_input(cuda, n, c, h, w, h_var, w_var, seed)\n\n benchmarks = [(layer, self.get_benchmark(c, layer, cuda)) for layer in self.args.layers]\n for layer, benchmark in benchmarks:\n result = utils.benchmark_fn(benchmark, run_time=self.args.run_time, warmup=self.args.warmup, cuda=cuda)\n result[\"#\"] = str(i) + \"/\" + str(len(benchmarks) * len(params))\n result[\"N\"] = n\n result[\"C\"] = c\n result[\"H\"] = h\n result[\"W\"] = w\n result[\"h_var\"] = h_var\n result[\"w_var\"] = w_var\n result[\"seed\"] = seed\n result[\"avg_us\"] = int(result[\"avg_us\"])\n result[\"std_us\"] = int(result[\"std_us\"])\n result[\"name\"] = layer\n result[\"cuda\"] = cuda\n result[\"numel\"] = sum(x.numel() for x in self.inputs)\n if writer is None and self.args.csv_log:\n writer = csv.DictWriter(open(self.args.csv_log, 'w'), fieldnames=result.keys())\n writer.writeheader()\n if writer is not None:\n writer.writerow(result)\n print(\",\".join(str((str(key), result[key])) for key in sorted(result.keys())))\n i += 1\n\n def get_input(self, cuda, n, c, h, w, h_var, w_var, seed):\n inputs = []\n targets = []\n device = 'cpu'\n if cuda:\n device = 'cuda'\n\n torch.manual_seed(seed)\n random.seed(seed)\n if cuda:\n torch.cuda.init()\n for _ in range(n):\n h_res = max(1, int(random.gauss(h, h_var)))\n w_res = max(1, int(random.gauss(w, w_var)))\n input_i = torch.randn(c, h_res, w_res, device=device)\n target_i = torch.randint(1, (h_res, w_res), dtype=torch.int64, device=device)\n inputs.append(input_i)\n targets.append(target_i)\n if cuda:\n # Synchronize copy operations so they don't influence the benchmark\n torch.cuda.synchronize()\n\n return inputs, targets\n\n\n\ndef main(args):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-L\", dest=\"layers\", type=str, nargs=\"+\")\n parser.add_argument(\"-N\", dest=\"N\", type=int, nargs=\"+\")\n parser.add_argument(\"-C\", dest=\"C\", type=int, nargs=\"+\")\n parser.add_argument(\"-H\", dest=\"H\", type=int, nargs=\"+\")\n parser.add_argument(\"-W\", dest=\"W\", type=int, nargs=\"+\")\n parser.add_argument(\"-HV\", dest=\"HV\", type=float, nargs=\"+\")\n parser.add_argument(\"-WV\", dest=\"WV\", type=float, nargs=\"+\")\n parser.add_argument(\"-V\", dest=\"V\", type=float, nargs=\"+\")\n parser.add_argument(\"-S\", dest=\"seed\", type=int, nargs=\"+\")\n parser.add_argument(\"--warmup\", dest=\"warmup\", type=float, default=2.0)\n parser.add_argument(\"--run-time\", dest=\"run_time\", type=float, default=5.0)\n parser.add_argument(\"--verbose\", dest=\"verbose\", type=int, default=0)\n parser.add_argument(\"--csv-log\", dest=\"csv_log\", type=str)\n parser.add_argument(\"--cuda\", dest=\"cuda\", type=str, nargs=\"+\", default=[\"False\"])\n args = parser.parse_args()\n for v in args.cuda:\n if v not in [\"False\", \"True\"]:\n raise ValueError(\"Argument --cuda may only be passed a list of True or False. 
Got {} instead.\".format(args.cuda))\n args.cuda = [True if c == \"True\" else False for c in args.cuda]\n\n if args.V is not None:\n if (args.HV is not None or args.WV is not None):\n raise ValueError(\"If specifying variance for both H and W, arguments HV and WV must not be set.\")\n args.HV = args.V\n args.WV = args.V\n\n if args.verbose > 0:\n print(\"called with: \", args)\n benchmark_obj = SegLayersBenchMark(args)\n benchmark_obj.run()\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "id": "412796", "language": "Python", "matching_score": 1.4204602241516113, "max_stars_count": 229, "path": "benchmarks/segmentation_layers.py" }, { "content": "import torch\nimport nestedtensor\nimport unittest\nfrom torch.nn import functional as F\nfrom torch import nn\n\nfrom utils_test_case import TestCase\n\n\ndef ntnt(x): return nestedtensor.nested_tensor(x, requires_grad=True)\ndef ntnt_nograd(x): return nestedtensor.nested_tensor(x, requires_grad=False)\n\n\n# Various smoke tests to confirm coverage of an operator\n\nclass TestCoverage(TestCase):\n\n @unittest.skip(\"Fails for strange reason\")\n @torch.inference_mode()\n def test_issues_313(self):\n # Based on https://github.com/pytorch/nestedtensor/issues/313\n\n def model(x):\n torch.manual_seed(20)\n linear = nn.Linear(9, 64)\n norm = nn.BatchNorm1d(64).eval()\n # 3 voxel with 40, 50 and 90 points respectively\n x = linear(x)\n x = norm(x.transpose(2, 1).contiguous()\n ).transpose(2, 1).contiguous()\n x = F.relu(x)\n return torch.max(x, dim=1, keepdim=True)[0]\n\n inputs = [torch.randn(i, 9) for i in [40, 50, 90]]\n model(ntnt_nograd(inputs))\n\n inputs = [torch.randn(30, 9) for _ in range(3)]\n x0 = model(ntnt_nograd(inputs))\n x1 = model(torch.stack(inputs))\n self.assertEqual(torch.stack(x0.unbind()), x1)\n\n @unittest.skip(\"Fails for strange reason\")\n @torch.inference_mode()\n def test_pytorch_commit_56017(self):\n # Based on https://github.com/pytorch/nestedtensor/issues/313\n\n nn.Linear(9, 64)\n # inputs = [torch.randn(i, 3) for i in [4, 5, 9]]\n # x0 = ntnt_nograd(inputs)\n # print(x0)\n # del inputs\n # x0 = x0 + x0\n # print(x0)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "2957342", "language": "Python", "matching_score": 3.350761651992798, "max_stars_count": 229, "path": "test/test_coverage.py" }, { "content": "import torch\nimport nestedtensor\nimport unittest\nfrom utils_test_case import TestCase\nimport random\nimport utils\nfrom torch.nn import functional as F\nfrom detr_nestedtensor import DETRNestedTensor\nfrom torch import nn\n\n\ndef _iter_constructors():\n yield nestedtensor.as_nested_tensor\n yield nestedtensor.nested_tensor\n\n\ndef ntnt(x): return nestedtensor.nested_tensor(x, requires_grad=True)\n\n\ndef ntnt_nograd(x, device=None, dtype=None): return nestedtensor.nested_tensor(\n x, requires_grad=False, device=device, dtype=dtype)\n\n\nclass TestFunctional(TestCase):\n def test_nll_loss(self):\n utils.gen_float_tensor(1, (40, 5))\n utils.gen_float_tensor(1, (40,))\n\n def test_addmm(self):\n torch.rand(5), torch.rand(4, 5)\n nestedtensor.nested_tensor(\n [torch.rand(1, 4), torch.rand(1, 4), torch.rand(4, 4)]\n )\n\n @torch.inference_mode()\n @unittest.skipIf(not torch.cuda.is_available(), \"Test requires cuda\")\n def test_add(self):\n nt = ntnt_nograd([torch.randn(4, 2, 5), torch.randn(4, 3, 5)],\n device=torch.device('cuda'), dtype=torch.half)\n o = torch.randn(1, 4, 1, 1)\n o = o.cuda().half()\n res = nt + o\n\n def _test_conv2d_dtype(self, dtype, weight, device, shapes,\n 
stride=None, padding=None, dilation=None,\n groups=None):\n if stride is None:\n stride = [1, 1]\n if padding is None:\n padding = [0, 0]\n if dilation is None:\n dilation = [1, 1]\n if groups is None:\n groups = 1\n\n def _prod(tup):\n r = 1\n for t in tup:\n r = r * t\n return r\n\n def _test(ts, weight, stride, padding, dilation, groups):\n nt = ntnt_nograd(ts, device=device, dtype=dtype)\n nt_out = torch.conv2d(nt, weight, stride=stride,\n padding=padding, dilation=dilation,\n groups=groups)\n for i, (t, nt_out_i) in enumerate(zip(ts, nt_out.unbind())):\n t_out = torch.conv2d(t.unsqueeze(0), weight,\n stride=stride, padding=padding,\n dilation=dilation,\n groups=groups).squeeze(0)\n self.assertEqual(t_out, nt_out_i)\n ts = []\n for s in shapes:\n ts.append(torch.randn(_prod(s)).reshape(*s).to(device=device, dtype=dtype))\n weight = weight.to(device=device, dtype=dtype)\n _test(ts, weight, stride, padding, dilation, groups)\n\n @torch.inference_mode()\n @unittest.skipIf(not torch.cuda.is_available(), \"Test requires cuda\")\n def test_conv2d_1x1_cuda(self):\n shapes = [(2, 2, 3), (2, 4, 2), (2, 2, 2)]\n weight = torch.randn(3*2*1*1).reshape(3, 2, 1, 1)\n self._test_conv2d_dtype(torch.float16, weight, torch.device('cuda'), shapes)\n self._test_conv2d_dtype(torch.float32, weight, torch.device('cuda'), shapes)\n\n @torch.inference_mode()\n def test_conv2d_1x1_cpu(self):\n shapes = [(2, 2, 3), (2, 4, 2), (2, 2, 2)]\n weight = torch.randn(3*2*1*1).reshape(3, 2, 1, 1)\n # self._test_conv2d_dtype(torch.float16, weight, torch.device('cpu'), shapes)\n self._test_conv2d_dtype(torch.float32, weight, torch.device('cpu'), shapes)\n\n @torch.inference_mode()\n @unittest.skipIf(not torch.cuda.is_available(), \"Test requires cuda\")\n def test_conv2d_3x3_cuda(self):\n shapes = [(2, 4, 5), (2, 5, 3), (2, 3, 3)]\n weight = torch.randn(3*2*3*3).reshape(3, 2, 3, 3)\n self._test_conv2d_dtype(torch.float16, weight, torch.device('cuda'), shapes)\n self._test_conv2d_dtype(torch.float32, weight, torch.device('cuda'), shapes)\n\n @torch.inference_mode()\n def test_conv2d_3x3_cpu(self):\n shapes = [(2, 4, 5), (2, 5, 3), (2, 3, 3)]\n weight = torch.randn(3*2*3*3).reshape(3, 2, 3, 3)\n # self._test_conv2d_dtype(torch.float16, weight, torch.device('cpu'), shapes)\n self._test_conv2d_dtype(torch.float32, weight, torch.device('cpu'), shapes)\n\n @torch.inference_mode()\n @unittest.skipIf(not torch.cuda.is_available(), \"Test requires cuda\")\n def test_conv2d_3x3_resnext_common_cuda(self):\n shapes = [(32, 4, 5), (32, 5, 3), (32, 3, 3)]\n weight = torch.randn(32*1*3*3).reshape(32, 1, 3, 3)\n for dtype in [torch.float16, torch.float32]:\n stride = [1, 1] # default\n padding = [1, 1]\n dilation = [1, 1] # default\n groups = 32\n self._test_conv2d_dtype(dtype, weight, torch.device('cuda'),\n shapes, stride=stride, padding=padding,\n dilation=dilation, groups=groups)\n\n @torch.inference_mode()\n @unittest.skipIf(not torch.cuda.is_available(), \"Test requires cuda\")\n def test_conv2d_3x3_resnext_input_cuda(self):\n shapes = [(4, 3, 2), (4, 3, 3), (4, 2, 3)]\n weight = torch.randn(5, 4, 2, 2)\n for dtype in [torch.float16, torch.float32]:\n stride = [1, 1]\n padding = [1, 1]\n dilation = [1, 1]\n groups = 1\n self._test_conv2d_dtype(dtype, weight, torch.device('cuda'),\n shapes, stride=stride, padding=padding,\n dilation=dilation, groups=groups)\n\n def test_contiguousity(self):\n initial_t = torch.rand(2, 5, 10, 15)\n self.assertEqual(True, initial_t.is_contiguous())\n\n non_contiguous_1 = initial_t.select(1, 0)\n 
non_contiguous_2 = initial_t.select(1, 0)\n self.assertEqual(False, non_contiguous_1.is_contiguous())\n\n relu = torch.nn.ReLU()\n t_cont = relu(non_contiguous_1)\n self.assertEqual(True, t_cont.is_contiguous())\n\n nt = nestedtensor.nested_tensor([non_contiguous_1, non_contiguous_2])\n self.assertEqual(True, nt.is_contiguous())\n\n # nt_cont = relu(nt)\n # self.assertEqual(True, nt_cont.is_contiguous())\n\n @torch.inference_mode()\n def test_nn_embedding(self):\n inputs = [torch.randint(100, (L,)) for L in torch.randint(5, 50, (8,))]\n x = nestedtensor.nested_tensor(inputs, dtype=torch.int64)\n emb = torch.nn.Embedding(100, 8)\n y = emb(x)\n for i, inp in enumerate(inputs):\n self.assertEqual(emb(inp), y[i])\n\n @torch.inference_mode()\n def test_nn_embedding_bag(self):\n\n def run_test(EmbeddingBag, inputs):\n x = nestedtensor.nested_tensor(inputs, dtype=torch.int64)\n torch.manual_seed(0)\n emb = EmbeddingBag()\n y = emb(x)\n s = y.sum()\n # s.backward()\n input_tensor = torch.cat(inputs).contiguous()\n input_offset = [0]\n for inp in inputs[:-1]:\n input_offset.append(len(inp) + input_offset[-1])\n input_offset = torch.tensor(input_offset)\n torch.manual_seed(0)\n emb_t = EmbeddingBag()\n y_t = emb_t(input_tensor, input_offset)\n s_t = y_t.sum()\n # s_t.backward()\n for yi, y_ti in zip(y.unbind(), y_t.unbind()):\n self.assertEqual(yi, y_ti)\n self.assertEqual(s, s_t)\n # self.assertEqual(emb.weight.grad, emb_t.weight.grad)\n\n run_test(lambda: torch.nn.EmbeddingBag(100, 8), [\n torch.randint(100, (5,)), torch.randint(100, (5,))])\n run_test(lambda: torch.nn.EmbeddingBag(100, 8), [\n torch.randint(100, (L,)) for L in torch.randint(3, 7, (5,))])\n run_test(lambda: torch.nn.EmbeddingBag(100, 8, sparse=True), [\n torch.randint(100, (5,)), torch.randint(100, (5,))])\n run_test(lambda: torch.nn.EmbeddingBag(100, 8, sparse=True), [\n torch.randint(100, (L,)) for L in torch.randint(3, 7, (5,))])\n\n @torch.inference_mode()\n def test_nn_functional_conv2d(self):\n tensor1 = torch.rand(3, 128, 128)\n tensor2 = torch.rand(3, 300, 400)\n inputs = [tensor1, tensor2]\n weight = torch.rand(3, 3, 7, 7)\n\n # no optional params\n tensor_res = [torch.nn.functional.conv2d(\n t.unsqueeze(0), weight).squeeze(0) for t in inputs]\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n nt_res = [t for t in torch.nn.functional.conv2d(\n nt, weight).unbind()]\n self.assertEqual(nt_res, tensor_res)\n\n # optional params with no bias\n tensor_res = [torch.nn.functional.conv2d(t.unsqueeze(\n 0), weight, None, 2, 3, 1, 1).squeeze(0) for t in inputs]\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n nt_res = [t for t in torch.nn.functional.conv2d(\n nt, weight, None, 2, 3, 1, 1).unbind()]\n self.assertEqual(nt_res, tensor_res)\n\n # optional params with bias\n bias = torch.rand(3)\n tensor_res = [torch.nn.functional.conv2d(t.unsqueeze(\n 0), weight, bias, (2, 2), (3, 3), (1, 1), 1).squeeze(0) for t in inputs]\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n nt_res = [t for t in torch.nn.functional.conv2d(\n nt, weight, bias, (2, 2), (3, 3), (1, 1), 1).unbind()]\n self.assertEqual(nt_res, tensor_res)\n\n @unittest.skip(\"Not implemented\")\n def test_nn_functional_batch_norm(self):\n inputs = [\n torch.tensor([[[-0.5000]], [[0.5000]]]),\n torch.tensor([[[-1.0000, 1.0000], [-0.2500, -0.5000]],\n [[0.2500, 0.5000], [1.5000, -1.5000]]])\n ]\n\n tensor_res = []\n running_mean = torch.rand(2)\n running_var 
= torch.rand(2)\n for i in range(2):\n t_res = torch.nn.functional.batch_norm(\n inputs[i].unsqueeze(0).contiguous(), running_mean, running_var)\n tensor_res.append(t_res.squeeze(0))\n\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n nt_res = torch.nn.functional.batch_norm(\n nt, running_mean, running_var)\n self.assertEqual(nestedtensor.nested_tensor(tensor_res), nt_res)\n\n def test_nn_functional_max_pool2d(self):\n inputs = [\n torch.randn(3, 500, 600),\n torch.randn(3, 128, 128)\n ]\n\n tensor_res = []\n for i in range(2):\n t_res = torch.nn.functional.max_pool2d(inputs[i].unsqueeze(0).contiguous(), kernel_size=(\n 3, 3), stride=(2, 2), padding=(1, 1), dilation=(1, 1), ceil_mode=False)\n tensor_res.append(t_res.squeeze(0))\n\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n nt_res = torch.nn.functional.max_pool2d(nt, kernel_size=(3, 3), stride=(\n 2, 2), padding=(1, 1), dilation=(1, 1), ceil_mode=False)\n self.assertEqual(nestedtensor.nested_tensor(tensor_res), nt_res)\n\n def test_functional_relu_(self):\n orig_t1 = torch.tensor([-2, -1, 0, 1, 2])\n expected_t = torch.tensor([0, 0, 0, 1, 2])\n expected_nt = ntnt_nograd([expected_t])\n\n t_clone = orig_t1.clone()\n torch.nn.functional.relu_(t_clone)\n self.assertEqual(t_clone, expected_t)\n\n t_clone = orig_t1.clone()\n nt1 = ntnt_nograd([t_clone])\n torch.nn.functional.relu_(nt1)\n self.assertEqual(nt1, expected_nt)\n self.assertEqual(t_clone, orig_t1)\n\n t_clone = orig_t1.clone()\n nt1 = nestedtensor.as_nested_tensor([t_clone])\n torch.nn.functional.relu_(nt1)\n self.assertEqual(nt1, expected_nt)\n self.assertNotEqual(t_clone, expected_t)\n\n def test_nn_functional_relu(self):\n inputs = [\n torch.randn(3, 500, 600),\n torch.randn(3, 128, 128)\n ]\n\n tensor_res = []\n for i in range(2):\n t_res = torch.nn.functional.relu(\n inputs[i].unsqueeze(0).contiguous())\n tensor_res.append(t_res.squeeze(0))\n\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n nt_res = torch.nn.functional.relu(nt)\n self.assertEqual(nestedtensor.nested_tensor(tensor_res), nt_res)\n\n def test_nn_functional_cross_entropy(self):\n inputs = [\n torch.randn(3, 300, 300),\n torch.randn(3, 400, 400)\n ]\n\n targets = [\n torch.randint(1, (300, 300), dtype=torch.int64),\n torch.randint(1, (400, 400), dtype=torch.int64)\n ]\n\n tensor_res = []\n for i in range(2):\n t_res = torch.nn.functional.cross_entropy(\n inputs[i].unsqueeze(0).contiguous(), targets[i].unsqueeze(0))\n tensor_res.append(t_res.squeeze(0))\n\n input_nt = nestedtensor.nested_tensor(inputs)\n target_nt = nestedtensor.nested_tensor(targets, dtype=torch.int64)\n nt_res = torch.nn.functional.cross_entropy(input_nt, target_nt)\n self.assertEqual(nestedtensor.nested_tensor(tensor_res), nt_res)\n\n def test_nn_dropout(self):\n inputs = [\n torch.randn(3, 128, 128),\n torch.randn(3, 300, 400)\n ]\n\n dropout = torch.nn.Dropout(p=0.2)\n tensor_res = []\n for i in range(2):\n t_res = dropout(inputs[i].unsqueeze(0).contiguous())\n tensor_res.append(t_res.squeeze(0))\n\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n nt_res = dropout(nt)\n self.assertEqual(nestedtensor.nested_tensor(\n tensor_res).size(), nt_res.size())\n\n def test_nn_functional_dropout(self):\n inputs = [\n torch.randn(3, 128, 128),\n torch.randn(3, 300, 400)\n ]\n\n tensor_res = []\n for i in range(2):\n t_res = torch.nn.functional.dropout(\n 
inputs[i].unsqueeze(0).contiguous())\n tensor_res.append(t_res.squeeze(0))\n\n nt = ntnt_nograd(inputs)\n nt_res = torch.nn.functional.dropout(nt)\n self.assertEqual(ntnt_nograd(tensor_res).size(), nt_res.size())\n\n def test_nn_functional_interpolate(self):\n inputs = [\n torch.randn(3, 200, 300),\n torch.randn(3, 300, 400)\n ]\n\n # no optional params\n tensor_res = []\n for i in range(2):\n t_res = torch.nn.functional.interpolate(\n inputs[i].unsqueeze(0).contiguous(), 200)\n tensor_res.append(t_res.squeeze(0))\n\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n nt_res = torch.nn.functional.interpolate(nt, 200)\n self.assertEqual(nestedtensor.nested_tensor(tensor_res), nt_res)\n\n # tuple/int size and optional mode\n for size in [(200, 200), 100]:\n tensor_res = []\n for i in range(2):\n t_res = torch.nn.functional.interpolate(inputs[i].unsqueeze(\n 0).contiguous(), size, mode='bilinear', align_corners=True)\n tensor_res.append(t_res.squeeze(0))\n\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n nt_res = torch.nn.functional.interpolate(\n nt, size, mode='bilinear', align_corners=True)\n self.assertEqual(\n nestedtensor.nested_tensor(tensor_res), nt_res)\n\n # special NT case - list of sizes\n size = ((100, 100), (200, 250), )\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n nt_res = torch.nn.functional.interpolate(\n nt, size, mode='bilinear', align_corners=True)\n self.assertEqual(nt_res.nested_size(2), (100, 200))\n self.assertEqual(nt_res.nested_size(3), (100, 250))\n\n # scale_factor instead of a size\n for scale_factor in [(2.2, 2.2), 1.1]:\n tensor_res = []\n for i in range(2):\n t_res = torch.nn.functional.interpolate(\n inputs[i].unsqueeze(0).contiguous(), scale_factor=scale_factor)\n tensor_res.append(t_res.squeeze(0))\n\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n nt_res = torch.nn.functional.interpolate(\n nt, scale_factor=scale_factor)\n self.assertEqual(\n nestedtensor.nested_tensor(tensor_res), nt_res)\n\n # check errors\n for nt in [nestedtensor.nested_tensor(inputs), nestedtensor.as_nested_tensor(inputs)]:\n self.assertRaises(RuntimeError, lambda: torch.nn.functional.interpolate(\n nt, size=(100, 100), scale_factor=(1, 1)))\n\n def test_copy_(self):\n for constructor in _iter_constructors():\n nt1 = constructor([])\n nt2 = constructor([])\n nt1.copy_(nt2)\n self.assertEqual(nt1, nt2)\n\n nt1 = constructor([torch.randn(1, 2, 3)])\n nt2 = constructor([torch.randn(1, 2, 3)])\n nt1.copy_(nt2)\n self.assertEqual(nt1, nt2)\n\n nt1 = constructor([torch.randn(1, 2, 3), torch.randn(2, 1, 3)])\n nt2 = constructor([torch.randn(1, 2, 3), torch.randn(2, 1, 3)])\n nt1.copy_(nt2)\n self.assertEqual(nt1, nt2)\n\n # Currently only supporting nested dim 1.\n # nt1 = constructor(\n # [[torch.randn(1, 2, 3), torch.randn(2, 1, 3)], [torch.randn(3, 2, 1)]])\n # nt2 = constructor(\n # [[torch.randn(1, 2, 3), torch.randn(2, 1, 3)], [torch.randn(3, 2, 1)]])\n # nt1.copy_(nt2)\n # self.assertEqual(nt1, nt2)\n\n @unittest.skip(\"Currently only supporting nested dim 1.\")\n def test_unsqueeze(self):\n for constructor in _iter_constructors():\n t = torch.randn(2, 3)\n\n # Currently only supporting nested dim 1.\n # nt = constructor([[t.reshape(2, 3)]])\n # self.assertEqual(nt.unsqueeze(\n # 0), constructor([[[t.reshape(2, 3)]]]))\n # self.assertEqual(nt.unsqueeze(\n # 1), constructor([[[t.reshape(2, 3)]]]))\n # 
self.assertEqual(nt.unsqueeze(\n # 2), constructor([[t.reshape(1, 2, 3)]]))\n # self.assertEqual(nt.unsqueeze(\n # 3), constructor([[t.reshape(2, 1, 3)]]))\n # self.assertEqual(nt.unsqueeze(\n # 4), constructor([[t.reshape(2, 3, 1)]]))\n\n # Currently only supporting nested dim 1.\n # t0 = t.reshape(3, 2)\n # t1 = t\n # t2 = torch.randn(4, 5)\n # nt = constructor([[t0, t1], [t2]])\n # self.assertEqual(nt.unsqueeze(0), constructor([[[t0, t1], [t2]]]))\n # self.assertEqual(nt.unsqueeze(\n # 1), constructor([[[t0, t1]], [[t2]]]))\n # self.assertEqual(nt.unsqueeze(2), constructor(\n # [[t0.reshape(1, 3, 2), t1.reshape(1, 2, 3)], [t2.reshape(1, 4, 5)]]))\n # self.assertEqual(nt.unsqueeze(3), constructor(\n # [[t0.reshape(3, 1, 2), t1.reshape(2, 1, 3)], [t2.reshape(4, 1, 5)]]))\n # self.assertEqual(nt.unsqueeze(4), constructor(\n # [[t0.reshape(3, 2, 1), t1.reshape(2, 3, 1)], [t2.reshape(4, 5, 1)]]))\n\n t = torch.randn(2, 3)\n nt = constructor([t])\n self.assertEqual(nt.unsqueeze(0), constructor([[t]]))\n self.assertEqual(nt.unsqueeze(\n 1), constructor([t.reshape(1, 2, 3)]))\n self.assertEqual(nt.unsqueeze(\n 2), constructor([t.reshape(2, 1, 3)]))\n self.assertEqual(nt.unsqueeze(\n 3), constructor([t.reshape(2, 3, 1)]))\n self.assertRaises(IndexError, lambda: nt.unsqueeze(4))\n\n @torch.inference_mode()\n def test_matmul(self):\n for constructor in _iter_constructors():\n t1 = torch.randn(2, 3)\n a = constructor([t1, t1])\n t21 = torch.randn(3, 2)\n t22 = torch.randn(3, 2)\n b = constructor([t21, t22])\n result = torch.matmul(a, b)\n result1 = torch.matmul(a, t22)\n self.assertEqual(result[1], result1[0])\n self.assertEqual(result[1], result1[1])\n # Currently only supporting nested dim 1.\n # c = constructor([[t21, t22], [t22, t21]])\n # result2 = torch.matmul(c, t1)\n # self.assertEqual(result2[0][0], torch.matmul(t21, t1))\n # self.assertEqual(result2[0][1], torch.matmul(t22, t1))\n # self.assertEqual(result2[1][0], torch.matmul(t22, t1))\n # self.assertEqual(result2[1][1], torch.matmul(t21, t1))\n\n @unittest.skip(\"Currently only supporting nested dim 1.\")\n def test_transpose(self):\n t0 = torch.randn(3, 3, 4)\n t1 = torch.randn(2, 4, 3)\n t2 = torch.randn(3, 3, 2)\n ts = [[t0, t1], [t2]]\n nt = nestedtensor.nested_tensor(ts)\n self.assertRaisesRegex(RuntimeError, \"Transposition of nested dimensions is not implemented yet.\",\n lambda: nt.transpose(0, 2))\n self.assertRaisesRegex(RuntimeError, \"Transposition of nested dimensions is not implemented yet.\",\n lambda: nt.transpose(1, 3))\n self.assertRaisesRegex(RuntimeError, \"Transposition of nested dimensions is not implemented yet.\",\n lambda: nt.transpose(0, 1))\n self.assertEqual(nt.transpose(2, 3), nt.transpose(3, 2))\n t = torch.randn(2, 3, 2, 4, 1)\n t_t = t.transpose(2, 3)\n nt = nestedtensor.nested_tensor(\n list(map(lambda x: x.unbind(), t.unbind())))\n nt_t = nestedtensor.nested_tensor(\n list(map(lambda x: x.unbind(), t_t.unbind())))\n self.assertEqual(t_t, nt_t.to_tensor())\n\n @unittest.skip(\"Currently only supporting nested dim 1.\")\n def test_flatten(self):\n t0 = torch.randn(3, 3, 4)\n t1 = torch.randn(2, 4, 3)\n t2 = torch.randn(3, 3, 2)\n ts = [[t0, t1], [t2]]\n nt = nestedtensor.nested_tensor(ts)\n self.assertRaisesRegex(RuntimeError, \"Cannot flatten nested dimension 0\",\n lambda: nt.flatten(0))\n self.assertRaisesRegex(RuntimeError, \"Cannot flatten nested dimension 1\",\n lambda: nt.flatten(2, 1))\n result = nt.flatten(2)\n map(self.assertEqual, tuple(\n map(lambda x: x.flatten(), ts[0])), result[0])\n 
map(self.assertEqual, tuple(\n map(lambda x: x.flatten(), ts[1])), result[1])\n\n result = nt.flatten(3, 4)\n map(self.assertEqual, tuple(\n map(lambda x: x.flatten(1, 2), ts[0])), result[0])\n map(self.assertEqual, tuple(\n map(lambda x: x.flatten(1, 2), ts[1])), result[1])\n\n ts = torch.randn(3, 2, 4, 5, 3)\n ts_r = ts.flatten(3, 4)\n ts = list(map(lambda x: x.unbind(), ts.unbind()))\n ts_r = list(map(lambda x: x.unbind(), ts_r.unbind()))\n ts = nestedtensor.nested_tensor(ts).flatten(3, 4)\n ts_r = nestedtensor.nested_tensor(ts_r)\n map(self.assertEqual, zip(ts[0].unbind(), ts_r[0].unbind()))\n map(self.assertEqual, zip(ts[1].unbind(), ts_r[1].unbind()))\n\n @unittest.skip(\"Currently only supporting nested dim 1.\")\n def test_reshape(self):\n t0 = torch.randn(3, 3)\n t1 = torch.randn(2, 3)\n t2 = torch.randn(3, 3)\n ts = [[t0, t1], [t2]]\n nt = nestedtensor.nested_tensor(ts)\n self.assertRaisesRegex(RuntimeError, \"Reshape cannot be exclusive to nested dimensions.\",\n lambda: nt.reshape(0, -1))\n self.assertRaisesRegex(RuntimeError, \"Cannot reshape explicitly along irregular dimension 1. Please use -1 as a placeholder.\",\n lambda: nt.reshape(-1, 1, 2, 3))\n result = nt.reshape(-1, -1, 3, -1)\n map(self.assertEqual, tuple(\n map(lambda x: x.reshape(3, -1), ts[0])), result[0])\n map(self.assertEqual, tuple(\n map(lambda x: x.reshape(3, -1), ts[1])), result[1])\n\n result = nt.reshape(-1, -1, 1, 1, 3, -1)\n map(self.assertEqual, tuple(\n map(lambda x: x.reshape(1, 1, 3, -1), ts[0])), result[0])\n map(self.assertEqual, tuple(\n map(lambda x: x.reshape(1, 1, 3, -1), ts[1])), result[1])\n\n result = nt.reshape(-1, -1, 1, 1, 3, -1)\n map(self.assertEqual, tuple(\n map(lambda x: x.reshape(1, 1, 3, -1), ts[0])), result[0])\n map(self.assertEqual, tuple(\n map(lambda x: x.reshape(1, 1, 3, -1), ts[1])), result[1])\n\n ts = torch.randn(3, 2, 4, 5, 3)\n ts_r = ts.reshape(3, 2, 5, 3, 4)\n ts = list(map(lambda x: x.unbind(), ts.unbind()))\n ts_r = list(map(lambda x: x.unbind(), ts_r.unbind()))\n ts = nestedtensor.nested_tensor(ts)\n ts_r = nestedtensor.nested_tensor(ts_r)\n map(self.assertEqual, zip(ts[0].unbind(), ts_r[0].unbind()))\n map(self.assertEqual, zip(ts[1].unbind(), ts_r[1].unbind()))\n\n def _test_softmax(self, ts, nt):\n fn = F.softmax\n self.assertRaises(RuntimeError, lambda: fn(nt, 0))\n self.assertRaises(RuntimeError, lambda: fn(nt, 1))\n\n def _map_fn(dim, result):\n result = fn(nt, 2)\n map(self.assertEqual, tuple(\n map(lambda x: fn(x, dim), ts[0])), result[0])\n map(self.assertEqual, tuple(\n map(lambda x: fn(x, dim), ts[1])), result[1])\n s = result.sum()\n # s.backward()\n\n for i in range(nt.dim() - nt.nested_dim()):\n _map_fn(i, fn(nt, i + nt.nested_dim()))\n\n @unittest.skip(\"Currently only supporting nested dim 1.\")\n def test_softmax_1(self):\n ts = [[], []]\n nt = ntnt_nograd(ts)\n self._test_softmax(ts, nt)\n\n @unittest.skip(\"Currently only supporting nested dim 1.\")\n def test_softmax_2(self):\n t0 = torch.randn(3)\n t1 = torch.randn(2)\n t2 = torch.randn(3)\n ts = [[t0, t1], [t2]]\n nt = ntnt_nograd(ts)\n self._test_softmax(ts, nt)\n\n @unittest.skip(\"Currently only supporting nested dim 1.\")\n def test_softmax_3(self):\n t0 = torch.randn(3, 2, 1)\n t1 = torch.randn(2, 3, 1)\n t2 = torch.randn(3, 1, 2)\n ts = [[t0, t1], [t2]]\n nt = ntnt_nograd(ts)\n self._test_softmax(ts, nt)\n\n @unittest.skip(\"Currently only supporting nested dim 1.\")\n def test_softmax_4(self):\n ts = torch.randn(6, 4, 3, 2, 5)\n ts = list(map(lambda x: x.unbind(), ts.unbind()))\n nt = 
ntnt_nograd(ts)\n self._test_softmax(ts, nt)\n\n @torch.inference_mode()\n def test_mha(self):\n embed_dim = 2\n num_heads = 2\n torch.manual_seed(1010)\n mha = torch.nn.MultiheadAttention(embed_dim, num_heads)\n query = torch.randn(3, 1, embed_dim, requires_grad=True)\n key = torch.randn(2, 1, embed_dim, requires_grad=True)\n value = torch.randn(2, 1, embed_dim, requires_grad=True)\n attn_output, _ = mha(query, key, value)\n nt_mha = torch.nn.MultiheadAttention(embed_dim, num_heads)\n nt_mha.in_proj_weight = mha.in_proj_weight\n nt_mha.in_proj_bias = mha.in_proj_bias\n nt_mha.out_proj.weight = mha.out_proj.weight\n nt_mha.out_proj.bias = mha.out_proj.bias\n query_nt = ntnt_nograd([query.squeeze(1)])\n key_nt = ntnt_nograd([key.squeeze(1)])\n value_nt = ntnt_nograd([value.squeeze(1)])\n nt_attn_output, _ = nt_mha(\n query_nt, key_nt, value_nt, need_weights=False)\n self.assertEqual(attn_output.squeeze(1), nt_attn_output[0])\n\n @torch.inference_mode()\n def test_mha_detr(self):\n NDIM = 128\n BSZ = 8\n NHEAD = 8\n RAND_INTS = [(1, 5), (7, 9)]\n MODEL = torch.nn.MultiheadAttention(NDIM, NHEAD).eval()\n\n src_list = ntnt_nograd(\n [torch.randn(NDIM, i, j) for (i, j) in RAND_INTS])\n detr_nt_src = DETRNestedTensor.from_tensor_list(src_list)\n src0, mask = detr_nt_src.decompose()\n src0.requires_grad_()\n src = src0.flatten(2).permute(2, 0, 1)\n mask = mask.flatten(1)\n result, _ = MODEL(src, src, src, key_padding_mask=mask,\n need_weights=False) # [0].sum().backward()\n mask = (~mask.t().unsqueeze(2)).float()\n result0 = result * mask\n # result_sum = result.sum()\n\n src = ntnt_nograd([t.flatten(1).permute(\n 1, 0) for t in src_list])\n result1, _ = MODEL(src, src, src, need_weights=False)\n self.assertEqual(result0.sum(0).sum(0), result1.sum(1).sum(0))\n\n @torch.inference_mode()\n @unittest.skipIf(not torch.cuda.is_available(), \"Test requires cuda\")\n def test_mha_detr_cuda(self):\n NDIM = 128\n BSZ = 8\n NHEAD = 8\n RAND_INTS = [(1, 5), (7, 9)]\n MODEL = torch.nn.MultiheadAttention(NDIM, NHEAD).cuda().eval()\n\n src_list = [torch.randn(NDIM, i, j) for (i, j) in RAND_INTS]\n detr_nt_src = DETRNestedTensor.from_tensor_list(src_list)\n src0, mask = detr_nt_src.decompose()\n src = src0.flatten(2).permute(2, 0, 1).cuda()\n mask = mask.flatten(1).cuda()\n result, _ = MODEL(src, src, src, key_padding_mask=mask,\n need_weights=False) # [0].sum().backward()\n mask = (~mask.t().unsqueeze(2)).float()\n result0 = result * mask\n # result_sum = result.sum()\n\n src = ntnt_nograd([t.flatten(1).permute(\n 1, 0) for t in src_list], device=torch.device('cuda'))\n result1, _ = MODEL(src, src, src, need_weights=False)\n self.assertEqual(result0.sum(0).sum(0), result1.sum(1).sum(0))\n\n def test_squeeze(self):\n t = torch.randn(2, 3)\n result = ntnt_nograd([t])\n\n # Currently only supporting nested dim 1.\n # nt = ntnt_nograd([[t.reshape(1, 2, 1, 3)]])\n # # self.assertEqual(nt.squeeze(), result)\n # self.assertRaises(RuntimeError, lambda: nt.squeeze())\n # nt.squeeze_()\n # self.assertEqual(nt, result)\n\n nt = ntnt_nograd([t.reshape(2, 3)])\n # self.assertEqual(nt.squeeze(), result)\n self.assertRaises(RuntimeError, lambda: nt.squeeze())\n nt.squeeze_()\n self.assertEqual(nt, result)\n\n # Currently only supporting nested dim 1.\n # nt = ntnt_nograd([[t.reshape(2, 3)]])\n # # self.assertEqual(nt.squeeze(), result)\n # self.assertRaises(RuntimeError, lambda: nt.squeeze())\n # nt.squeeze_()\n # self.assertEqual(nt, result)\n\n nt = ntnt_nograd([t.reshape(1, 2, 3)])\n # self.assertEqual(nt.squeeze(), 
result)\n self.assertRaises(RuntimeError, lambda: nt.squeeze())\n nt.squeeze_()\n self.assertEqual(nt, result)\n\n nt = ntnt_nograd([t.reshape(1, 2, 1, 3, 1)])\n # self.assertEqual(nt.squeeze(), result)\n self.assertRaises(RuntimeError, lambda: nt.squeeze())\n nt.squeeze_()\n self.assertEqual(nt, result)\n\n # Currently only supporting nested dim 1.\n # nt = ntnt_nograd([[[t.reshape(1, 2, 3)]]])\n # # self.assertEqual(nt.squeeze(), result)\n # self.assertRaises(RuntimeError, lambda: nt.squeeze())\n # nt.squeeze_()\n # self.assertEqual(nt, result)\n\n # result = ntnt([t])\n # nt = ntnt([t.reshape(1, 2, 3)])\n # self.assertEqual(nt.squeeze(1), result)\n # self.assertRaisesRegex(\n # RuntimeError, \"Cannot squeeze first dimension.\", lambda: nt.squeeze(0))\n # self.assertRaisesRegex(\n # RuntimeError, \"Given dimension is either undefined or not a singleton.\", lambda: nt.squeeze(2))\n # self.assertRaisesRegex(\n # RuntimeError, \"Given dimension is either undefined or not a singleton.\", lambda: nt.squeeze(3))\n # self.assertRaises(IndexError, lambda: nt.squeeze(4))\n # a = nt.squeeze(1)\n # a.sum().backward()\n # self.assertEqual(nt.grad, ntnt_nograd(\n # [t.reshape(1, 2, 3).mul(0).add(1)]))\n\n # nt = ntnt([[t.reshape(1, 2, 1, 3)]])\n # self.assertRaisesRegex(\n # RuntimeError, \"Cannot squeeze nested dimension.\", lambda: nt.squeeze(1))\n # # self.assertEqual(nt.squeeze(1), ntnt(\n # # [t.reshape(1, 2, 1, 3)]))\n # self.assertEqual(nt.squeeze(\n # 2), ntnt([[t.reshape(2, 1, 3)]]))\n # self.assertEqual(nt.squeeze(\n # 4), ntnt([[t.reshape(1, 2, 3)]]))\n\n def test_nn_max_pool2d(self):\n data = [\n [\n torch.randn(3, 500, 600),\n torch.randn(3, 128, 128)\n ],\n [\n torch.randn(3, 500, 600),\n torch.randn(3, 500, 600)\n ],\n ]\n\n # with optional params\n maxPool2d = torch.nn.MaxPool2d(kernel_size=(\n 3, 3), stride=2, padding=(1, 1), dilation=1, ceil_mode=False)\n for inputs in data:\n tensor_res = []\n for i in range(2):\n t_res = maxPool2d(inputs[i].unsqueeze(0).contiguous())\n tensor_res.append(t_res.squeeze(0))\n\n nt = ntnt_nograd(inputs)\n nt_res = maxPool2d(nt)\n self.assertEqual(ntnt_nograd(tensor_res), nt_res)\n\n @unittest.skip(\"Currently broken\")\n def test_fzbn2d(self):\n class FrozenBatchNorm2d(torch.nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters are fixed.\n Copy-paste from torchvision.misc.ops with added eps before rqsrt,\n without which any other models than torchvision.models.resnet[18,34,50,101]\n produce nans.\n \"\"\"\n\n def __init__(self, n):\n super(FrozenBatchNorm2d, self).__init__()\n self.register_buffer(\"weight\", torch.ones(n))\n self.register_buffer(\"bias\", torch.zeros(n))\n self.register_buffer(\"running_mean\", torch.zeros(n))\n self.register_buffer(\"running_var\", torch.ones(n))\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key in state_dict:\n del state_dict[num_batches_tracked_key]\n\n super(FrozenBatchNorm2d, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\n def forward(self, x):\n # move reshapes to the beginning\n # to make it fuser-friendly\n print(\"1\")\n w = self.weight.reshape(-1, 1, 1)\n print(\"2\")\n b = self.bias.reshape(-1, 1, 1)\n print(\"3\")\n rv = self.running_var.reshape(-1, 1, 1)\n print(\"4\")\n rm = self.running_mean.reshape(-1, 1, 1)\n print(\"5\")\n eps = 
1e-5\n print(\"6\")\n scale = w * (rv + eps).rsqrt()\n print(\"7\")\n bias = b - rm * scale\n print(\"8\")\n # return (x * scale + bias)\n # return x\n # return (x * scale + bias)\n res = x + bias\n print(\"9\")\n return res\n\n b0 = FrozenBatchNorm2d(64) # .cuda()\n random.seed(1010)\n torch.manual_seed(1310)\n RAND_INTS = [random.randint(100, 300) for _ in range(1)]\n tensors = [torch.rand(64, i, 256, requires_grad=False)\n for i in RAND_INTS]\n # RAND_INTS = [random.randint(1, 1) for _ in range(1)]\n # tensors = [torch.rand(1, i, 2, requires_grad=True)\n # for i in RAND_INTS]\n nested_tensor = ntnt_nograd(tensors)\n # print(nested_tensor.nested_size())\n s00 = b0(nested_tensor)\n print(\"s00\")\n print(s00.requires_grad)\n s0 = s00.sum()\n # s0.backward()\n\n b1 = FrozenBatchNorm2d(64)\n s1 = 0\n for t in tensors:\n s1 += b1(t).sum()\n # s1.backward()\n self.assertEqual(s0, s1)\n # for i in range(len(tensors)):\n # self.assertEqual(nested_tensor.grad[i], tensors[i].grad)\n\n self.assertEqual(len((list(b0.named_parameters()))), 0)\n self.assertEqual(len((list(b1.named_parameters()))), 0)\n\n @torch.inference_mode()\n def test_layer_norm(self):\n def _test(device, dtype, size):\n print(f'device {device} dtype {dtype} size: {size}')\n # Currently only supporting nested dim 1.\n # layer_norm = torch.nn.LayerNorm((0,)).to(device)\n # t0 = torch.randn(3)\n # t1 = torch.randn(2)\n # t2 = torch.randn(3)\n # ts = [[t0, t1], [t2]]\n # nt = ntnt_nograd(ts, device=device)\n # self.assertRaisesRegex(RuntimeError,\n # \"Cannot normalize across irregular dimension 2\", lambda: layer_norm(nt))\n\n t0 = utils.gen_float_tensor(1, (2, size)).to(device).to(dtype)\n t1 = utils.gen_float_tensor(2, (2, size)).to(device).to(dtype)\n ts = [t0, t1, t0, t1]\n nt = ntnt_nograd(ts, device=device, dtype=dtype)\n layer_norm = torch.nn.LayerNorm(size).to(device).to(dtype)\n nt_result = layer_norm(nt)\n for i in range(len(ts)):\n a = nt_result[i]\n b = layer_norm(\n ts[i].reshape(1, -1, size).squeeze(0))\n self.assertEqual(a, b)\n\n # layer_norm = torch.nn.LayerNorm(16).to(device).to(dtype)\n # tt = utils.gen_float_tensor(1, (3, 23, 16)).to(device).to(dtype)\n # res = layer_norm(tt)\n # nt = nt + 3\n # res = res * 5\n # res = layer_norm(tt + 2)\n # t0 = utils.gen_float_tensor(1, (3, 16)).to(device)\n # t1 = utils.gen_float_tensor(2, (2, 16)).to(device)\n # t2 = utils.gen_float_tensor(3, (3, 16)).to(device)\n\n # Currently only supporting nested dim 1.\n # ts = [[t0, t1], [t2]]\n # result = ntnt_nograd(ts, device=device)\n # layer_norm(ts[0][0])\n # map(self.assertEqual, tuple(\n # map(lambda x: layer_norm(x), ts[0])), result[0])\n # map(self.assertEqual, tuple(\n # map(lambda x: layer_norm(x), ts[1])), result[1])\n\n # layer_norm = torch.nn.LayerNorm(3).to(device)\n # t0 = torch.randn(3, 3, 4)\n # t1 = torch.randn(2, 3, 4)\n # t2 = torch.randn(3, 3, 4)\n # ts = [[t0, t1], [t2]]\n # nt = ntnt_nograd(ts, device=device)\n # self.assertRaisesRegex(RuntimeError,\n # \"Normalized shape \\[3\\] does not match the size of the last dimension \\(4\\) of input.\",\n # lambda: layer_norm(nt))\n\n # layer_norm = torch.nn.LayerNorm((3, 2, 4)).to(device)\n # self.assertRaisesRegex(RuntimeError,\n # \"Currently only singleton tuples of integers supported for layer_norm.\",\n # lambda: layer_norm(nt))\n for size in [1024, 512, 256, 128, 2, 4, 32]:\n _test(torch.device('cpu'), torch.float32, size)\n if torch.cuda.is_available():\n _test(torch.device('cuda'), torch.float16, size)\n _test(torch.device('cuda'), torch.float32, size)\n\n 
@torch.inference_mode()\n def test_decoder(self):\n class TransformerDecoderLayer(nn.Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,\n activation=\"relu\", normalize_before=False):\n super().__init__()\n self.self_attn = torch.nn.MultiheadAttention(\n d_model, nhead, dropout=dropout)\n self.multihead_attn = torch.nn.MultiheadAttention(\n d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.norm3 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n self.activation = torch.nn.functional.relu\n self.normalize_before = normalize_before\n\n def with_pos_embed(self, tensor, pos):\n return tensor if pos is None else tensor + pos\n\n def forward(self, tgt, memory,\n # tgt_mask: Optional[Tensor] = None,\n # memory_mask: Optional[Tensor] = None,\n # tgt_key_padding_mask: Optional[Tensor] = None,\n # memory_key_padding_mask: Optional[Tensor] = None,\n pos=None, query_pos=None):\n q = k = self.with_pos_embed(tgt, query_pos)\n tgt2 = self.self_attn(q, k, value=tgt,\n need_weights=False)[0]\n # tgt = tgt + self.dropout1(tgt2)\n tgt = tgt + tgt2\n tgt = self.norm1(tgt)\n tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),\n key=self.with_pos_embed(\n memory, pos),\n value=memory,\n need_weights=False)[0]\n # tgt = tgt + self.dropout2(tgt2)\n tgt = tgt + tgt2\n tgt = self.norm2(tgt)\n tgt2 = self.linear2(self.dropout(\n self.activation(self.linear1(tgt))))\n # tgt = tgt + self.dropout3(tgt2)\n tgt = tgt + tgt2\n tgt = self.norm3(tgt)\n # print('tgt.requires_grad')\n # print(tgt.requires_grad)\n return tgt\n\n d = TransformerDecoderLayer(256, 8)\n d.zero_grad()\n a = d(\n ntnt_nograd([\n torch.randn(864, 256),\n torch.randn(360, 256)]),\n ntnt_nograd([\n torch.randn(864, 256),\n torch.randn(360, 256)]),\n pos=ntnt_nograd([\n torch.randn(864, 256),\n torch.randn(360, 256)]),\n query_pos=ntnt_nograd([\n torch.randn(864, 256),\n torch.randn(360, 256)]),\n )\n # a.sum().backward()\n # for (n, p) in d.named_parameters():\n # print(n)\n # print(p is None)\n\n @torch.inference_mode()\n @unittest.skipIf(not torch.cuda.is_available(), \"Test requires cuda\")\n def test_effective_transformer_mha(self):\n\n def test(dtype, num_heads, batch_size, seq_len_, head_size, embedding_dim,\n use_arange=False):\n assert num_heads * head_size == embedding_dim\n import random\n inputs = []\n k = 0\n seq_len = 0\n seq_lens = []\n for _ in range(batch_size):\n i = random.randint(1, seq_len_)\n seq_len = max(i, seq_len)\n seq_lens.append(i)\n if use_arange:\n inputs.append(torch.arange(\n i * embedding_dim).reshape(i, embedding_dim))\n else:\n inputs.append(torch.randn(i, embedding_dim))\n input_nt = nestedtensor.nested_tensor(\n inputs, device=torch.device('cuda'), dtype=dtype)\n\n input_batch, input_mask = input_nt.to_tensor_mask(mask_dim=2)\n\n mha = torch.nn.MultiheadAttention(embedding_dim, num_heads)\n mha = mha.to(dtype)\n if use_arange:\n in_proj_weight_test = torch.arange(mha.in_proj_weight.numel()).reshape(\n mha.in_proj_weight.shape).to(dtype)\n mha.in_proj_weight.copy_(in_proj_weight_test)\n in_proj_weight = mha.in_proj_weight.clone().cuda()\n\n in_proj_bias = mha.in_proj_bias.clone().cuda()\n\n if use_arange:\n out_proj_weight_test = 
torch.arange(mha.out_proj.weight.numel()).reshape(\n mha.out_proj.weight.shape).to(dtype)\n mha.out_proj.weight.copy_(\n out_proj_weight_test)\n out_proj_weight = mha.out_proj.weight.clone().cuda()\n\n import time\n torch.cuda.synchronize()\n torch.cuda.synchronize()\n t0 = time.time()\n scaling = float(head_size ** -0.5)\n for _ in range(5):\n result_nt = torch.ops.nestedtensor.bt_min_mha(num_heads,\n head_size,\n 0.5,\n False,\n input_nt,\n input_nt,\n input_nt,\n in_proj_weight,\n in_proj_bias,\n scaling,\n out_proj_weight,\n in_proj_bias)\n\n torch.cuda.synchronize()\n t1 = time.time()\n a = t1 - t0\n\n mha = mha.cuda()\n torch.cuda.synchronize()\n torch.cuda.synchronize()\n t0 = time.time()\n for _ in range(5):\n attn_output, _ = mha(input_nt, input_nt, input_nt)\n\n torch.cuda.synchronize()\n t1 = time.time()\n b = t1 - t0\n\n self.assertEqual(result_nt, attn_output)\n\n torch.cuda.synchronize()\n input_batch = input_batch.transpose(0, 1)\n not_input_mask = torch.logical_not(input_mask)\n torch.cuda.synchronize()\n t0 = time.time()\n # print(input_batch.size())\n for _ in range(5):\n attn_output, _ = mha(\n input_batch,\n input_batch,\n input_batch,\n key_padding_mask=not_input_mask)\n\n\n torch.cuda.synchronize()\n t1 = time.time()\n attn_output = attn_output.transpose(0, 1)\n attn_output = attn_output * torch.logical_not(not_input_mask.unsqueeze(-1))\n custom_atol = 5e-4\n custom_rtol = 1e-8\n r0 = result_nt.to_padded_tensor(padding=0)\n r1 = attn_output\n # print(\"r0.sum(): \", r0.sum(), \" r1.sum(): \", r1.sum())\n self.assertTrue(torch.allclose(result_nt.to_padded_tensor(padding=0), attn_output, atol=custom_atol, rtol=custom_rtol))\n c = t1 - t0\n # print(\"bt: \", a, \"\\tnt: \", b, \"\\tdense: \", c, \"\\tdense/bt: \", c/a, \"\\tdtype: \", dtype)\n\n for dtype in [torch.float32, torch.float16]:\n # test(dtype, 1, 1, 1, 4, 4, use_arange=True)\n # test(dtype, 1, 1, 2, 2, 2, use_arange=True)\n # test(dtype, 1, 2, 2, 1, 1, use_arange=True)\n # test(dtype, 1, 4, 3, 2, 2, use_arange=True)\n test(dtype, 2, 1, 2, 1, 2)\n test(dtype, 1, 3, 5, 4, 4)\n test(dtype, 2, 3, 5, 2, 4)\n test(dtype, 2, 1, 2, 2, 4)\n test(dtype, 2, 1, 2, 2, 4)\n test(dtype, 2, 3, 5, 2, 4)\n test(dtype, 1, 3, 5, 4, 4)\n test(dtype, 8, 8, 50, 16, 128)\n test(dtype, 16, 64, 50, 16, 256)\n test(dtype, 16, 128, 50, 16, 256)\n test(dtype, 16, 256, 50, 16, 256)\n test(dtype, 4, 256, 50, 256, 1024)\n test(dtype, 16, 256, 50, 64, 1024)\n\n @torch.inference_mode()\n def test_relu(self):\n nt = ntnt_nograd([torch.randn(2, 3), torch.randn(3, 2)])\n n1 = torch.nn.ReLU(inplace=False)\n out1 = n1(nt)\n n2 = torch.nn.ReLU(inplace=True)\n out2 = n2(nt)\n self.assertEqual(out1, out2)\n self.assertEqual(out1, nt)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "8880376", "language": "Python", "matching_score": 4.939117908477783, "max_stars_count": 0, "path": "test/test_nested_tensor_functional.py" }, { "content": "import torch\nimport nestedtensor\nimport unittest\nfrom utils_test_case import TestCase\nimport random\nfrom frozen_batch_norm_2d import NTFrozenBatchNorm2d\nfrom position_encoding import PositionEmbeddingSine\nfrom joiner import Joiner\n\n\ndef ntnt(x): return nestedtensor.nested_tensor(x, requires_grad=True)\ndef ntnt_nograd(x): return nestedtensor.nested_tensor(x)\n\n\nclass TestAutogradFunctional(TestCase):\n @unittest.skip(\"Requires autograd support\")\n def test_nn_conv2d(self):\n def _test(Conv2d):\n inputs = [\n torch.randn(3, 50, 60, requires_grad=True),\n torch.randn(3, 18, 18, 
requires_grad=True)\n ]\n\n # most of optional params\n conv2d = Conv2d()\n tensor_res = []\n for i in range(2):\n t_res = conv2d(inputs[i].unsqueeze(0).contiguous())\n tensor_res.append(t_res.squeeze(0))\n t_res.sum().backward()\n layer_grad0 = [p.grad for (n, p) in conv2d.named_parameters()]\n\n conv2d.zero_grad()\n\n nt = ntnt(inputs)\n nt_res = conv2d(nt)\n nt_res.sum().backward()\n layer_grad1 = [p.grad for (n, p) in conv2d.named_parameters()]\n\n self.assertEqual(ntnt(tensor_res), nt_res)\n map(self.assertEqual, zip(layer_grad0, layer_grad1))\n self.assertEqual(nt.grad[0], inputs[0].grad)\n self.assertEqual(nt.grad[1], inputs[1].grad)\n\n _test(lambda: torch.nn.Conv2d(3, 33, kernel_size=3, stride=(2, 1), padding=(\n 4, 2), padding_mode='zeros', dilation=1, groups=1, bias=True))\n _test(lambda: torch.nn.Conv2d(3, 33, kernel_size=3, stride=(2, 1), padding=(\n 4, 2), padding_mode='zeros', dilation=1, groups=1, bias=False))\n _test(lambda: torch.nn.Conv2d(3, 33, kernel_size=3, stride=(2, 1)))\n _test(lambda: torch.nn.Conv2d(\n 3, 33, kernel_size=(1, 1), stride=(1, 1), bias=False))\n\n @unittest.skip(\"Requires autograd support\")\n def test_nn_linear(self):\n def _test(linear):\n inputs = [\n torch.randn(3, 10, requires_grad=True),\n torch.randn(3, 10, requires_grad=True)\n ]\n\n # most of optional params\n linear = linear()\n tensor_res = []\n for i in range(2):\n t_res = linear(inputs[i].unsqueeze(0).contiguous())\n tensor_res.append(t_res.squeeze(0))\n t_res.sum().backward()\n layer_grad0 = [p.grad for (n, p) in linear.named_parameters()]\n\n linear.zero_grad()\n\n nt = ntnt(inputs)\n nt_res = linear(nt)\n nt_res.sum().backward()\n layer_grad1 = [p.grad for (n, p) in linear.named_parameters()]\n\n self.assertEqual(ntnt(tensor_res), nt_res)\n map(self.assertEqual, zip(layer_grad0, layer_grad1))\n self.assertEqual(nt.grad[0], inputs[0].grad)\n self.assertEqual(nt.grad[1], inputs[1].grad)\n\n _test(lambda: torch.nn.Linear(10, 6))\n\n @unittest.skip(\"Requires autograd support\")\n def test_nn_batch_norm(self):\n def _test(BatchNorm2d, has_grad=True):\n inputs = torch.randn(5, 3, 18, 18, requires_grad=True)\n\n batch_norm = BatchNorm2d()\n\n t_res = batch_norm(inputs)\n t_res.sum().backward()\n layer_grad0 = [p.grad for (n, p) in batch_norm.named_parameters()]\n\n batch_norm.zero_grad()\n nt = ntnt(inputs.unbind())\n nt_res = batch_norm(nt)\n\n self.assertEqual(ntnt(t_res.unbind()), nt_res)\n if has_grad:\n nt_res.sum().backward()\n layer_grad1 = [p.grad for (\n n, p) in batch_norm.named_parameters()]\n map(self.assertEqual, zip(layer_grad0, layer_grad1))\n self.assertEqual(nt.grad[0], inputs.grad[0])\n self.assertEqual(nt.grad[1], inputs.grad[1])\n else:\n self.assertRaisesRegex(\n RuntimeError, \"var.dim gradient not implemented yet.\", lambda: nt_res.sum().backward())\n\n _test(lambda: torch.nn.BatchNorm2d(3, eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True), False)\n _test(lambda: torch.nn.BatchNorm2d(3, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True).eval())\n _test(lambda: torch.nn.BatchNorm2d(3, eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=False), False)\n _test(lambda: torch.nn.BatchNorm2d(3, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=False).eval(), False)\n\n _test(lambda: torch.nn.BatchNorm2d(3, eps=1e-05,\n momentum=0.1, affine=False, track_running_stats=False), False)\n _test(lambda: torch.nn.BatchNorm2d(3, eps=1e-05, momentum=0.1,\n affine=False, track_running_stats=False).eval(), False)\n _test(lambda: 
torch.nn.BatchNorm2d(3, eps=1e-05,\n momentum=0.1, affine=False, track_running_stats=True), False)\n _test(lambda: torch.nn.BatchNorm2d(3, eps=1e-05, momentum=0.1,\n affine=False, track_running_stats=True).eval())\n _test(lambda: torch.nn.BatchNorm2d(3), False)\n\n @unittest.skip(\"Requires autograd support\")\n def test_nn_relu(self):\n inputs = [\n torch.randn(3, 500, 600, requires_grad=True),\n torch.randn(3, 128, 128, requires_grad=True)\n ]\n\n relu = torch.nn.ReLU()\n relu_ = torch.nn.ReLU(inplace=True)\n tensor_res = []\n for i in range(2):\n t_res = relu(inputs[i].unsqueeze(0).contiguous())\n t_res = relu_(t_res)\n tensor_res.append(t_res.squeeze(0))\n tensor_res[i].sum().backward()\n layer_grad0 = [p.grad for (n, p) in relu.named_parameters()]\n\n nt = ntnt(inputs)\n nt_res = relu(nt)\n nt_res = relu_(nt_res)\n nt_res.sum().backward()\n layer_grad1 = [p.grad for (n, p) in relu.named_parameters()]\n\n self.assertEqual(ntnt(tensor_res), nt_res)\n map(self.assertEqual, zip(layer_grad0, layer_grad1))\n self.assertEqual(inputs[0].grad, nt.grad[0])\n self.assertEqual(inputs[1].grad, nt.grad[1])\n\n @unittest.skip(\"Requires autograd support\")\n def test_add(self):\n inputs0_ = [\n torch.randn(5, 6, requires_grad=True),\n torch.randn(1, 1, requires_grad=True)\n ]\n inputs1_ = [\n torch.randn(5, 6, requires_grad=True),\n torch.randn(1, 1, requires_grad=True)\n ]\n inputs0 = ntnt(inputs0_)\n inputs1 = ntnt(inputs1_)\n output = inputs0 + inputs1\n output += inputs0\n output.sum().backward()\n self.assertEqual(inputs0.grad.sum(),\n inputs1.grad.sum() + inputs1.grad.sum())\n\n @unittest.skip(\"Requires autograd support\")\n def test_resnet_bottleneck(self):\n import torchvision\n\n def _test(Bottleneck, has_grad=True):\n inputs_ = [\n torch.randn(256, 50, 60, requires_grad=True)\n ]\n inputs = ntnt(inputs_)\n\n b = Bottleneck()\n print(b)\n x = b(inputs).sum()\n # import torchviz\n # dot = torchviz.make_dot(x)\n # dot.format = 'svg'\n # dot.render('asdf')\n # x.backward()\n # import sys; sys.exit(1)\n g0 = list(p.grad for (n, p) in b.named_parameters())\n\n b.zero_grad()\n b(inputs_[0].unsqueeze(0)).sum().backward()\n g1 = list(p.grad for (n, p) in b.named_parameters())\n\n map(self.assertEqual, zip(g0, g1))\n\n inputs_ = [\n torch.randn(256, 50, 60, requires_grad=True),\n torch.randn(256, 18, 18, requires_grad=True)\n ]\n b = Bottleneck()\n inputs = ntnt(inputs_)\n if has_grad:\n b(inputs).sum().backward()\n # print(list((n, p.grad is None) for (n, p) in b.named_parameters()))\n\n b.zero_grad()\n b(inputs_[0].unsqueeze(0)).sum().backward()\n\n b.zero_grad()\n b(inputs_[1].unsqueeze(0)).sum().backward()\n\n self.assertEqual(inputs_[0].grad, inputs.grad[0])\n self.assertEqual(inputs_[1].grad, inputs.grad[1])\n _test(lambda: torchvision.models.resnet.Bottleneck(256, 64), False)\n _test(lambda: torchvision.models.resnet.Bottleneck(256, 64).eval())\n\n @unittest.skip(\"Requires autograd support\")\n def test_resnet_classification(self):\n import torchvision\n\n def _test(FCNHead):\n inputs_ = [\n torch.randn(256, 50, 60, requires_grad=True)\n ]\n inputs = ntnt(inputs_)\n\n b = FCNHead()\n print(b)\n # print(b)\n # list(b.children())[3].eval() # dropout is stochastic otherwise\n b(inputs).sum().backward()\n g0 = list(p.grad for (n, p) in b.named_parameters())\n\n b.zero_grad()\n b(inputs_[0].unsqueeze(0)).sum().backward()\n g1 = list(p.grad for (n, p) in b.named_parameters())\n\n map(self.assertEqual, zip(g0, g1))\n\n inputs_ = [\n torch.randn(256, 50, 60, requires_grad=True),\n torch.randn(256, 
18, 18, requires_grad=True)\n ]\n inputs = ntnt(inputs_)\n b.zero_grad()\n b(inputs).sum().backward()\n\n b.zero_grad()\n b(inputs_[0].unsqueeze(0)).sum().backward()\n\n b.zero_grad()\n b(inputs_[1].unsqueeze(0)).sum().backward()\n\n self.assertEqual(inputs_[0].grad, inputs.grad[0])\n self.assertEqual(inputs_[1].grad, inputs.grad[1])\n # _test(lambda: torchvision.models.segmentation.fcn.FCNHead(256, 64))\n _test(lambda: torchvision.models.segmentation.fcn.FCNHead(256, 64).eval())\n\n @unittest.skip(\"Requires autograd support\")\n def test_backbone(self):\n import torchvision\n from torchvision.models._utils import IntermediateLayerGetter\n\n def _test(FCNHead):\n inputs_ = [\n torch.randn(3, 50, 60, requires_grad=True)\n ]\n inputs = ntnt(inputs_)\n\n b = FCNHead()\n # print(b)\n # print(b(inputs))\n b(inputs)[0][0].sum().backward()\n g0 = list(p.grad for (n, p) in b.named_parameters())\n\n b.zero_grad()\n b(inputs_[0].unsqueeze(0))[0][0].sum().backward()\n g1 = list(p.grad for (n, p) in b.named_parameters())\n\n map(self.assertEqual, zip(g0, g1))\n\n inputs_ = [\n torch.randn(3, 50, 60, requires_grad=True),\n torch.randn(3, 18, 18, requires_grad=True)\n ]\n inputs = ntnt(inputs_)\n b.zero_grad()\n b(inputs)[0][0].sum().backward()\n # for (n, p) in b.named_parameters():\n # if p.grad is None:\n # print(n)\n # continue\n # print(n, \" is fine\")\n\n b.zero_grad()\n b(inputs_[0].unsqueeze(0))[0][0].sum().backward()\n\n b.zero_grad()\n b(inputs_[1].unsqueeze(0))[0][0].sum().backward()\n\n self.assertEqual(inputs_[0].grad, inputs.grad[0])\n self.assertEqual(inputs_[1].grad, inputs.grad[1])\n # Note: It seems expected that layer0 has no gradients.\n return_layers = {\"layer1\": \"0\", \"layer2\": \"1\",\n \"layer3\": \"2\", \"layer4\": \"3\"}\n _test(lambda: Joiner(IntermediateLayerGetter(torchvision.models.resnet50(\n replace_stride_with_dilation=[False, False, False],\n pretrained=True, norm_layer=NTFrozenBatchNorm2d), return_layers),\n PositionEmbeddingSine(128, normalize=True)))\n\n @unittest.skip(\"Requires autograd support\")\n def test_nn_max_pool2d(self):\n data = [\n [\n torch.randn(3, 500, 600),\n torch.randn(3, 128, 128)\n ],\n [\n torch.randn(3, 500, 600),\n torch.randn(3, 500, 600)\n ],\n ]\n\n # with optional params\n maxPool2d = torch.nn.MaxPool2d(kernel_size=(\n 3, 3), stride=2, padding=(1, 1), dilation=1, ceil_mode=False)\n for inputs in data:\n tensor_res = []\n for i in range(2):\n t_res = maxPool2d(inputs[i].unsqueeze(0).contiguous())\n tensor_res.append(t_res.squeeze(0))\n\n nt = ntnt(inputs)\n nt_res = maxPool2d(nt)\n self.assertEqual(ntnt(tensor_res), nt_res)\n\n @unittest.skip(\"Requires autograd support\")\n def test_fzbn2d(self):\n class FrozenBatchNorm2d(torch.nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters are fixed.\n Copy-paste from torchvision.misc.ops with added eps before rqsrt,\n without which any other models than torchvision.models.resnet[18,34,50,101]\n produce nans.\n \"\"\"\n\n def __init__(self, n):\n super(FrozenBatchNorm2d, self).__init__()\n self.register_buffer(\"weight\", torch.ones(n))\n self.register_buffer(\"bias\", torch.zeros(n))\n self.register_buffer(\"running_mean\", torch.zeros(n))\n self.register_buffer(\"running_var\", torch.ones(n))\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key in state_dict:\n del 
state_dict[num_batches_tracked_key]\n\n super(FrozenBatchNorm2d, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\n def forward(self, x):\n # move reshapes to the beginning\n # to make it fuser-friendly\n w = self.weight.reshape(-1, 1, 1)\n b = self.bias.reshape(-1, 1, 1)\n rv = self.running_var.reshape(-1, 1, 1)\n rm = self.running_mean.reshape(-1, 1, 1)\n eps = 1e-5\n scale = w * (rv + eps).rsqrt()\n bias = b - rm * scale\n # return (x * scale + bias)\n # return x\n # return (x * scale + bias)\n return x + bias\n\n b0 = FrozenBatchNorm2d(64) # .cuda()\n random.seed(1010)\n torch.manual_seed(1310)\n RAND_INTS = [random.randint(100, 300) for _ in range(1)]\n tensors = [torch.rand(64, i, 256, requires_grad=True)\n for i in RAND_INTS]\n nested_tensor = ntnt(tensors)\n # print(nested_tensor.nested_size())\n s0 = b0(nested_tensor).sum()\n s0.backward()\n\n b1 = FrozenBatchNorm2d(64)\n s1 = 0\n for t in tensors:\n s1 += b1(t).sum()\n s1.backward()\n self.assertEqual(s0, s1)\n for i in range(len(tensors)):\n self.assertEqual(nested_tensor.grad[i], tensors[i].grad)\n\n self.assertEqual(len((list(b0.named_parameters()))), 0)\n self.assertEqual(len((list(b1.named_parameters()))), 0)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "5883569", "language": "Python", "matching_score": 6.381524562835693, "max_stars_count": 229, "path": "test/test_nested_tensor_autograd_functional.py" }, { "content": "import traceback\nimport functools\nimport pdb\nimport sys\nimport torch\nimport nestedtensor\nimport unittest\nfrom utils_test_case import TestCase\nimport random\nimport utils\n\nclass NTFrozenBatchNorm2d(torch.nn.Module):\n \"\"\"\n BatchNorm2d where the batch statistics and the affine parameters are fixed.\n Copy-paste from torchvision.misc.ops with added eps before rqsrt,\n without which any other models than torchvision.models.resnet[18,34,50,101]\n produce nans.\n \"\"\"\n\n def __init__(self, n):\n super(NTFrozenBatchNorm2d, self).__init__()\n self.register_buffer(\"weight\", torch.ones(n))\n self.register_buffer(\"bias\", torch.zeros(n))\n self.register_buffer(\"running_mean\", torch.zeros(n))\n self.register_buffer(\"running_var\", torch.ones(n))\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key in state_dict:\n del state_dict[num_batches_tracked_key]\n\n super(NTFrozenBatchNorm2d, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\n def forward(self, x):\n # move reshapes to the beginning\n # to make it fuser-friendly\n w = self.weight.reshape(-1, 1, 1)\n b = self.bias.reshape(-1, 1, 1)\n rv = self.running_var.reshape(-1, 1, 1)\n rm = self.running_mean.reshape(-1, 1, 1)\n eps = 1e-5\n scale = w * (rv + eps).rsqrt()\n bias = b - rm * scale\n res = (x * scale + bias)\n return res\n", "id": "6337948", "language": "Python", "matching_score": 0.6317352652549744, "max_stars_count": 229, "path": "test/frozen_batch_norm_2d.py" }, { "content": "import torch\nimport nestedtensor\n\n# NT case query, key, value have nested_dim 1 and are of shape (bsz, tgt_len, embed_dim)\n\n\ndef multi_head_attention_forward(query,\n key,\n value,\n embed_dim_to_check,\n num_heads,\n in_proj_weight,\n in_proj_bias,\n bias_k,\n bias_v,\n add_zero_attn,\n dropout_p,\n out_proj_weight,\n out_proj_bias,\n 
training=True,\n key_padding_mask=None,\n need_weights=True,\n attn_mask=None,\n use_separate_proj_weight=False,\n q_proj_weight=None,\n k_proj_weight=None,\n v_proj_weight=None,\n static_k=None,\n static_v=None\n ):\n assert isinstance(query, nestedtensor.NestedTensor)\n assert isinstance(key, nestedtensor.NestedTensor)\n assert isinstance(value, nestedtensor.NestedTensor)\n assert torch.is_tensor(out_proj_weight)\n assert torch.is_tensor(out_proj_bias)\n\n # TODO: Explicitly unsupported flags\n assert not use_separate_proj_weight\n assert attn_mask is None\n assert key_padding_mask is None\n assert bias_k is None\n assert bias_v is None\n assert static_k is None\n assert static_v is None\n assert not add_zero_attn\n # assert not need_weights\n\n bsz, tgt_len, embed_dim = query.size()\n assert embed_dim == embed_dim_to_check\n # allow MHA to have different sizes for the feature dimension\n assert key.size(0) == value.size(0) and key.size(1) == value.size(1)\n\n head_dim = embed_dim // num_heads\n assert head_dim * num_heads == embed_dim, \"embed_dim must be divisible by num_heads\"\n scaling = float(head_dim) ** -0.5\n\n if query is key and key is value and in_proj_weight.is_cuda:\n return torch.ops.nestedtensor.bt_min_mha(num_heads,\n head_dim,\n 0.5,\n False,\n query,\n query,\n query,\n in_proj_weight,\n in_proj_bias,\n scaling,\n out_proj_weight,\n in_proj_bias), None\n\n return nestedtensor.nested.nested._wrap_result(\n torch.ops.nestedtensor.min_mha(num_heads,\n head_dim,\n dropout_p,\n training,\n query._impl,\n key._impl,\n value._impl,\n in_proj_weight,\n in_proj_bias,\n scaling,\n out_proj_weight,\n out_proj_bias)), None\n", "id": "1528005", "language": "Python", "matching_score": 2.6102616786956787, "max_stars_count": 229, "path": "nestedtensor/nn/mha.py" }, { "content": "from .mha import multi_head_attention_forward\n", "id": "6141592", "language": "Python", "matching_score": 0.013385160826146603, "max_stars_count": 229, "path": "nestedtensor/nn/__init__.py" }, { "content": "#\n# <NAME>\n# Modified by <NAME>\n#\n# Overall rules:\n# Agents at plants reproduce as much as possible\n# Agents are born with a random direction away from the plant\n# Agents send a message with they attack\n# Agents always attack\n# Agents goto the location of the attack, exception scouts that keep looking\n#\n# Results\n# Large growing swarm that explores that area for all plants as fast as possible\n# until the enemy is found. 
By the time the enemy is found everyone is spread out\n# Once the enemy is found everyone heads in that direction and if there are any\n# plants between the two they are usually taken before they enemy.\n# Once a new plant is reached more are quickly spawned and that plant is overrun\n# From there it is simple attrition\n#\n\nimport cells\n\nfrom cells import Action\nfrom cells import ACT_SPAWN, ACT_MOVE, ACT_EAT, ACT_RELEASE, ACT_ATTACK\nfrom cells import ACT_LIFT, ACT_DROP\n\nimport cmath\nfrom random import choice, random, randrange\n\nimport numpy\n\nfrom .genes import InitializerGene, make_normally_perturbed_gene\n\n\nDesiredEnergyGene = make_normally_perturbed_gene(5, cells.ATTACK_POWER,\n cells.ENERGY_CAP)\nFieldSpawnEnergyGene = make_normally_perturbed_gene(5, cells.SPAWN_MIN_ENERGY,\n cells.ENERGY_CAP)\nPlantSpawnEnergyGene = make_normally_perturbed_gene(5, cells.SPAWN_MIN_ENERGY,\n cells.ENERGY_CAP)\n\n\ndef debug(s):\n #print s\n pass\n\nclass MessageType(object):\n ATTACK = 0\n\nsize = 300 #cells.config.getint('terrain', 'bounds')\n\nclass AgentMind(object):\n def __init__(self, args):\n # The direction to walk in\n self.tx = randrange(size)\n self.ty = randrange(size)\n\n self.step = 0\n self.my_plant = None\n self.apoptosis = randrange(100, 201)\n\n if args is None:\n self.strain = 0\n self.scout = False\n self.genes = genes = {}\n genes['desired_energy'] = DesiredEnergyGene(\n InitializerGene(2 * cells.SPAWN_MIN_ENERGY))\n genes['field_spawn_energy'] = FieldSpawnEnergyGene(\n InitializerGene(4 * cells.ENERGY_CAP // 5))\n genes['plant_spawn_energy'] = PlantSpawnEnergyGene(\n InitializerGene(2 * cells.SPAWN_MIN_ENERGY))\n else:\n parent = args[0]\n self.strain = parent.strain\n # Don't come to the rescue, continue looking for plants & bad guys.\n self.genes = dict((k, v.spawn()) for (k,v) in parent.genes.items())\n if parent.my_plant is not None:\n self.scout = (random() > 0.9)\n else:\n self.scout = False\n\n\n def get_available_space_grid(self, me, view):\n grid = numpy.ones((3,3))\n for agent in view.get_agents():\n grid[agent.x - me.x + 1, agent.y - me.y + 1] = 0\n for plant in view.get_plants():\n grid[plant.x - me.x + 1, plant.y - me.y + 1] = 0\n grid[1,1] = 0\n return grid\n\n def smart_spawn(self, me, view):\n grid = self.get_available_space_grid(me, view)\n ret = []\n for x in range(3):\n for y in range(3):\n if grid[x,y]:\n ret.append((x-1, y-1))\n if ret:\n return choice(ret)\n return (-1, -1)\n\n def would_bump(self, me, view, dir_x, dir_y):\n grid = self.get_available_space_grid(me, view)\n dx = numpy.sign(dir_x)\n dy = numpy.sign(dir_y)\n adj_dx = dx + 1\n adj_dy = dy + 1\n return grid[adj_dx,adj_dy] == 0\n\n\n def act(self, view, msg):\n me = view.me\n mx = me.x\n my = me.y\n my_pos = mx, my\n\n tx = self.tx\n ty = self.ty\n if mx == tx and my == ty:\n self.tx = tx = randrange(tx - 5, tx + 6)\n self.ty = ty = randrange(tx - 5, tx + 6)\n self.step = 0\n\n\n if self.apoptosis <= 0:\n return Action(ACT_MOVE, (0, 0))\n\n # Attack anyone next to me, but first send out the distress message with my position\n my_team = me.team\n for a in view.agent_views:\n if a.team != my_team:\n ax = a.y\n ay = a.y\n msg.send_message((self.strain, MessageType.ATTACK, ax, ay))\n return Action(ACT_ATTACK, (ax, ay))\n\n # Eat any energy I find until I am 'full'. 
The cost of eating\n # is 1, so don't eat just 1 energy.\n my_energy = me.energy\n if self.my_plant is None and view.energy_map.values[my_pos] > 1:\n if my_energy <= self.genes['desired_energy'].val:\n return Action(ACT_EAT)\n# else:\n# debug('Not eating. Have %s which is above %s' %\n# (my_energy, self.genes['desired_energy'].val))\n\n\n # If there is a plant near by go to it and spawn all I can\n if self.my_plant is None :\n plants = view.get_plants()\n if plants:\n self.my_plant = plants[0]\n self.tx = tx = mx\n self.ty = ty = my\n self.strain = self.my_plant.x * 41 + self.my_plant.y\n debug('attached to plant, strain %s' % self.strain)\n else:\n self.apoptosis -= 1\n if self.apoptosis <= 0:\n self.my_plant = None\n return Action(ACT_RELEASE, (mx + 1, my, my_energy - 1))\n \n\n if self.my_plant is None:\n spawn_threshold = self.genes['field_spawn_energy'].val\n else:\n spawn_threshold = self.genes['plant_spawn_energy'].val\n if my_energy >= spawn_threshold:\n spawn_x, spawn_y = self.smart_spawn(me, view)\n return Action(ACT_SPAWN,\n (me.x + spawn_x, me.y + spawn_y, self))\n elif self.my_plant is not None:\n return Action(ACT_EAT)\n\n \n # If I get the message of help go and rescue!\n if (not self.step) and (not self.scout) and random() > 0.1:\n ax = 0;\n ay = 0;\n best = 500;\n message_count = len(msg.get_messages());\n for strain, type, ox, oy in msg.get_messages():\n if strain != self.strain:\n continue\n if (type == MessageType.ATTACK) :\n dist = max(abs(mx-ax), abs(my-ay))\n if dist < best:\n ax = ox\n ay = oy\n best = dist\n if ax and ay:\n self.tx = tx = ax + randrange(-3, 4)\n self.ty = ty = ay + randrange(-3, 4)\n # if (message_count > 1) :\n # # Attack the base, not the front\n # agent_scale = 1 + random()\n # self.x *= agent_scale\n # self.y *= agent_scale\n # don't stand still once we get there\n self.step = randrange(20, 100);\n\n # Back to step 0 we can change direction at the next attack.\n if self.step:\n self.step -= 1\n\n return Action(ACT_MOVE, (tx, ty))\n", "id": "10524940", "language": "Python", "matching_score": 7.306515216827393, "max_stars_count": 3, "path": "minds/benvolution_genetic.py" }, { "content": "#\n# <NAME>\n# Modified by <NAME>\n#\n# Overall rules:\n# Agents at plants reproduce as much as possible\n# Agents are born with a random direction away from the plant\n# Agents send a message with they attack\n# Agents always attack\n# Agents goto the location of the attack, exception scouts that keep looking\n#\n# Results\n# Large growing swarm that explores that area for all plants as fast as possible\n# until the enemy is found. By the time the enemy is found everyone is spread out\n# Once the enemy is found everyone heads in that direction and if there are any\n# plants between the two they are usually taken before they enemy.\n# Once a new plant is reached more are quickly spawned and that plant is overrun\n# From there it is simple attrition\n#\n\nimport cmath\nimport random, cells\n\nimport numpy\n\nfrom . 
import genes\n\nclass MessageType(object):\n ATTACK = 0\n\nclass AgentMind(object):\n def __init__(self, args):\n # The direction to walk in\n self.x = None\n # Once we are attacked (mainly) those reproducing at plants should eat up a defense.\n self.defense = 0\n\n self.step = 0\n self.my_plant = None\n self.bumps = 0\n self.last_pos = (-1, -1)\n\n if args is None:\n self.strain = 0\n self.scout = False\n else:\n parent = args[0]\n self.strain = parent.strain\n # Don't come to the rescue, continue looking for plants & bad guys.\n if parent.my_plant:\n self.scout = (random.random() > 0.9)\n else:\n self.scout = False\n\n\n def get_available_space_grid(self, me, view):\n grid = numpy.ones((3,3))\n for agent in view.get_agents():\n grid[agent.x - me.x + 1, agent.y - me.y + 1] = 0\n for plant in view.get_plants():\n grid[plant.x - me.x + 1, plant.y - me.y + 1] = 0\n grid[1,1] = 0\n return grid\n\n def smart_spawn(self, me, view):\n grid = self.get_available_space_grid(me, view)\n for x in range(3):\n for y in range(3):\n if grid[x,y]:\n return (x-1, y-1)\n return (-1, -1)\n\n def would_bump(self, me, view, dir_x, dir_y):\n grid = self.get_available_space_grid(me, view)\n dx = numpy.sign(dir_x)\n dy = numpy.sign(dir_y)\n adj_dx = dx + 1\n adj_dy = dy + 1\n return grid[adj_dx,adj_dy] == 0\n\n\n def act(self, view, msg):\n ret = self.act_wrapper(view, msg)\n self.last_pos = view.me.get_pos()\n return ret\n\n def act_wrapper(self, view, msg):\n me = view.get_me()\n my_pos = (mx,my) = me.get_pos()\n if my_pos == self.last_pos:\n self.bumps += 1\n else:\n self.bumps = 0\n\n if self.x is None:\n self.x = random.randrange(view.energy_map.width) - me.x\n self.y = random.randrange(view.energy_map.height) - me.y\n # Attack anyone next to me, but first send out the distress message with my position\n for a in view.get_agents():\n if (a.get_team() != me.get_team()):\n msg.send_message((self.strain, MessageType.ATTACK, mx,my))\n return cells.Action(cells.ACT_ATTACK, a.get_pos())\n\n # Eat any energy I find until I am 'full'. The cost of eating\n # is 1, so don't eat just 1 energy.\n if view.get_energy().get(mx, my) > 1:\n if (me.energy <= 50):\n return cells.Action(cells.ACT_EAT)\n if (me.energy < self.defense and (random.random()>0.3)):\n return cells.Action(cells.ACT_EAT)\n\n\n # If there is a plant near by go to it and spawn all I can\n if self.my_plant is None :\n plants = view.get_plants()\n if plants :\n self.my_plant = plants[0]\n self.x = self.y = 0\n self.strain = self.my_plant.x * 41 + self.my_plant.y\n\n # Current rules don't make carrying around excess energy\n # worthwhile. Generates a very nice \"They eat their\n # wounded?!\" effect. Also burns extra energy so the enemy\n # can't use it.\n # Spawning takes 25 of the energy and gives it\n # to the child and reserves the other 25 for the child's death\n # drop. In addition, the action costs 1 unit. 
Therefore, we\n # can't create energy by spawning...\n if me.energy >= 51:\n spawn_x, spawn_y = self.smart_spawn(me, view)\n return cells.Action(cells.ACT_SPAWN,\n (me.x + spawn_x, me.y + spawn_y, self))\n\n # If I get the message of help go and rescue!\n if not self.step and not self.scout and random.random() > 0.1:\n ax = 0;\n ay = 0;\n best = 500;\n message_count = len(msg.get_messages());\n for m in msg.get_messages():\n (strain, type, ox,oy) = m\n if strain != self.strain:\n continue\n if (type == MessageType.ATTACK) :\n dist = max(abs(mx-ax), abs(my-ay))\n if dist < best:\n ax = ox\n ay = oy\n best = dist\n if ax and ay:\n self.defense = 200\n dir = ax-mx + (ay - my) * 1j\n r, theta = cmath.polar(dir)\n theta += 0.02 * random.random() - 0.5\n dir = cmath.rect(r, theta)\n self.x = dir.real\n self.y = dir.imag\n # if (message_count > 1) :\n # # Attack the base, not the front\n # agent_scale = 1 + random.random()\n # self.x *= agent_scale\n # self.y *= agent_scale\n # don't stand still once we get there\n if (self.x == 0 and self.y == 0) :\n self.x = random.randrange(-1, 2)\n self.y = random.randrange(-1, 2)\n self.step = random.randrange(20, 100);\n\n if self.bumps >= 2:\n self.x = random.randrange(-3,4)\n self.y = random.randrange(-3,4)\n self.bumps = 0\n\n\n # hit world wall\n map_size = view.energy_map.width\n if (mx == 0 or mx == map_size-1) :\n self.x = random.randrange(-1,2)\n if (my == 0 or my == map_size-1) :\n self.y = random.randrange(-1,2)\n\n # Back to step 0 we can change direction at the next attack.\n if self.step:\n self.step -= 1\n\n return cells.Action(cells.ACT_MOVE,(mx+self.x,my+self.y))\n", "id": "10601754", "language": "Python", "matching_score": 3.8706603050231934, "max_stars_count": 3, "path": "minds/benvolution.py" }, { "content": "'''\nDefines an agent mind that attacks any opponent agents within its view,\nattaches itself to the strongest plant it finds, eats when its hungry, \n'''\n\nimport random, cells\nimport math, numpy\n\nclass AgentType(object):\n\tQUEEN = 0\n\tWORKER = 1\n\tFIGHTER = 2\n\tBUILDER = 3\n\nclass MessageType(object):\n\tFOUND = 0\n\tDEFEND = 1\n\tCLAIM = 2\n\tCLAIMED = 3\n\ndef dist(a, b):\n\treturn int(math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2))\n\ndef length(xy):\n\treturn dist(xy, (0, 0))\n\ndef offset(i):\n\ti = i % 9\n\tx = 0\n\ty = 0\n\tif i < 3:\n\t\ty = -1\n\tif i > 5:\n\t\ty = 1\n\t\n\tif i == 0 or i == 5 or i == 6:\n\t\tx = -1\n\tif i == 2 or i == 3 or i == 8:\n\t\tx = 1\n\n\treturn (x, y)\n\ndef get_available_space_grid(view, agent):\n\tgrid = numpy.ones((3,3))\n\tfor a in view.get_agents():\n\t\tgrid[a.x - agent.x + 1, a.y - agent.y + 1] = 0\n\tfor plant in view.get_plants():\n\t\tgrid[plant.x - agent.x + 1, plant.y - agent.y + 1] = 0\n\tgrid[1,1] = 0\n\treturn grid\n\ndef spawnPos(i, type, view, agent):\n\tif type == AgentType.QUEEN:\n\t\told = offset(i)\n\t\treturn (-old[0], -old[1])\n\tgrid = get_available_space_grid(view, agent)\n\tfor x in range(3):\n\t\tfor y in range(3):\n\t\t\tif grid[x,y]:\n\t\t\t\treturn (x-1, y-1)\n\treturn (-1, -1)\n\nclass AgentMind(object):\n\n\tdef __init__(self, data):\n\t\tself.target_range = random.randrange(50, 1000)\n\n\t\tif data == None:\n\t\t\tself.type = AgentType.QUEEN\n\t\t\tself.ratios = (1,)\n\t\telse:\n\t\t\tself.type = data[0]\n\t\t\tself.ratios = (1, 1, 1, 2)\n\n\t\tif self.type == AgentType.QUEEN:\n\t\t\tself.plant = None\n\t\t\tself.claimed = False\n\t\t\tself.claiming = False\n\t\t\tself.position = 0\n\t\t\tself.count = 0\n\t\t\tself.directionOfAttack = 
None\n\t\t\tself.newborn = True\n\t\t\tself.age = 0\n\n\t\tif self.type == AgentType.WORKER:\n\t\t\tself.plantList = list()\n\t\t\tself.startPoint = data[1]\n\n\t\tif self.type == AgentType.BUILDER:\n\t\t\tself.radius = 10\n\t\t\tself.height = 4\n\t\t\tself.openings = 1\n\n\t\tself.skip = True\n\n\t\tif self.type == AgentType.FIGHTER and data[1]:\n\t\t\tself.direction = data[1]\n\t\telse:\n\t\t\tself.direction = (random.randrange(0, 300), random.randrange(0, 300))\n\n\tdef act(self, view, msg):\n\t\tagent = view.get_me()\n\t\tposition = (x, y)= agent.get_pos()\n\n\t\tif dist(self.direction, position) < 2:\n\t\t\tself.direction = (random.randrange(0, view.energy_map.width), random.randrange(0, view.energy_map.height))\n\n\t\t# Attack any opponents.\n\t\tfor a in view.get_agents():\n\t\t\tif a.get_team() != agent.get_team():\n\t\t\t\tif self.type == AgentType.QUEEN:\n\t\t\t\t\tmsg.send_message((MessageType.DEFEND, (x,y)))\n\t\t\t\t\tself.ratios = [0, 2, 2, 2]\n\t\t\t\telse:\n\t\t\t\t\tmsg.send_message((MessageType.FOUND, a.get_pos()))\n\t\t\t\treturn cells.Action(cells.ACT_ATTACK, a.get_pos())\n\n\t\t# Process messages\n\t\talreadyClaimed = 0\n\t\tdistance = 1000000\n\t\tfor message in msg.get_messages():\n\t\t\t# Queen message behavior\n\t\t\tif message[0] == MessageType.CLAIM and self.type == AgentType.QUEEN:\n\t\t\t\tif self.plant != None and self.plant.get_pos() == message[1]:\n\t\t\t\t\tif self.claimed:\n\t\t\t\t\t\tself.newborn = False\n\t\t\t\t\t\tmsg.send_message((MessageType.CLAIMED, message[1]))\n\t\t\tif message[0] == MessageType.CLAIMED and self.type == AgentType.QUEEN:\n\t\t\t\tif self.plant != None and self.plant.get_pos() == message[1]:\n\t\t\t\t\tif not self.claimed:\n\t\t\t\t\t\talreadyClaimed += 1\n\t\t\tif message[0] == MessageType.FOUND and self.type == AgentType.QUEEN:\n\t\t\t\tif dist(message[1], position) < distance:\n\t\t\t\t\tself.directionOfAttack = message[1]\n\t\t\t\t\tdistance = dist(message[1], position)\n\n\t\t\t# Worker message behavior\n\t\t\tif self.type == AgentType.WORKER:\n\t\t\t\tif message[0] == MessageType.CLAIM:\n\t\t\t\t\tfound = False\n\t\t\t\t\tfor p in self.plantList:\n\t\t\t\t\t\tif p == message[1]:\n\t\t\t\t\t\t\tfound = True\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif not found:\n\t\t\t\t\t\tself.plantList.append(message[1])\n\n\t\t\t\tif message[0] == MessageType.DEFEND or message[0] == MessageType.FOUND:\n\t\t\t\t\taDistance = dist(position, message[1])\n\t\t\t\t\tif aDistance < 20 and aDistance < distance:\n\t\t\t\t\t\tself.type = AgentType.FIGHTER\n\t\t\t\t\t\tself.direction = message[1]\n\t\t\t\t\t\tdistance = aDistance\n\n\t\t\t# Fighter message behavior\n\t\t\tif self.type == AgentType.FIGHTER:\n\t\t\t\tif message[0] == MessageType.DEFEND or message[0] == MessageType.FOUND:\n\t\t\t\t\tif distance > dist(position, message[1]):\n\t\t\t\t\t\tself.direction = message[1]\n\t\t\t\t\t\tdistance = dist(position, message[1])\n\n\t\tif self.type == AgentType.WORKER:\n\t\t\tif dist(position, self.startPoint) > 2:\n\t\t\t\tplants = view.get_plants()\n\t\t\t\tif plants:\n\t\t\t\t\tfound = False\n\t\t\t\t\tfor p in self.plantList:\n\t\t\t\t\t\tif p == plants[0].get_pos():\n\t\t\t\t\t\t\tfound = True\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif not found:\n\t\t\t\t\t\tself.type = AgentType.QUEEN\n\t\t\t\t\t\tself.ratios = (1,1,1,2)\n\t\t\t\t\t\tself.newborn = True\n\t\t\t\t\t\tself.plant = None\n\t\t\t\t\t\tself.claimed = False\n\t\t\t\t\t\tself.claiming = False\n\t\t\t\t\t\tself.position = 0\n\t\t\t\t\t\tself.count = 0\n\t\t\t\t\t\tself.directionOfAttack = 
None\n\t\t\t\t\t\tself.age = 0\n\t\t\t\t\t\tdel self.plantList\n\n\t\t\t# Eat if hungry.\n\t\t\thungry = (agent.energy < 50)\n\t\t\tenergy_here = view.get_energy().get(x, y)\n\t\t\tfood = (energy_here > 0)\n\t\t\tif hungry and food:\n\t\t\t\treturn cells.Action(cells.ACT_EAT)\n\n\t\t\tif agent.energy > 500:\n\t\t\t\tsp = spawnPos(0, AgentType.WORKER, view, agent)\n\t\t\t\tsp = (sp[0]+x, sp[1]+y, AgentType.WORKER, (x, y))\n\t\t\t\treturn cells.Action(cells.ACT_SPAWN, sp)\n\n\t\t\tif random.random() < 0.65:\n\t\t\t\tif random.random() < 0.4:\n\t\t\t\t\tif view.get_energy().get(x, y) > 0:\n\t\t\t\t\t\treturn cells.Action(cells.ACT_EAT)\n\n\t\t\t\tdirection = [self.direction[0]-x, self.direction[1]-y]\n\t\t\t\tif direction[0] > 0:\n\t\t\t\t\tdirection[0] = 1\n\t\t\t\telif direction[0] == 0:\n\t\t\t\t\tdirection[0] = 0\n\t\t\t\telse:\n\t\t\t\t\tdirection[0] = -1\n\n\t\t\t\tif direction[1] > 0:\n\t\t\t\t\tdirection[1] = 1\n\t\t\t\telif direction[1] == 0:\n\t\t\t\t\tdirection[1] = 0\n\t\t\t\telse:\n\t\t\t\t\tdirection[1] = -1\n\n\t\t\t\tposition = (position[0]+direction[0], position[1]+direction[1])\n\t\t\telse:\n\t\t\t\tposition = (x + random.randrange(-1, 2), y + random.randrange(-1, 2))\n\t\t\treturn cells.Action(cells.ACT_MOVE, position)\n\n\t\tif self.type == AgentType.FIGHTER:\n\t\t\t# Eat if hungry.\n\t\t\thungry = (agent.energy < 100)\n\t\t\tenergy_here = view.get_energy().get(x, y)\n\t\t\tfood = (energy_here > 0)\n\t\t\tif hungry and food:\n\t\t\t\treturn cells.Action(cells.ACT_EAT)\n\n\t\t\tif agent.energy > 1000:\n\t\t\t\tsp = spawnPos(0, AgentType.FIGHTER, view, agent)\n\t\t\t\tsp = (sp[0]+x, sp[1]+y, AgentType.FIGHTER, (x, y))\n\t\t\t\treturn cells.Action(cells.ACT_SPAWN, sp)\n\n\t\t\tif random.random() < 0.85 or dist(position, self.direction) < 8:\n\t\t\t\tdirection = [self.direction[0]-x, self.direction[1]-y]\n\t\t\t\tif direction[0] > 0:\n\t\t\t\t\tdirection[0] = 1\n\t\t\t\telif direction[0] == 0:\n\t\t\t\t\tdirection[0] = 0\n\t\t\t\telse:\n\t\t\t\t\tdirection[0] = -1\n\n\t\t\t\tif direction[1] > 0:\n\t\t\t\t\tdirection[1] = 1\n\t\t\t\telif direction[1] == 0:\n\t\t\t\t\tdirection[1] = 0\n\t\t\t\telse:\n\t\t\t\t\tdirection[1] = -1\n\n\t\t\t\tposition = (position[0]+direction[0], position[1]+direction[1])\n\t\t\telse:\n\t\t\t\tposition = (x + random.randrange(-1, 2), y + random.randrange(-1, 2))\n\t\t\treturn cells.Action(cells.ACT_MOVE, position)\n\n\n\t\t# Queen Stuff\n\t\tif self.type == AgentType.QUEEN:\n\t\t\t# Check claim\n\t\t\tif self.claiming:\n\t\t\t\tif self.skip:\n\t\t\t\t\tself.skip = False\n\t\t\t\telse:\n\t\t\t\t\tif alreadyClaimed > 39:\n\t\t\t\t\t\t# Try again\n\t\t\t\t\t\tself.plant = None\n\t\t\t\t\t\tself.claiming = False\n\t\t\t\t\telse:\n\t\t\t\t\t\t# We have a throne\n\t\t\t\t\t\tself.claimed = True\n\t\t\t\t\t\tself.claiming = False\n\t\t\t\t\t\tself.position = alreadyClaimed\n\t\t\t\t\t\tprint(alreadyClaimed)\n\t\t\t\t\tself.skip = True\n\n\t\t\t# Get a plant\n\t\t\tif self.plant == None and view.get_plants():\n\t\t\t\tself.age += 1\n\t\t\t\tif self.age > 5:\n\t\t\t\t\tself.type = AgentType.WORKER\n\t\t\t\t\tself.plantList = list()\n\n\t\t\t\tif view.get_plants():\n\t\t\t\t\tplants = view.get_plants()\n\t\t\t\t\tbestPlant = plants[0]\n\t\t\t\t\tdistance = dist(position, bestPlant.get_pos())\n\t\t\t\t\tfor plant in plants:\n\t\t\t\t\t\tif distance > dist(position, bestPlant.get_pos()):\n\t\t\t\t\t\t\tdistance = dist(position, bestPlant.get_pos())\n\t\t\t\t\t\t\tbestPlant = plant\n\t\t\t\t\t\t\n\t\t\t\t\tself.plant = bestPlant\n\t\t\t\t\tself.claiming = 
True\n\t\t\t\t\tmsg.send_message((MessageType.CLAIM, self.plant.get_pos()))\n\n\t\t\t# Check position\n\t\t\tif self.claimed == False and self.claiming == False:\n\t\t\t\t# Move randomly\n\t\t\t\tif random.random() > 0.75:\n\t\t\t\t\tdirection = [self.direction[0]-x, self.direction[1]-y]\n\t\t\t\t\tif direction[0] > 0:\n\t\t\t\t\t\tdirection[0] = 1\n\t\t\t\t\telif direction[0] == 0:\n\t\t\t\t\t\tdirection[0] = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tdirection[0] = -1\n\n\t\t\t\t\tif direction[1] > 0:\n\t\t\t\t\t\tdirection[1] = 1\n\t\t\t\t\telif direction[1] == 0:\n\t\t\t\t\t\tdirection[1] = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tdirection[1] = -1\n\n\t\t\t\t\tposition = (position[0]+direction[0], position[1]+direction[1])\n\t\t\t\telse:\n\t\t\t\t\tposition = (x + random.randrange(-1, 2), y + random.randrange(-1, 2))\n\t\t\t\treturn cells.Action(cells.ACT_MOVE, position)\n\n\t\t\tif self.claimed:\n\t\t\t\t# Move towards\n\t\t\t\toff = offset(self.position)\n\t\t\t\tpos = self.plant.get_pos()\n\t\t\t\tpos = (pos[0]+off[0], pos[1]+off[1])\n\t\t\t\tdistance = dist(pos, position)\n\t\t\t\t\n\t\t\t\tif distance > 0:\n\t\t\t\t\tif agent.energy > distance * 1.1:\n\t\t\t\t\t\tif random.random() > 0.6:\n\t\t\t\t\t\t\tpos = (x + random.randrange(-1, 2), y + random.randrange(-1, 2))\n\t\t\t\t\t\treturn cells.Action(cells.ACT_MOVE, pos)\n\t\t\t\t\telse:\n\t\t\t\t\t\t# Cannot move in one go eat if pos or move a bit\n\t\t\t\t\t\tif view.get_energy().get(x, y) > 0:\n\t\t\t\t\t\t\treturn cells.Action(cells.ACT_EAT)\n\t\t\t\t\t\tmxy = [0, 0]\n\t\t\t\t\t\tif self.plant.get_pos()[0] > x:\n\t\t\t\t\t\t\tmxy[0] = 1\n\t\t\t\t\t\telif self.plant.get_pos()[0] < x:\n\t\t\t\t\t\t\tmxy[0] = -1\n\t\t\t\t\t\tif self.plant.get_pos()[1] > y:\n\t\t\t\t\t\t\tmxy[1] = 1\n\t\t\t\t\t\telif self.plant.get_pos()[1] < y:\n\t\t\t\t\t\t\tmxy[1] = -1\n\n\t\t\t\t\t\tmxy = (mxy[0]+x, mxy[1]+y)\n\t\t\t\t\t\treturn cells.Action(cells.ACT_MOVE, mxy)\n\t\t\t\t\t\n\t\t\t# Breed or Eat\n\t\t\tnxt = self.ratios[self.count%len(self.ratios)]\n\t\t\tspawn = [x, y, nxt]\n\t\t\tspawning = False\n\n\t\t\tif self.newborn and agent.energy > 100:\n\t\t\t\tspawn = [x, y, AgentType.QUEEN]\n\t\t\t\tspawnOff = spawnPos(self.position, AgentType.QUEEN, view, agent)\n\t\t\t\tspawning = True\n\t\t\tif nxt == AgentType.QUEEN and agent.energy > 100:\n\t\t\t\t# Spawn new queen\n\t\t\t\tspawnOff = spawnPos(self.position, nxt, view, agent)\n\t\t\t\tspawning = True\n\t\t\tif nxt == AgentType.WORKER and agent.energy > 100:\n\t\t\t\t# Spawn new worker\n\t\t\t\tspawnOff = spawnPos(self.position, nxt, view, agent)\n\t\t\t\tspawn.append(position)\n\t\t\t\tspawning = True\n\t\t\tif nxt == AgentType.FIGHTER and agent.energy > 100:\n\t\t\t\t# Spawn new fighter\n\t\t\t\tspawnOff = spawnPos(self.position, nxt, view, agent)\n\t\t\t\tspawn.append(self.directionOfAttack)\n\t\t\t\tspawning = True\n\t\t\tif nxt == AgentType.BUILDER and agent.energy > 100:\n\t\t\t\t# Spawn new builder\n\t\t\t\tspawnOff = spawnPos(self.position, nxt, view, agent)\n\t\t\t\tspawning = True\n\n\t\t\tif spawning:\n\t\t\t\tspawn[0] += spawnOff[0]\n\t\t\t\tspawn[1] += spawnOff[1]\n\t\t\t\tself.count = self.count + 1\n\t\t\t\treturn cells.Action(cells.ACT_SPAWN, spawn)\n\n\t\t\t# Must eat\n\t\t\treturn cells.Action(cells.ACT_EAT)\n\t\t\t\n\n\t\tif random.random() > 0.75:\n\t\t\tdirection = (self.direction[0]-x, self.direction[1]-y)\n\t\t\tif direction[0] > 0:\n\t\t\t\tdirection[0] = 1\n\t\t\telif direction[0] == 0:\n\t\t\t\tdirection[0] = 0\n\t\t\telse:\n\t\t\t\tdirection[0] = -1\n\n\t\t\tif direction[1] > 
0:\n\t\t\t\tdirection[1] = 1\n\t\t\telif direction[1] == 0:\n\t\t\t\tdirection[1] = 0\n\t\t\telse:\n\t\t\t\tdirection[1] = -1\n\n\t\t\tposition = (position[0]+direction[0], position[1]+direction[1])\n\t\telse:\n\t\t\tposition = (x + random.randrange(-1, 2), y + random.randrange(-1, 2))\n\t\treturn cells.Action(cells.ACT_MOVE, position)\n", "id": "8623437", "language": "Python", "matching_score": 2.960306167602539, "max_stars_count": 3, "path": "minds/seken.py" }, { "content": "import random, cells\n\n\nclass AgentMind(object):\n def __init__(self, junk):\n self.my_plant = None\n self.mode = 1\n self.target_range = random.randrange(50,200)\n\n def act(self,view,msg):\n x_sum = 0\n y_sum = 0\n dir = 1\n n = len(view.get_plants())\n me = view.get_me()\n mp = (mx,my)= me.get_pos()\n for a in view.get_agents():\n if (a.get_team()!=me.get_team()):\n return cells.Action(cells.ACT_ATTACK,a.get_pos())\n\n if self.my_plant:\n for m in msg.get_messages():\n if random.random() > 0.6:\n self.mode = 5\n (tx,ty) = m\n self.target = (tx+random.randrange(-3,4),ty+random.randrange(-3,4))\n\n if(n>0):\n if (not self.my_plant):\n self.my_plant = view.get_plants()[0]\n elif self.my_plant.get_eff()<view.get_plants()[0].get_eff():\n self.my_plant = view.get_plants()[0]\n\n if self.mode == 5:\n dist = max(abs(mx-self.target[0]),abs(my-self.target[1]))\n self.target_range = max(dist,self.target_range)\n if me.energy > dist*1.5:\n self.mode = 6\n\n if self.mode == 6:\n dist = max(abs(mx-self.target[0]),abs(my-self.target[1]))\n if dist > 4:\n return cells.Action(cells.ACT_MOVE,self.target)\n else:\n self.my_plant = None\n self.mode = 0\n\n if (me.energy < self.target_range) and (view.get_energy().get(mx, my) > 0):\n return cells.Action(cells.ACT_EAT)\n\n if self.my_plant:\n dist = max(abs(mx-self.my_plant.get_pos()[0]),abs(my-self.my_plant.get_pos()[1]))\n if me.energy < dist*1.5:\n (mx,my) = self.my_plant.get_pos()\n return cells.Action(cells.ACT_MOVE,(mx+random.randrange(-1,2),my+random.randrange(-1,2)))\n if (random.random()>0.9999):\n (mx,my) = self.my_plant.get_pos()\n msg.send_message((my,mx))\n\n if (random.random()>0.9):\n return cells.Action(cells.ACT_SPAWN,(mx+random.randrange(-1,2),my+random.randrange(-1,2)))\n else:\n return cells.Action(cells.ACT_MOVE,(mx+random.randrange(-1,2),my+random.randrange(-1,2)))\n", "id": "9201075", "language": "Python", "matching_score": 1.4827215671539307, "max_stars_count": 3, "path": "minds/mind2.py" }, { "content": "class WorldView(object):\n def __init__(self, me, agent_views, plant_views, terr_map, energy_map):\n self.agent_views = agent_views\n self.plant_views = plant_views\n self.energy_map = energy_map\n self.terr_map = terr_map\n self.me = me\n\n def get_me(self):\n return self.me\n\n def get_agents(self):\n return self.agent_views\n\n def get_plants(self):\n return self.plant_views\n\n def get_terr(self):\n return self.terr_map\n \n def get_energy(self):\n return self.energy_map\n", "id": "11145470", "language": "Python", "matching_score": 1.7264550924301147, "max_stars_count": 3, "path": "worldview.py" }, { "content": "#!/usr/bin/env python\n\nimport configparser\nimport random\nimport sys\nimport time\n\nimport numpy\nimport pygame, pygame.locals\n\nfrom agent import Agent, AgentView\nfrom constants import *\nfrom terrain.generator import terrain_generator\nfrom worldview import WorldView\n\nif not pygame.font: print('Warning, fonts disabled')\n\ntry:\n import psyco\n psyco.full()\nexcept ImportError:\n pass\n\n\ndef get_mind(name):\n full_name = 'minds.' 
+ name\n __import__(full_name)\n mind = sys.modules[full_name]\n mind.name = name\n return mind\n\nTIMEOUT = None\n\nconfig = configparser.RawConfigParser()\n\n\ndef get_next_move(old_x, old_y, x, y):\n ''' Takes the current position, old_x and old_y, and a desired future position, x and y,\n and returns the position (x,y) resulting from a unit move toward the future position.'''\n dx = numpy.sign(x - old_x)\n dy = numpy.sign(y - old_y)\n return (old_x + dx, old_y + dy)\n\n\nclass Game(object):\n ''' Represents a game between different minds. '''\n def __init__(self, bounds, mind_list, symmetric, max_time, headless = False):\n self.size = self.width, self.height = (bounds, bounds)\n self.mind_list = mind_list\n self.messages = [MessageQueue() for x in mind_list]\n self.headless = headless\n if not self.headless:\n self.disp = Display(self.size, scale=2)\n self.time = 0\n self.clock = pygame.time.Clock()\n self.max_time = max_time\n self.tic = time.time()\n self.terr = ScalarMapLayer(self.size)\n self.terr.set_perlin(10, symmetric)\n self.minds = [m[1].AgentMind for m in mind_list]\n\n self.show_energy = True\n self.show_agents = True\n\n self.energy_map = ScalarMapLayer(self.size)\n self.energy_map.set_streak(SCATTERED_ENERGY, symmetric)\n\n self.plant_map = ObjectMapLayer(self.size)\n self.plant_population = []\n\n self.agent_map = ObjectMapLayer(self.size)\n self.agent_population = []\n self.winner = None\n if symmetric:\n self.n_plants = 7\n else:\n self.n_plants = 14\n \n # Add some randomly placed plants to the map. \n for x in range(self.n_plants):\n mx = random.randrange(1, self.width - 1)\n my = random.randrange(1, self.height - 1)\n eff = random.randrange(PLANT_MIN_OUTPUT, PLANT_MAX_OUTPUT)\n p = Plant(mx, my, eff)\n self.plant_population.append(p)\n if symmetric:\n p = Plant(my, mx, eff)\n self.plant_population.append(p)\n self.plant_map.lock()\n self.plant_map.insert(self.plant_population)\n self.plant_map.unlock()\n\n # Create an agent for each mind and place on map at a different plant.\n self.agent_map.lock()\n for idx in range(len(self.minds)):\n # BUG: Number of minds could exceed number of plants?\n (mx, my) = self.plant_population[idx].get_pos()\n fuzzed_x = mx\n fuzzed_y = my\n while fuzzed_x == mx and fuzzed_y == my:\n fuzzed_x = mx + random.randrange(-1, 2)\n fuzzed_y = my + random.randrange(-1, 2)\n self.agent_population.append(Agent(fuzzed_x, fuzzed_y, STARTING_ENERGY, idx,\n self.minds[idx], None))\n self.agent_map.insert(self.agent_population)\n self.agent_map.unlock()\n\n def run_plants(self):\n ''' Increases energy at and around (adjacent position) for each plant.\n Increase in energy is equal to the eff(?) value of each the plant.'''\n for p in self.plant_population:\n (x, y) = p.get_pos()\n for dx in (-1, 0, 1):\n for dy in (-1, 0, 1):\n adj_x = x + dx\n adj_y = y + dy\n if self.energy_map.in_range(adj_x, adj_y):\n self.energy_map.change(adj_x, adj_y, p.get_eff())\n\n\n def add_agent(self, a):\n ''' Adds an agent to the game. '''\n self.agent_population.append(a)\n self.agent_map.set(a.x, a.y, a)\n\n def del_agent(self, a):\n ''' Kills the agent (if not already dead), removes them from the game and\n drops any load they were carrying in there previously occupied position. 
'''\n self.agent_population.remove(a)\n self.agent_map.set(a.x, a.y, None)\n a.alive = False\n if a.loaded:\n a.loaded = False\n self.terr.change(a.x, a.y, 1)\n\n def move_agent(self, a, x, y):\n ''' Moves agent, a, to new position (x,y) unless difference in terrain levels between\n its current position and new position is greater than 4.'''\n if abs(self.terr.get(x, y)-self.terr.get(a.x, a.y)) <= 4:\n self.agent_map.set(a.x, a.y, None)\n self.agent_map.set(x, y, a)\n a.x = x\n a.y = y\n\n def collect_agent_actions(self):\n agent_map_get_small_view_fast = self.agent_map.get_small_view_fast\n plant_map_get_small_view_fast = self.plant_map.get_small_view_fast\n energy_map = self.energy_map\n terr_map = self.terr\n views = ((a, WorldView(a, agent_map_get_small_view_fast(a.x, a.y), plant_map_get_small_view_fast(a.x, a.y), terr_map, energy_map)) for a in self.agent_population)\n messages = self.messages\n return [(a, a.act(v, messages[a.team])) for (a, v) in views]\n\n def run_agents_core(self):\n actions = self.collect_agent_actions()\n # Create a list containing the action for each agent, where each agent\n # determines its actions based on its view of the world and messages \n # from its team.\n actions_dict = dict(actions)\n random.shuffle(actions)\n\n self.agent_map.lock()\n # Apply the action for each agent - in doing so agent uses up 1 energy unit.\n for (agent, action) in actions:\n #This is the cost of mere survival\n agent.energy -= SUSTAIN_COST\n\n if action.type == ACT_MOVE: # Changes position of agent.\n act_x, act_y = action.get_data()\n (new_x, new_y) = get_next_move(agent.x, agent.y,\n act_x, act_y)\n # Move to the new position if it is in range and it's not \n #currently occupied by another agent.\n if (self.agent_map.in_range(new_x, new_y) and\n not self.agent_map.get(new_x, new_y)):\n self.move_agent(agent, new_x, new_y)\n agent.energy -= MOVE_COST\n elif action.type == ACT_SPAWN: # Creates new agents and uses additional 50 energy units.\n act_x, act_y = action.get_data()[:2]\n (new_x, new_y) = get_next_move(agent.x, agent.y,\n act_x, act_y)\n if (self.agent_map.in_range(new_x, new_y) and\n not self.agent_map.get(new_x, new_y) and\n agent.energy >= SPAWN_TOTAL_ENERGY):\n agent.energy -= SPAWN_TOTAL_ENERGY\n agent.energy //= 2\n a = Agent(new_x, new_y, agent.energy, agent.get_team(),\n self.minds[agent.get_team()],\n action.get_data()[2:])\n self.add_agent(a)\n elif action.type == ACT_EAT:\n #Eat only as much as possible.\n intake = min(self.energy_map.get(agent.x, agent.y),\n ENERGY_CAP - agent.energy)\n agent.energy += intake\n self.energy_map.change(agent.x, agent.y, -intake)\n elif action.type == ACT_RELEASE:\n #Dump some energy onto an adjacent field\n #No Seppuku\n output = action.get_data()[2]\n output = min(agent.energy - 1, output) \n act_x, act_y = action.get_data()[:2]\n #Use get_next_move to simplyfy things if you know \n #where the energy is supposed to end up.\n (out_x, out_y) = get_next_move(agent.x, agent.y,\n act_x, act_y)\n if (self.agent_map.in_range(out_x, out_y) and\n agent.energy >= 1):\n agent.energy -= output\n self.energy_map.change(out_x, out_y, output)\n elif action.type == ACT_ATTACK:\n #Make sure agent is attacking an adjacent field.\n act_x, act_y = act_data = action.get_data()\n next_pos = get_next_move(agent.x, agent.y, act_x, act_y)\n new_x, new_y = next_pos\n victim = self.agent_map.get(act_x, act_y)\n terr_delta = (self.terr.get(agent.x, agent.y) \n - self.terr.get(act_x, act_y))\n if (victim is not None and victim.alive and\n next_pos == 
act_data):\n #If both agents attack each other, both loose double energy\n #Think twice before attacking \n try:\n contested = (actions_dict[victim].type == ACT_ATTACK)\n except:\n contested = False\n agent.attack(victim, terr_delta, contested)\n if contested:\n victim.attack(agent, -terr_delta, True)\n \n elif action.type == ACT_LIFT:\n if not agent.loaded and self.terr.get(agent.x, agent.y) > 0:\n agent.loaded = True\n self.terr.change(agent.x, agent.y, -1)\n \n elif action.type == ACT_DROP:\n if agent.loaded:\n agent.loaded = False\n self.terr.change(agent.x, agent.y, 1)\n\n # Kill all agents with negative energy.\n team = [0 for n in self.minds]\n for agent in self.agent_population:\n if agent.energy < 0 and agent.alive:\n self.energy_map.change(agent.x, agent.y, BODY_ENERGY)\n self.del_agent(agent)\n else:\n team[agent.team] += 1\n # Team wins (and game ends) if opposition team has 0 agents remaining.\n # Draw if time exceeds time limit.\n winner = 0\n alive = 0\n for t in team:\n if t != 0:\n alive += 1\n else:\n if alive == 0:\n winner += 1\n \n return (alive, winner)\n\n def run_agents(self):\n alive, winner = self.run_agents_core()\n self.agent_map.unlock() \n if alive == 1:\n colors = [\"red\", \"white\", \"purple\", \"yellow\"]\n print(\"Winner is %s (%s) in %s\" % (self.mind_list[winner][1].name, \n colors[winner], str(self.time)))\n self.winner = winner\n \n if alive == 0 or (self.max_time > 0 and self.time > self.max_time):\n print(\"It's a draw!\")\n self.winner = -1\n \n def tick(self):\n if not self.headless:\n # Space starts new game\n # q or close button will quit the game\n for event in pygame.event.get():\n if event.type == pygame.locals.KEYUP:\n if event.key == pygame.locals.K_SPACE:\n self.winner = -1\n elif event.key == pygame.locals.K_q:\n sys.exit()\n elif event.key == pygame.locals.K_e:\n self.show_energy = not self.show_energy\n elif event.key == pygame.locals.K_a:\n self.show_agents = not self.show_agents\n elif event.type == pygame.locals.MOUSEBUTTONUP:\n if event.button == 1:\n print(self.agent_map.get(event.pos[0]//2,\n event.pos[1]//2))\n elif event.type == pygame.QUIT:\n sys.exit()\n self.disp.update(self.terr, self.agent_population,\n self.plant_population, self.agent_map,\n self.plant_map, self.energy_map, self.time,\n len(self.minds), self.show_energy,\n self.show_agents, int(self.clock.get_fps()))\n \n # test for spacebar pressed - if yes, restart\n for event in pygame.event.get(pygame.locals.KEYUP):\n if event.key == pygame.locals.K_SPACE:\n self.winner = -1\n if pygame.event.get(pygame.locals.QUIT):\n sys.exit()\n pygame.event.pump()\n self.disp.flip()\n\n self.run_agents()\n self.run_plants()\n for msg in self.messages:\n msg.update()\n self.time += 1\n self.tic = time.time()\n self.clock.tick()\n if self.time % 100 == 0:\n print('FPS: %f' % self.clock.get_fps())\n\n\nclass MapLayer(object):\n def __init__(self, size, val=0, valtype=numpy.object_):\n self.size = self.width, self.height = size\n self.values = numpy.empty(size, valtype)\n self.values.fill(val)\n\n def get(self, x, y):\n if y >= 0 and x >= 0:\n try:\n return self.values[x, y]\n except IndexError:\n return None\n return None\n\n def set(self, x, y, val):\n self.values[x, y] = val\n\n def in_range(self, x, y):\n return (0 <= x < self.width and 0 <= y < self.height)\n\n\nclass ScalarMapLayer(MapLayer):\n def set_random(self, range, symmetric = True):\n self.values = terrain_generator().create_random(self.size, range, \n symmetric)\n\n def set_streak(self, range, symmetric = True):\n 
self.values = terrain_generator().create_streak(self.size, range,\n symmetric)\n\n def set_simple(self, range, symmetric = True):\n self.values = terrain_generator().create_simple(self.size, range,\n symmetric)\n \n def set_perlin(self, range, symmetric = True):\n self.values = terrain_generator().create_perlin(self.size, range,\n symmetric)\n\n\n def change(self, x, y, val):\n self.values[x, y] += val\n\n\nclass ObjectMapLayer(MapLayer):\n def __init__(self, size):\n MapLayer.__init__(self, size, None, numpy.object_)\n self.surf = pygame.Surface(size)\n self.surf.set_colorkey((0,0,0))\n self.surf.fill((0,0,0))\n self.pixels = None\n# self.pixels = pygame.PixelArray(self.surf)\n\n def lock(self):\n self.pixels = pygame.surfarray.pixels2d(self.surf)\n\n def unlock(self):\n self.pixels = None\n\n def get_small_view_fast(self, x, y):\n unwanted = self.values[x,y]\n return [z.get_view() for z in self.values[x-1:x+2, y-1:y+2].flat\n if z is not None and z is not unwanted]\n\n def get_view(self, x, y, r):\n ret = []\n for x_off in range(-r, r + 1):\n for y_off in range(-r, r + 1):\n if x_off == 0 and y_off == 0:\n continue\n a = self.get(x + x_off, y + y_off)\n if a is not None:\n ret.append(a.get_view())\n return ret\n\n def insert(self, list):\n for o in list:\n self.set(o.x, o.y, o)\n\n def set(self, x, y, val):\n MapLayer.set(self, x, y, val)\n if val is None:\n self.pixels[x][y] = 0\n# self.surf.set_at((x, y), 0)\n else:\n self.pixels[x][y] = val.color\n# self.surf.set_at((x, y), val.color)\n\n\n# Use Cython accelerators if available.\n# Otherwise, don't bother folks about it.\ntry:\n import cells_helpers\n import types\n ObjectMapLayer.get_small_view_fast = cells_helpers.get_small_view_fast\n get_next_move = cells_helpers.get_next_move\n Game.collect_agent_actions = cells_helpers.collect_agent_actions\n Game.run_agents_core = cells_helpers.run_agents_core\nexcept ImportError:\n pass\n\n# Actions available to an agent on each turn.\nACT_SPAWN, ACT_MOVE, ACT_EAT, ACT_RELEASE, ACT_ATTACK, ACT_LIFT, ACT_DROP = list(range(7))\n\nclass Action(object):\n '''\n A class for passing an action around.\n '''\n def __init__(self, action_type, data=None):\n self.type = action_type\n self.data = data\n\n def get_data(self):\n return self.data\n\n def get_type(self):\n return self.type\n\n\nclass PlantView(object):\n def __init__(self, p):\n self.x = p.x\n self.y = p.y\n self.eff = p.get_eff()\n\n def get_pos(self):\n return (self.x, self.y)\n\n def get_eff(self):\n return self.eff\n\n\nclass Display(object):\n black = (0, 0, 0)\n red = (255, 0, 0)\n green = (0, 255, 0)\n yellow = (255, 255, 0)\n\n def __init__(self, size, scale=2):\n self.width, self.height = size\n self.scale = scale\n self.size = (self.width * scale, self.height * scale)\n pygame.init()\n self.screen = pygame.display.set_mode(self.size)\n self.surface = self.screen\n pygame.display.set_caption(\"Cells\")\n\n self.background = pygame.Surface(self.screen.get_size())\n self.background = self.background.convert()\n self.background.fill((150,150,150))\n\n self.text = []\n\n if pygame.font:\n def show_text(self, text, color, topleft):\n font = pygame.font.Font(None, 24)\n text = font.render(text, 1, color)\n textpos = text.get_rect()\n textpos.topleft = topleft\n self.text.append((text, textpos))\n else:\n def show_text(self, text, color, topleft):\n pass\n\n def update(self, terr, pop, plants, agent_map, plant_map, energy_map,\n ticks, nteams, show_energy, show_agents, fps):\n # Slower version:\n # img = ((numpy.minimum(150, 20 * 
terr.values) << 16) +\n # ((numpy.minimum(150, 10 * terr.values + 10.energy_map.values)) << 8))\n \n r = 20 * terr.values\n numpy.clip(r, a_min=None, a_max=150, out=r)\n r <<= 16\n\n img = r\n\n# g = numpy.minimum(150, 10 * terr.values + 10 * energy_map.values)\n if show_energy:\n g = terr.values + energy_map.values\n g *= 10\n numpy.clip(g, a_min=None, a_max=150, out=g)\n g <<= 8\n img += g\n\n img_surf = pygame.Surface((self.width, self.height))\n pygame.surfarray.blit_array(img_surf, img)\n if show_agents:\n img_surf.blit(agent_map.surf, (0,0))\n img_surf.blit(plant_map.surf, (0,0))\n\n pygame.transform.scale(img_surf,\n self.size, self.screen)\n if not ticks % 60:\n #todo: find out how many teams are playing\n team_pop = [0] * nteams\n\n for team in range(nteams):\n team_pop[team] = sum(1 for a in pop if a.team == team)\n\n self.text = []\n drawTop = 0\n for t in range(nteams):\n drawTop += 20\n self.show_text(str(team_pop[t]), TEAM_COLORS[t], (10, drawTop))\n self.show_text(f\"FPS: {fps}\", (255, 255, 255), (10, drawTop + 20))\n\n for text, textpos in self.text:\n self.surface.blit(text, textpos)\n\n def flip(self):\n pygame.display.flip()\n\n\nclass Plant(object):\n color = 0x00FF00\n \n def __init__(self, x, y, eff):\n self.x = x\n self.y = y\n self.eff = eff\n\n def get_pos(self):\n return (self.x, self.y)\n\n def get_eff(self):\n return self.eff\n\n def get_view(self):\n return PlantView(self)\n\n\nclass MessageQueue(object):\n def __init__(self):\n self.__inlist = []\n self.__outlist = []\n\n def update(self):\n self.__outlist = self.__inlist\n self.__inlist = []\n\n def send_message(self, m):\n self.__inlist.append(m)\n\n def get_messages(self):\n return self.__outlist\n\n\nclass Message(object):\n def __init__(self, message):\n self.message = message\n def get_message(self):\n return self.message\n\n\ndef main():\n global bounds, symmetric, mind_list\n \n try:\n config.read('default.cfg')\n bounds = config.getint('terrain', 'bounds')\n symmetric = config.getboolean('terrain', 'symmetric')\n minds_str = str(config.get('minds', 'minds'))\n except Exception as e:\n print('Got error: %s' % e)\n config.add_section('minds')\n config.set('minds', 'minds', 'mind1,mind2')\n config.add_section('terrain')\n config.set('terrain', 'bounds', '300')\n config.set('terrain', 'symmetric', 'true')\n\n with open('default.cfg', 'w') as configfile:\n config.write(configfile)\n\n config.read('default.cfg')\n bounds = config.getint('terrain', 'bounds')\n symmetric = config.getboolean('terrain', 'symmetric')\n minds_str = str(config.get('minds', 'minds'))\n mind_list = [(n, get_mind(n)) for n in minds_str.split(',')]\n\n # accept command line arguments for the minds over those in the config\n try:\n if len(sys.argv)>2:\n mind_list = [(n,get_mind(n)) for n in sys.argv[1:] ]\n except (ImportError, IndexError):\n pass\n\n\nif __name__ == \"__main__\":\n main()\n while True:\n game = Game(bounds, mind_list, symmetric, -1)\n while game.winner is None:\n game.tick()\n", "id": "5496449", "language": "Python", "matching_score": 4.886949062347412, "max_stars_count": 3, "path": "cells.py" }, { "content": "STARTING_ENERGY = 20\nSCATTERED_ENERGY = 10 \n\n#Plant energy output. Remember, this should always be less\n#than ATTACK_POWER, because otherwise cells sitting on the plant edge\n#might become invincible.\nPLANT_MAX_OUTPUT = 20\nPLANT_MIN_OUTPUT = 5\n\n#BODY_ENERGY is the amount of energy that a cells body contains\n#It can not be accessed by the cells, think of it as: they can't\n#eat their own body. 
It is released again at death.\nBODY_ENERGY = 25\nATTACK_POWER = 30\n#Amount by which attack power is modified for each 1 height difference.\nATTACK_TERR_CHANGE = 2\nENERGY_CAP = 2500\n\n#SPAWN_COST is the energy it takes to seperate two cells from each other.\n#It is lost forever, not to be confused with the BODY_ENERGY of the new cell.\nSPAWN_LOST_ENERGY = 20\nSUSTAIN_COST = 0\nMOVE_COST = 1 \n#MESSAGE_COST = 0 \n\n#BODY_ENERGY + SPAWN_COST is invested to create a new cell. What remains is split evenly.\n#With this model we only need to make sure a cell can't commit suicide by spawning.\nSPAWN_TOTAL_ENERGY = BODY_ENERGY + SPAWN_LOST_ENERGY\n\nTEAM_COLORS = [(255, 0, 0), (255, 255, 255), (255, 0, 255), (255, 255, 0)]\nTEAM_COLORS_FAST = [0xFF0000, 0xFFFFFF, 0xFF00FF, 0xFFFF00]\n", "id": "7119426", "language": "Python", "matching_score": 2.773688554763794, "max_stars_count": 3, "path": "constants.py" }, { "content": "from constants import ATTACK_POWER, ATTACK_TERR_CHANGE, TEAM_COLORS_FAST\nclass Agent(object):\n __slots__ = ['x', 'y', 'mind', 'energy', 'alive', 'team', 'loaded', 'color',\n 'act']\n def __init__(self, x, y, energy, team, AgentMind, cargs):\n self.x = x\n self.y = y\n self.mind = AgentMind(cargs)\n self.energy = energy\n self.alive = True\n self.team = team\n self.loaded = False\n self.color = TEAM_COLORS_FAST[team % len(TEAM_COLORS_FAST)]\n self.act = self.mind.act\n def __str__(self):\n return \"Agent from team %i, energy %i\" % (self.team,self.energy)\n def attack(self, other, offset = 0, contested = False):\n if not other:\n return False\n max_power = ATTACK_POWER + ATTACK_TERR_CHANGE * offset\n if contested:\n other.energy -= min(self.energy, max_power)\n else:\n other.energy -= max_power\n return other.energy <= 0\n\n def get_team(self):\n return self.team\n\n def get_pos(self):\n return (self.x, self.y)\n\n def set_pos(self, x, y):\n self.x = x\n self.y = y\n\n def get_view(self):\n return AgentView(self)\n\n\nclass AgentView(object):\n def __init__(self, agent):\n (self.x, self.y) = agent.get_pos()\n self.team = agent.get_team()\n\n def get_pos(self):\n return (self.x, self.y)\n\n def get_team(self):\n return self.team\n", "id": "9166108", "language": "Python", "matching_score": 0.9128682613372803, "max_stars_count": 3, "path": "agent.py" }, { "content": "#!/usr/bin/env python\n\nimport sys\nimport configparser\nfrom cells import Game\n\nconfig = configparser.RawConfigParser()\n\ndef get_mind(name):\n full_name = 'minds.' 
+ name\n __import__(full_name)\n mind = sys.modules[full_name]\n mind.name = name\n return mind\n\nbounds = None # HACK\nsymmetric = None\nmind_list = None\n\ndef main():\n global bounds, symmetric, mind_list\n try:\n config.read('tournament.cfg')\n bounds = config.getint('terrain', 'bounds')\n symmetric = config.getboolean('terrain', 'symmetric')\n minds_str = str(config.get('minds', 'minds'))\n\n except Exception as e:\n print('Got error: %s' % e)\n config.add_section('minds')\n config.set('minds', 'minds', 'mind1,mind2')\n config.add_section('terrain')\n config.set('terrain', 'bounds', '300')\n config.set('terrain', 'symmetric', 'true')\n\n with open('tournament.cfg', 'wb') as configfile:\n config.write(configfile)\n\n config.read('tournament.cfg')\n bounds = config.getint('terrain', 'bounds')\n symmetric = config.getboolean('terrain', 'symmetric')\n minds_str = str(config.get('minds', 'minds'))\n mind_list = [(n, get_mind(n)) for n in minds_str.split(',')]\n\n # accept command line arguments for the minds over those in the config\n try:\n if len(sys.argv)>2:\n mind_list = [(n,get_mind(n)) for n in sys.argv[1:] ]\n except (ImportError, IndexError):\n pass\n\n\nif __name__ == \"__main__\":\n main()\n scores = [0 for x in mind_list]\n tournament_list = [[mind_list[a], mind_list[b]] for a in range(len(mind_list)) for b in range (a)]\n for n in range(4):\n for pair in tournament_list:\n game = Game(bounds, pair, symmetric, 5000, headless = True)\n while game.winner == None:\n game.tick()\n if game.winner >= 0:\n idx = mind_list.index(pair[game.winner])\n scores[idx] += 3\n if game.winner == -1:\n idx = mind_list.index(pair[0])\n scores[idx] += 1\n idx = mind_list.index(pair[1])\n scores[idx] += 1\n print(scores)\n print([m[0] for m in mind_list])\n names = [m[0] for m in mind_list]\n name_score = list(zip(names,scores))\n f = open(\"scores.csv\",'w')\n srt = sorted(name_score,key=lambda ns: -ns[1])\n for x in srt:\n f.write(\"%s;%s\\n\" %(x[0],str(x[1])))\n f.close()\n", "id": "8691600", "language": "Python", "matching_score": 0.5519241094589233, "max_stars_count": 3, "path": "tournament.py" }, { "content": "import numpy\nimport random\nimport math\n\nclass terrain_generator():\n def create_random(self, size, range, symmetric=False):\n \"\"\"Creates a random terrain map\"\"\"\n ret = numpy.random.random_integers(0, range, size)\n\n if symmetric:\n ret = self.make_symmetric(ret)\n return ret\n\n def create_streak(self, size, range_, symmetric=False):\n \"\"\"Creates a terrain map containing streaks that run from north-west to south-east\n\n Starts with a single point [[a]] and converts it into [[a, b], [c, d]]\n where:\n b = a + (random change)\n c = a + (random change)\n d = b + (random change) and c + (random change)\n\n Repeat untill size matches required size\"\"\"\n add_random_range = self.add_random_range\n\n # Creates the top row\n ret = [[add_random_range(0, 0, range_)]]\n for x in range(size[0] - 1):\n pos_west = ret[0][-1]\n if pos_west <= 0:\n ret[0].append(add_random_range(pos_west, 0, 1))\n elif pos_west >= range_:\n ret[0].append(add_random_range(pos_west, -1, 0))\n else:\n ret[0].append(add_random_range(pos_west, -1, 1))\n\n # Create the next row down\n for y in range(size[1] - 1):\n pos_north = ret[-1][0]\n if pos_north <= 0:\n next_row = [add_random_range(pos_north, 0, 1)]\n elif pos_north >= range_:\n next_row = [add_random_range(pos_north,-1, 0)]\n else:\n next_row = [add_random_range(pos_north, -1, 1)]\n\n for x in range(size[0] - 1):\n pos_north = ret[-1][x+1]\n pos_west 
= next_row[-1]\n if pos_west == pos_north:\n if pos_west <= 0:\n next_row.append(add_random_range(pos_west, 0, 1))\n elif pos_west >= range_:\n next_row.append(add_random_range(pos_west, -1, 0))\n else:\n next_row.append(add_random_range(pos_west, -1, 1))\n elif abs(pos_west - pos_north) == 2:\n next_row.append((pos_west + pos_north) // 2)\n else:\n next_row.append(random.choice((pos_west, pos_north)))\n ret.append(next_row)\n\n if symmetric:\n ret = self.make_symmetric(ret)\n return numpy.array(ret)\n\n def create_simple(self, size, range_, symmetric=False):\n \"\"\"Creates a procedural terrain map\n\n Starts with corner points [[a, b], [c, d]] and converts it into [[a, e, b], [f, g, h], [c, i, d]]\n where:\n e = (a+b)//2 + (random change)\n f = (a+c)//2 + (random change)\n g = (a+b+c+d)//4 + (random change)\n h = (b+d)//2 + (random change)\n i = (c+d)//2 + (random change)\n\n Repeat untill size is greater than required and truncate\"\"\"\n add_random_range = self.add_random_range\n\n ret = [[add_random_range(0, 0, range_), add_random_range(0, 0, range_)], [add_random_range(0, 0, range_), add_random_range(0, 0, range_)]]\n\n while len(ret) <= size[0]:\n new_ret = []\n\n for key_x, x in enumerate(ret):\n new_ret.append(x)\n\n if key_x != len(ret) - 1:\n next_row = []\n for key_y, pos_south in enumerate(x):\n pos_north = ret[key_x+1][key_y]\n pos_avg = (pos_north + pos_south) // 2\n if pos_avg <= 0:\n next_row.append(add_random_range(pos_avg, 0, 1))\n elif pos_avg >= range_:\n next_row.append(add_random_range(pos_avg, -1, 0))\n else:\n next_row.append(add_random_range(pos_avg, -1, 1))\n new_ret.append(next_row)\n ret = new_ret\n\n new_ret = []\n for key_x, x in enumerate(ret):\n next_row = [x[0]]\n for key_y, pos_east in enumerate(x[1:]):\n pos_west = next_row[-1]\n if key_x % 2 and not key_y % 2:\n pos_north = ret[key_x-1][key_y+1]\n pos_south = ret[key_x+1][key_y+1]\n pos_avg = (pos_north + pos_south + pos_east + pos_west) // 4\n if pos_avg <= 0:\n next_row.append(add_random_range(pos_avg, 0, 1))\n elif pos_avg >= range_:\n next_row.append(add_random_range(pos_avg, -1, 0))\n else:\n next_row.append(add_random_range(pos_avg, -1, 1))\n else:\n pos_avg = (pos_east + pos_west) // 2\n if pos_avg <= 0:\n next_row.append(add_random_range(pos_avg, 0, 1))\n elif pos_avg >= range:\n next_row.append(a_dd_random_range(pos_avg, -1, 0))\n else:\n next_row.append(add_random_range(pos_avg, -1, 1))\n next_row.append(pos_east)\n new_ret.append(next_row)\n ret = new_ret\n\n ret = [x[:size[0]] for x in ret][:size[0]]\n\n if symmetric:\n ret = self.make_symmetric(ret)\n return numpy.array(ret)\n \n def create_perlin(self, size, roughness, symmetric = False):\n (width, height) = size\n values = numpy.zeros(size, dtype=float)\n noise = numpy.random.random_sample((width+1, height+1))\n octaves = (256, 8, 2)\n def interpolate_noise(x, y, mx, my):\n x = int(x)\n y = int(y)\n p1 = noise[x][y]\n p2 = noise[x][y+1]\n p3 = noise[x+1][y]\n p4 = noise[x+1][y+1]\n\n top = p1 * (1 - mx) + p2 * mx\n bottom = p3 * (1 - mx) + p4 * mx\n return top * (1 - my) + bottom * my\n interpolate_noise_vec = numpy.vectorize(interpolate_noise)\n multiplier = 0.5\n for i in octaves:\n octave_values = numpy.indices(size, dtype=float)\n octave_offsets = numpy.indices(size, dtype=float)\n octave_values //= i\n octave_offsets %= i\n octave_offsets /= i\n p1 = noise[:-1, :-1]\n p2 = noise[:-1, 1:]\n p3 = noise[1:, :-1]\n p4 = noise[1:, 1:]\n mx = octave_offsets[0]\n my = octave_offsets[1]\n for mat in (p1, p2, p3, p4, mx, my):\n assert 
mat.shape == size\n top = p1 * (1 - mx) + p2 * mx\n bottom = p3 * (1 - mx) + p4 * mx\n interpolated = top * (1 - my) + bottom * my\n values += interpolated * multiplier\n multiplier *= 0.5\n\n values *= roughness\n\n if symmetric:\n values = numpy.tril(values) + numpy.triu(values.T, 1)\n return numpy.array(values, dtype=int)\n \n #Some helper functions.\n def interpolate(self, p1, p2, p3, p4, x, y):\n top = self.interpolate1d(p1, p2, x)\n bottom = self.interpolate1d(p3, p4, x)\n return self.interpolate1d(top, bottom, y)\n \n def interpolate1d(self, p1, p2, mu):\n return p1*(1-mu)+p2*mu\n\n def add_random_range(self, x, rand_min, rand_max):\n \"\"\"Returns a number that is between x + rand_min and x + rand_max (inclusive)\"\"\"\n return x + random.randrange(rand_min, rand_max + 1)\n\n def make_symmetric(self, ret):\n \"\"\"Takes a 2-dimentional list and makes it symmetrical about the north-west / south-east axis\"\"\"\n for x in range(len(ret)):\n for y in range(x):\n ret[x][y] = ret[y][x]\n\n return ret\n", "id": "3670401", "language": "Python", "matching_score": 1.1102294921875, "max_stars_count": 3, "path": "terrain/generator.py" }, { "content": "import unittest\nimport numpy\nimport torch\nimport nestedtensor as NT\nfrom numbers import Number\nfrom math import inf\nfrom collections import OrderedDict\n\nstring_classes = (str, bytes)\n\ndef is_iterable(obj):\n try:\n iter(obj)\n return True\n except TypeError:\n return False\n\n# NOTE: Methods copy pasted from https://github.com/pytorch/pytorch/blob/4314620ba05bc1867f6a63455c4ac77fdfb1018d/test/common_utils.py#L773\nclass TestCaseBase(unittest.TestCase):\n longMessage = True\n precision = 1e-5\n\n def safeCoalesce(self, t):\n tc = t.coalesce()\n self.assertEqual(tc.to_dense(), t.to_dense())\n self.assertTrue(tc.is_coalesced())\n\n # Our code below doesn't work when nnz is 0, because\n # then it's a 0D tensor, not a 2D tensor.\n if t._nnz() == 0:\n self.assertEqual(t._indices(), tc._indices())\n self.assertEqual(t._values(), tc._values())\n return tc\n\n value_map = {}\n for idx, val in zip(t._indices().t(), t._values()):\n idx_tup = tuple(idx.tolist())\n if idx_tup in value_map:\n value_map[idx_tup] += val\n else:\n value_map[idx_tup] = val.clone() if isinstance(val, torch.Tensor) else val\n\n new_indices = sorted(list(value_map.keys()))\n new_values = [value_map[idx] for idx in new_indices]\n if t._values().ndimension() < 2:\n new_values = t._values().new(new_values)\n else:\n new_values = torch.stack(new_values)\n\n new_indices = t._indices().new(new_indices).t()\n tg = t.new(new_indices, new_values, t.size())\n\n self.assertEqual(tc._indices(), tg._indices())\n self.assertEqual(tc._values(), tg._values())\n\n if t.is_coalesced():\n self.assertEqual(tc._indices(), t._indices())\n self.assertEqual(tc._values(), t._values())\n\n return tg\n\n def assertEqual(self, x, y, prec=None, message='', allow_inf=False):\n if isinstance(prec, str) and message == '':\n message = prec\n prec = None\n if prec is None:\n prec = self.precision\n\n if isinstance(x, torch.Tensor) and isinstance(y, Number):\n self.assertEqual(x.item(), y, prec=prec, message=message,\n allow_inf=allow_inf)\n elif isinstance(y, torch.Tensor) and isinstance(x, Number):\n self.assertEqual(x, y.item(), prec=prec, message=message,\n allow_inf=allow_inf)\n elif isinstance(x, torch.Tensor) and isinstance(y, numpy.bool_):\n self.assertEqual(x.item(), y, prec=prec, message=message,\n allow_inf=allow_inf)\n elif isinstance(y, torch.Tensor) and isinstance(x, numpy.bool_):\n 
self.assertEqual(x, y.item(), prec=prec, message=message,\n allow_inf=allow_inf)\n elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):\n def assertTensorsEqual(a, b):\n super(TestCaseBase, self).assertEqual(a.size(), b.size(), message)\n if a.numel() > 0:\n if (a.device.type == 'cpu' and (a.dtype == torch.float16 or a.dtype == torch.bfloat16)):\n # CPU half and bfloat16 tensors don't have the methods we need below\n a = a.to(torch.float32)\n if (a.device.type == 'cuda' and a.dtype == torch.bfloat16):\n # CUDA bfloat16 tensors don't have the methods we need below\n a = a.to(torch.float32)\n b = b.to(a)\n\n if (a.dtype == torch.bool) != (b.dtype == torch.bool):\n raise TypeError(\"Was expecting both tensors to be bool type.\")\n else:\n if a.dtype == torch.bool and b.dtype == torch.bool:\n # we want to respect precision but as bool doesn't support subtraction,\n # boolean tensor has to be converted to int\n a = a.to(torch.int)\n b = b.to(torch.int)\n\n diff = a - b\n if a.is_floating_point():\n # check that NaNs are in the same locations\n nan_mask = torch.isnan(a)\n self.assertTrue(torch.equal(nan_mask, torch.isnan(b)), message)\n diff[nan_mask] = 0\n # inf check if allow_inf=True\n if allow_inf:\n inf_mask = torch.isinf(a)\n inf_sign = inf_mask.sign()\n self.assertTrue(torch.equal(inf_sign, torch.isinf(b).sign()), message)\n diff[inf_mask] = 0\n # TODO: implement abs on CharTensor (int8)\n if diff.is_signed() and diff.dtype != torch.int8:\n diff = diff.abs()\n max_err = diff.max()\n self.assertLessEqual(max_err, prec, message)\n super(TestCaseBase, self).assertEqual(x.is_sparse, y.is_sparse, message)\n super(TestCaseBase, self).assertEqual(x.is_quantized, y.is_quantized, message)\n if x.is_sparse:\n x = self.safeCoalesce(x)\n y = self.safeCoalesce(y)\n assertTensorsEqual(x._indices(), y._indices())\n assertTensorsEqual(x._values(), y._values())\n elif x.is_quantized and y.is_quantized:\n self.assertEqual(x.qscheme(), y.qscheme(), prec=prec,\n message=message, allow_inf=allow_inf)\n if x.qscheme() == torch.per_tensor_affine:\n self.assertEqual(x.q_scale(), y.q_scale(), prec=prec,\n message=message, allow_inf=allow_inf)\n self.assertEqual(x.q_zero_point(), y.q_zero_point(),\n prec=prec, message=message,\n allow_inf=allow_inf)\n elif x.qscheme() == torch.per_channel_affine:\n self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), prec=prec,\n message=message, allow_inf=allow_inf)\n self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),\n prec=prec, message=message,\n allow_inf=allow_inf)\n self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),\n prec=prec, message=message)\n self.assertEqual(x.dtype, y.dtype)\n self.assertEqual(x.int_repr().to(torch.int32),\n y.int_repr().to(torch.int32), prec=prec,\n message=message, allow_inf=allow_inf)\n else:\n assertTensorsEqual(x, y)\n elif isinstance(x, string_classes) and isinstance(y, string_classes):\n super(TestCaseBase, self).assertEqual(x, y, message)\n elif type(x) == set and type(y) == set:\n super(TestCaseBase, self).assertEqual(x, y, message)\n elif isinstance(x, dict) and isinstance(y, dict):\n if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):\n self.assertEqual(x.items(), y.items(), prec=prec,\n message=message, allow_inf=allow_inf)\n else:\n self.assertEqual(set(x.keys()), set(y.keys()), prec=prec,\n message=message, allow_inf=allow_inf)\n key_list = list(x.keys())\n self.assertEqual([x[k] for k in key_list],\n [y[k] for k in key_list],\n prec=prec, 
message=message,\n allow_inf=allow_inf)\n elif is_iterable(x) and is_iterable(y):\n super(TestCaseBase, self).assertEqual(len(x), len(y), message)\n for x_, y_ in zip(x, y):\n self.assertEqual(x_, y_, prec=prec, message=message,\n allow_inf=allow_inf)\n elif isinstance(x, bool) and isinstance(y, bool):\n super(TestCaseBase, self).assertEqual(x, y, message)\n elif isinstance(x, Number) and isinstance(y, Number):\n if abs(x) == inf or abs(y) == inf:\n if allow_inf:\n super(TestCaseBase, self).assertEqual(x, y, message)\n else:\n self.fail(\"Expected finite numeric values - x={}, y={}\".format(x, y))\n return\n super(TestCaseBase, self).assertLessEqual(abs(x - y), prec, message)\n else:\n super(TestCaseBase, self).assertEqual(x, y, message)\n\n def assertAlmostEqual(self, x, y, places=None, msg=None, delta=None, allow_inf=None):\n prec = delta\n if places:\n prec = 10**(-places)\n self.assertEqual(x, y, prec, msg, allow_inf)\n\n def assertNotEqual(self, x, y, prec=None, message=''):\n if isinstance(prec, str) and message == '':\n message = prec\n prec = None\n if prec is None:\n prec = self.precision\n\n if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):\n if x.size() != y.size():\n super(TestCaseBase, self).assertNotEqual(x.size(), y.size())\n self.assertGreater(x.numel(), 0)\n y = y.type_as(x)\n y = y.cuda(device=x.get_device()) if x.is_cuda else y.cpu()\n nan_mask = x != x\n if torch.equal(nan_mask, y != y):\n diff = x - y\n if diff.is_signed():\n diff = diff.abs()\n diff[nan_mask] = 0\n # Use `item()` to work around:\n # https://github.com/pytorch/pytorch/issues/22301\n max_err = diff.max().item()\n self.assertGreaterEqual(max_err, prec, message)\n elif type(x) == str and type(y) == str:\n super(TestCaseBase, self).assertNotEqual(x, y)\n elif is_iterable(x) and is_iterable(y):\n super(TestCaseBase, self).assertNotEqual(x, y)\n else:\n try:\n self.assertGreaterEqual(abs(x - y), prec, message)\n return\n except (TypeError, AssertionError):\n pass\n super(TestCaseBase, self).assertNotEqual(x, y, message)\n\nclass TestCase(TestCaseBase):\n # ToDo: remove ignore_contiguity flag. We should not use it.\n def assertAlmostEqual(self, x, y, places=None, msg=None, delta=None, allow_inf=None, ignore_contiguity=False):\n prec = delta\n if places:\n prec = 10**(-places)\n self.assertEqual(x, y, prec, msg, allow_inf, ignore_contiguity)\n\n def assertEqual(self, x, y, prec=None, message='', allow_inf=False, ignore_contiguity=False):\n if not isinstance(x, NT.NestedTensor) and not isinstance(y, NT.NestedTensor):\n super().assertEqual(x, y, prec, message, allow_inf)\n elif not isinstance(x, NT.NestedTensor) or not isinstance(y, NT.NestedTensor):\n raise TypeError(\"Comparing a nested tensor to a non nested tensor\")\n else:\n if x.dim() != y.dim():\n self.fail(\"Nested tensors dimensionality don't match. {} != {}\".format(x.dim(), y.dim()))\n\n if x.nested_dim() != y.nested_dim():\n self.fail(\"Nested tensors nested dimensionality don't match. {} != {}\".format(x.nested_dim(), y.nested_dim()))\n\n if x.tensor_dim() != y.tensor_dim():\n self.fail(\"Nested tensors dimentionality don't match. {} != {}\".format(x.tensor_dim(), y.tensor_dim()))\n\n if x.is_pinned() != y.is_pinned():\n self.fail(\"Nested tensors pinned memmory values don't match. {} != {}\".format(x.is_pinned(), y.is_pinned()))\n\n if x.layout != y.layout:\n self.fail(\"Nested tensors layouts don't match. {} != {}\".format(x.layout, y.layout))\n\n if x.dtype != y.dtype:\n self.fail(\"Nested tensors dtypes don't match. 
{} != {}\".format(x.dtype, y.dtype))\n\n if x.device != y.device:\n self.fail(\"Nested tensors devices don't match. {} != {}\".format(x.device, y.device))\n\n if x.requires_grad != y.requires_grad:\n self.fail(\"Nested tensors requires grad properties don't match. {} != {}\".format(x.requires_grad, y.requires_grad))\n\n # uncomment once nested_tensor([]).is_contiguous() == nested_tensor([], dtype=torch.float).is_contiguous()\n #if not ignore_contiguity and x.is_contiguous() != y.is_contiguous():\n # self.fail(\"Nested tensors contiguity don't match. {} != {}\".format(x.is_contiguous(), y.is_contiguous()))\n\n if x.element_size() != y.element_size():\n self.fail(\"Nested tensors element sizes don't match. {} != {}\".format(x.element_size(), y.element_size()))\n\n if x.size() != y.size():\n self.fail(\"Nested tensors sizes don't match. {} != {}\".format(x.size(), y.size()))\n\n if x.nested_size() != y.nested_size():\n print(x.nested_size())\n print(y.nested_size())\n self.fail(\"Nested tensors nested sizes don't match. {} != {}\".format(x.nested_size(), y.nested_size()))\n\n # If you ignore contiguity you should also ignore the striding\n if not ignore_contiguity and x.nested_stride() != y.nested_stride():\n self.fail(\"Nested tensors nested strides don't match. {} != {}\".format(x.nested_stride(), y.nested_stride()))\n\n for x_, y_ in zip(x, y):\n self.assertEqual(x_, y_, prec=prec, message=message,\n allow_inf=allow_inf, ignore_contiguity=ignore_contiguity)\n", "id": "3440725", "language": "Python", "matching_score": 2.8994381427764893, "max_stars_count": 229, "path": "test/utils_test_case.py" }, { "content": "import torch\nimport nestedtensor as nt\nimport unittest\nfrom utils_test_case import TestCase\n\n\nclass TestTensorMask(TestCase):\n #\n # Group of tests to test to_tensor_mask()\n #\n def test_empty_nt(self):\n a = nt.nested_tensor([])\n tensor, mask = a.to_tensor_mask()\n\n TestCase.assertEqual(self, mask, torch.tensor(False))\n TestCase.assertEqual(self, tensor, torch.tensor([0]))\n\n # TODO once .to_list() bug fixed\n def test_empty_tensor(self):\n a = nt.nested_tensor([\n torch.tensor([])\n ])\n self.assertRaisesRegex(RuntimeError,\n \"Empty tensors are not yet supported.\",\n lambda: a.to_tensor_mask())\n\n def test_single_scalar(self):\n a = nt.nested_tensor([\n torch.tensor(1, dtype=torch.uint8)\n ])\n tensor, mask = a.to_tensor_mask()\n TestCase.assertEqual(\n self, tensor, torch.tensor([1], dtype=torch.uint8))\n TestCase.assertEqual(self, mask, torch.tensor(True))\n\n tensor, mask = a.to_tensor_mask(mask_dim=0)\n TestCase.assertEqual(\n self, tensor, torch.tensor([1], dtype=torch.uint8))\n TestCase.assertEqual(self, mask, torch.tensor(True))\n\n tensor, mask = a.to_tensor_mask(mask_dim=1)\n TestCase.assertEqual(\n self, tensor, torch.tensor([1], dtype=torch.uint8))\n TestCase.assertEqual(self, mask, torch.tensor([True]))\n\n self.assertRaisesRegex(\n RuntimeError,\n \"Requested mask dimension 2 is bigger than dimension 1 of given NestedTensor.\",\n lambda: a.to_tensor_mask(mask_dim=2))\n\n # TODO once .to_list() bug fixed\n @unittest.skip(\"Currently only supporting nested dim 1.\")\n def test_multi_scalar(self):\n # TODO: add test cases\n a = nt.nested_tensor([\n torch.tensor(1),\n torch.tensor(2),\n torch.tensor(3)\n ])\n tensor, mask = a.to_tensor_mask()\n\n TestCase.assertEqual(self, tensor, torch.tensor([[1, 2, 3]]))\n TestCase.assertEqual(self, mask, torch.tensor(True))\n\n tensor, mask = a.to_tensor_mask(mask_dim=1)\n TestCase.assertEqual(self, tensor, 
torch.tensor([[1, 2, 3]]))\n TestCase.assertEqual(self, mask, torch.tensor([True]))\n\n tensor, mask = a.to_tensor_mask(mask_dim=2)\n TestCase.assertEqual(self, tensor, torch.tensor([[1, 2, 3]]))\n TestCase.assertEqual(self, mask, torch.tensor([[True, True, True]]))\n\n self.assertRaisesRegex(\n RuntimeError,\n \"Requested mask dimension 3 is bigger than dimension 2 of given NestedTensor.\",\n lambda: a.to_tensor_mask(mask_dim=3))\n\n def test_single_tensor(self):\n a = nt.nested_tensor([\n torch.tensor([1])\n ])\n tensor, mask = a.to_tensor_mask()\n TestCase.assertEqual(self, tensor, torch.tensor([[1]]))\n TestCase.assertEqual(self, mask, torch.tensor(True))\n\n tensor, mask = a.to_tensor_mask(mask_dim=0)\n TestCase.assertEqual(self, tensor, torch.tensor([[1]]))\n TestCase.assertEqual(self, mask, torch.tensor(True))\n\n tensor, mask = a.to_tensor_mask(mask_dim=1)\n TestCase.assertEqual(self, tensor, torch.tensor([[1]]))\n TestCase.assertEqual(self, mask, torch.tensor([True]))\n\n tensor, mask = a.to_tensor_mask(mask_dim=2)\n TestCase.assertEqual(self, tensor, torch.tensor([[1]]))\n TestCase.assertEqual(self, mask, torch.tensor([[True]]))\n\n self.assertRaisesRegex(\n RuntimeError,\n \"Requested mask dimension 3 is bigger than dimension 2 of given NestedTensor.\",\n lambda: a.to_tensor_mask(mask_dim=3))\n\n def test_multi_tensor(self):\n a = nt.nested_tensor([\n torch.tensor([1]),\n torch.tensor([2]),\n torch.tensor([3])\n ])\n tensor, mask = a.to_tensor_mask()\n TestCase.assertEqual(self, tensor, torch.tensor([[1],\n [2],\n [3]]))\n TestCase.assertEqual(self, mask, torch.tensor(True))\n\n tensor, mask = a.to_tensor_mask(mask_dim=0)\n TestCase.assertEqual(self, tensor, torch.tensor([[1],\n [2],\n [3]]))\n TestCase.assertEqual(self, mask, torch.tensor(True))\n\n tensor, mask = a.to_tensor_mask(mask_dim=1)\n TestCase.assertEqual(self, tensor, torch.tensor([[1],\n [2],\n [3]]))\n TestCase.assertEqual(self, mask, torch.tensor([True, True, True]))\n\n tensor, mask = a.to_tensor_mask(mask_dim=2)\n TestCase.assertEqual(self, tensor, torch.tensor([[1],\n [2],\n [3]]))\n TestCase.assertEqual(\n self, mask, torch.tensor([[True], [True], [True]]))\n\n @torch.inference_mode()\n def test_mask_dim_too_small_error(self):\n a = nt.nested_tensor([\n torch.tensor([1, 2, ]),\n torch.tensor([3, 4, 5, 6]),\n ])\n\n self.assertRaisesRegex(\n RuntimeError, \"Mask dimension is too small to represent data tensor.\", lambda: a.to_tensor_mask(mask_dim=1))\n #\n # Group of tests to test nested_tensor_from_tensor_mask()\n #\n def test_ntftm_nested_dim_0_error(self):\n tensor = torch.tensor([])\n self.assertRaisesRegex(RuntimeError, \"Nested dimension can't be 0.\",\n lambda: nt.nested_tensor_from_tensor_mask(tensor, tensor, nested_dim=0))\n\n def test_ntftm_none_passed(self):\n self.assertRaises(\n RuntimeError, lambda: nt.nested_tensor_from_tensor_mask(None, None))\n self.assertRaises(RuntimeError, lambda: nt.nested_tensor_from_tensor_mask(\n torch.tensor([]), None))\n\n @torch.inference_mode()\n def test_ntftm_empty(self):\n tensor = torch.tensor([])\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, tensor)\n TestCase.assertEqual(self, res_nt, nt.nested_tensor([]))\n TestCase.assertEqual(self, res_nt.nested_dim(), 1)\n\n res_nt = nt.nested_tensor_from_tensor_mask(\n tensor, tensor, nested_dim=1)\n TestCase.assertEqual(self, res_nt, nt.nested_tensor([]))\n TestCase.assertEqual(self, res_nt.nested_dim(), 1)\n\n self.assertRaises(RuntimeError, lambda: nt.nested_tensor_from_tensor_mask(\n tensor, tensor, 
nested_dim=2))\n\n def test_ntftm_empty2(self):\n tensor = torch.tensor([[], []])\n\n expected_nt1 = nt.nested_tensor([\n torch.tensor([]),\n torch.tensor([]),\n ])\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, tensor)\n TestCase.assertEqual(self, res_nt, expected_nt1)\n\n res_nt = nt.nested_tensor_from_tensor_mask(\n tensor, tensor, nested_dim=1)\n TestCase.assertEqual(self, res_nt, expected_nt1)\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, tensor)\n TestCase.assertEqual(self, res_nt, expected_nt1)\n\n res_nt = nt.nested_tensor_from_tensor_mask(\n tensor, tensor, nested_dim=1)\n TestCase.assertEqual(self, res_nt, expected_nt1)\n\n def test_ntftm_empty3(self):\n tensor = torch.tensor([0])\n mask = torch.tensor(False)\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt, nt.nested_tensor([]))\n\n tensor = torch.tensor([[0], [0]])\n mask = torch.tensor([[False], [False]])\n\n def test_ntftm_empty_error(self):\n tensor = torch.tensor([])\n mask = torch.tensor([True])\n self.assertRaisesRegex(RuntimeError,\n \"Data tensor can't be emtpy if a mask has values.\",\n lambda: nt.nested_tensor_from_tensor_mask(tensor, mask))\n\n tensor = torch.tensor([1])\n mask = torch.tensor([])\n self.assertRaisesRegex(RuntimeError,\n \"Mask tensor can't be emtpy if a data tensor has values.\",\n lambda: nt.nested_tensor_from_tensor_mask(tensor, mask))\n\n def test_ntftm_single_scalar_mask_false(self):\n scalar = torch.tensor([1], dtype=torch.uint8)\n mask = torch.tensor(False)\n\n res_nt = nt.nested_tensor_from_tensor_mask(scalar, mask)\n TestCase.assertEqual(self, res_nt, nt.nested_tensor([]))\n\n def test_ntftm_single_scalar_error(self):\n tensor = torch.tensor(1)\n mask = torch.tensor(True)\n self.assertRaisesRegex(RuntimeError, \"Can't construct nested tensor from a scalar.\",\n lambda: nt.nested_tensor_from_tensor_mask(tensor, mask))\n\n def test_ntftm_single_scalar(self):\n tensor = torch.tensor([1], dtype=torch.float)\n mask = torch.tensor(True)\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt, nt.nested_tensor([torch.tensor(1)]))\n\n mask = torch.tensor([True])\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt, nt.nested_tensor([torch.tensor(1)]))\n\n # Extra dim\n tensor = torch.tensor([[1]], dtype=torch.float)\n mask = torch.tensor(True)\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt,\n nt.nested_tensor([\n torch.tensor([1])\n ]))\n\n def test_ntftm_multi_scalars(self):\n tensor = torch.tensor([1, 2, 3])\n mask = torch.tensor(True)\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt,\n nt.nested_tensor([\n torch.tensor(1),\n torch.tensor(2),\n torch.tensor(3)\n ], dtype=torch.int64))\n\n mask = torch.tensor([True])\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt,\n nt.nested_tensor([\n torch.tensor(1),\n torch.tensor(2),\n torch.tensor(3)\n ], dtype=torch.int64))\n\n self.assertRaises(RuntimeError, lambda: nt.nested_tensor_from_tensor_mask(\n tensor, mask, nested_dim=2))\n\n # Extra dim\n tensor = torch.tensor([[1, 2, 3]])\n mask = torch.tensor(True)\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt,\n nt.nested_tensor([\n torch.tensor([1, 2, 3])\n ], dtype=torch.int64))\n\n def test_ntftm_single_tensor_all_true_mask(self):\n tensor = torch.tensor([[1]], 
dtype=torch.float)\n mask = torch.tensor(True)\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(\n self, res_nt, nt.nested_tensor([torch.tensor([1])]))\n\n mask = torch.tensor([True])\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(\n self, res_nt, nt.nested_tensor([torch.tensor([1])]))\n\n def test_ntftm_multi_tensor_scalar_true_mask(self):\n tensor = torch.tensor([[1], [2], [3]])\n mask = torch.tensor(True)\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt,\n nt.nested_tensor([\n torch.tensor([1]),\n torch.tensor([2]),\n torch.tensor([3])\n ], dtype=tensor.dtype))\n\n # Extra dim\n tensor = torch.tensor([[[1]], [[2]], [[3]]])\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n expected_res1 = nt.nested_tensor([\n torch.tensor([[1]]),\n torch.tensor([[2]]),\n torch.tensor([[3]])\n ], dtype=tensor.dtype)\n TestCase.assertEqual(self, res_nt, expected_res1)\n\n def test_ntftm_multi_tensor_true_mask(self):\n extected_nt1 = nt.nested_tensor([\n torch.tensor([[1]]),\n torch.tensor([[2]]),\n torch.tensor([[3]])\n ])\n\n tensor = torch.tensor([[[1]],\n [[2]],\n [[3]]], dtype=torch.float)\n\n # Mask dim 3\n mask3 = torch.tensor([[[True]],\n [[True]],\n [[True]]])\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask3)\n TestCase.assertEqual(self, extected_nt1, res_nt)\n\n # Mask dim 2\n mask2 = torch.tensor([[True],\n [True],\n [True]])\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask2)\n TestCase.assertEqual(self, extected_nt1, res_nt)\n\n # Mask dim 1\n mask1 = torch.tensor([True, True, True])\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask1)\n TestCase.assertEqual(self, extected_nt1, res_nt)\n\n # Mask dim 0\n mask0 = torch.tensor(True)\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask0)\n TestCase.assertEqual(self, extected_nt1, res_nt)\n\n def test_ntftm_single_tensor_all_false_mask(self):\n tensor = torch.tensor([[1]])\n mask = torch.tensor([False])\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt, nt.nested_tensor([]))\n\n tensor = torch.tensor([[1, 2, 3]])\n mask = torch.tensor([False])\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt, nt.nested_tensor([]))\n\n def test_ntftm_multi_tensor_all_false_mask(self):\n tensor = torch.tensor([[[1], [2], [3]]])\n mask = torch.tensor([False])\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt, nt.nested_tensor([]))\n\n mask = torch.tensor([False, False, False])\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt, nt.nested_tensor([]))\n\n mask = torch.tensor([[False], [False], [False]])\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt,\n nt.nested_tensor([\n torch.tensor([], dtype=tensor.dtype)\n ], dtype=torch.int64))\n\n def test_ntftm_multi_tensor_all_false_mask2(self):\n tensor = torch.tensor([[[1], [2], [3]]])\n mask = torch.tensor([[[False], [False], [False]]])\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, res_nt,\n nt.nested_tensor([\n torch.empty((3, 0), dtype=tensor.dtype)\n ], dtype=tensor.dtype))\n\n def test_ntgtm_multi_scalar_mix_mask(self):\n tensor = torch.tensor([1, 2, 3, 4], dtype=torch.float)\n mask = torch.tensor([True, False, False, True])\n expected_nt = nt.nested_tensor([\n torch.tensor(1),\n torch.tensor(4)\n 
])\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, expected_nt, res_nt)\n\n def test_ntgtm_multi_tensor_mix_mask(self):\n tensor = torch.tensor([[1], [2], [3], [4]], dtype=torch.float)\n mask = torch.tensor([True, False, False, True])\n expected_nt = nt.nested_tensor([\n torch.tensor([1]),\n torch.tensor([4])\n ])\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, expected_nt, res_nt)\n\n def test_ntgtm_scalar_with_empty_mix_mask(self):\n tensor = torch.tensor([[0], [11]], dtype=torch.float)\n mask = torch.tensor([False, True])\n\n expected_nt1 = nt.nested_tensor([\n torch.tensor([11], dtype=torch.long)\n ])\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask)\n TestCase.assertEqual(self, expected_nt1, res_nt)\n\n def test_ntftm_test_multi_tensor_mix_mask(self):\n expected_nt1 = nt.nested_tensor([\n torch.tensor([1, 2, 3]),\n torch.tensor([4])\n ])\n\n tensor = torch.tensor([[1, 2, 3],\n [4, 0, 0]], dtype=torch.float)\n mask = torch.tensor([[True, True, True],\n [True, False, False]])\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask, nested_dim=1)\n TestCase.assertEqual(self, expected_nt1, res_nt)\n\n def test_ntftm_test_multi_tensor_mix_mask2(self):\n expected_nt1 = nt.nested_tensor([\n torch.tensor([[1, 2, 3]]),\n torch.tensor([[4]])\n ])\n\n tensor = torch.tensor([[[1, 2, 3]],\n [[4, 0, 0]]], dtype=torch.float)\n mask = torch.tensor([[[True, True, True]],\n [[True, False, False]]])\n\n res_nt = nt.nested_tensor_from_tensor_mask(tensor, mask, nested_dim=1)\n TestCase.assertEqual(self, expected_nt1, res_nt)\n\n self.assertRaises(RuntimeError, lambda: nt.nested_tensor_from_tensor_mask(\n tensor, mask, nested_dim=4))\n\n def test_to_padded_tensor(self):\n data1 = torch.tensor(\n [[[0.8413, 0.7325, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.0000, 0.0000],\n [0.0000, 0.0000, 0.0000, 0.0000]],\n\n [[0.6334, 0.5473, 0.3273, 0.0564],\n [0.3023, 0.6826, 0.3519, 0.1804],\n [0.8431, 0.1645, 0.1821, 0.9185]]])\n mask1 = torch.tensor(\n [[[True, True, False, False],\n [False, False, False, False],\n [False, False, False, False]],\n\n [[True, True, True, True],\n [True, True, True, True],\n [True, True, True, True]]])\n nt2 = nt.nested_tensor_from_tensor_mask(data1, mask1)\n data2, mask2 = nt2.to_tensor_mask()\n self.assertEqual(data1, data2)\n self.assertEqual(mask1, mask2)\n data3 = nt2.to_padded_tensor(padding=-10)\n data1 = data1 + ~mask1 * -10\n self.assertEqual(data1, data3)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "10264014", "language": "Python", "matching_score": 2.8422458171844482, "max_stars_count": 229, "path": "test/test_nested_tensor_masking.py" }, { "content": "import torch\nimport torch.nn.functional as F\nimport numbers\nimport collections\n\nfrom . 
import creation\nimport nestedtensor\n\nTensorMask = collections.namedtuple('TensorMask', 'tensor mask')\n\n\ndef nested_tensor_from_padded_tensor(tensor, nested_dim=1, padding=-1):\n mask = (tensor != padding)\n return nested_tensor_from_tensor_mask(tensor, mask, nested_dim)\n\n\n# Constructs nested tensor from passed tensor and mask.\ndef nested_tensor_from_tensor_mask(tensor, mask, nested_dim=1):\n if tensor is None:\n raise RuntimeError(\"Tensor can't be undefined (None).\")\n\n if mask is None:\n raise RuntimeError(\"Mask can't be undefined (None).\")\n\n # Scalar was passed\n if tensor.dim() == 0:\n raise RuntimeError(\"Can't construct nested tensor from a scalar.\")\n\n if nested_dim == 0:\n raise RuntimeError(\"Nested dimension can't be 0.\")\n\n if nested_dim is not None and nested_dim > tensor.dim():\n raise RuntimeError(\"Nested dimension ({0}) can't be bigger than data tensor dimension ({1}).\".format(\n nested_dim, tensor.dim()))\n\n if tensor.numel() == 0 and mask.numel() != 0:\n raise RuntimeError(\"Data tensor can't be emtpy if a mask has values.\")\n\n if tensor.numel() != 0 and mask.numel() == 0:\n raise RuntimeError(\n \"Mask tensor can't be emtpy if a data tensor has values.\")\n\n return nt_from_tensor_mask(tensor, mask, nested_dim)\n\n\ndef nt_from_tensor_mask(tensor, mask, nested_dim):\n result = torch.ops.nestedtensor.nt_from_tensor_mask(\n tensor, mask, nested_dim)\n assert result is not None\n return nestedtensor.NestedTensor(result).contiguous()\n", "id": "11514471", "language": "Python", "matching_score": 1.8003119230270386, "max_stars_count": 229, "path": "nestedtensor/nested/masking.py" }, { "content": "import torch\n\nfrom .nested.creation import as_nested_tensor\nfrom .nested.creation import nested_tensor\n\nfrom .nested.masking import nested_tensor_from_tensor_mask\nfrom .nested.masking import nested_tensor_from_padded_tensor\n\nfrom .nested.nested import NestedTensor\nfrom .nested.nested import to_nested_tensor\nfrom .nested.nested import transpose_nchw_nhwc\nfrom .nested.nested import transpose_nhwc_nchw\n\nfrom .nested.fuser import fuse_conv_bn\nfrom .nested.fuser import fuse_conv_relu\nfrom .nested.fuser import fuse_conv_add_relu\n\nfrom . import nested\n\nfrom . import _C\n\nfrom . import nn\n", "id": "12355686", "language": "Python", "matching_score": 0.40416938066482544, "max_stars_count": 229, "path": "nestedtensor/__init__.py" }, { "content": "import torch\nimport numbers\nimport warnings\n\nfrom . import nested\nimport nestedtensor\n\n\ndef nested_tensor(data, dtype=None, device=None, requires_grad=False, pin_memory=False, channels_last=False):\n \"\"\"\n Arguments match torch.tensor\n \"\"\"\n if dtype is None:\n dtype = torch.get_default_dtype()\n if device is None:\n device = torch.device('cpu')\n if channels_last is None:\n channels_last = False\n return nested.NestedTensor(nestedtensor._C.nested_tensor_impl(data, dtype, device, requires_grad, pin_memory, channels_last))\n\n\ndef as_nested_tensor(data, dtype=None, device=None, requires_grad=False, pin_memory=False):\n # TODO: Needs tests to check failure cases\n if not isinstance(data, nested.NestedTensor):\n return nested_tensor(data, dtype, device, requires_grad, pin_memory)\n return data\n", "id": "1993181", "language": "Python", "matching_score": 1.4351756572723389, "max_stars_count": 0, "path": "nestedtensor/nested/creation.py" }, { "content": "# NOTES:\n# Look at torch/include/ATen/Functions.h for confusing cases (i.e. 
unexpected argument order)\n# TODO: Add pow and scalar other variants. Write templates more compactly.\n\nHEADER = \"\"\"# include <nestedtensor/csrc/BinaryOps.h>\n\nnamespace at {\n\nusing namespace torch::nested_tensor;\n\"\"\"\nBINARY_OP_DEFAULT = \"\"\"\nTensor NestedTensor_{op}(const Tensor & self_, const Tensor & other_) {{\n Tensor self;\n Tensor other;\n std::tie(self, other) = _expand_other_as(self_, other_);\n return map_nested_tensor(\n [](Tensor s, Tensor o) {{ return at::{op}(s, o); }}, self, other);\n}}\n\"\"\"\n\nBINARY_OP = \"\"\"\nTensor NestedTensor_{op}_Tensor(const Tensor & self_, const Tensor & other_) {{\n Tensor self;\n Tensor other;\n std::tie(self, other) = _expand_other_as(self_, other_);\n return map_nested_tensor(\n [](Tensor s, Tensor o) {{ return at::{op}(s, o); }}, self, other);\n}}\n\"\"\"\nBINARY_OP_SCALAR = \"\"\"\nTensor NestedTensor_{op}_Tensor(const Tensor & self_, const Tensor & other_, const Scalar& alpha) {{\n Tensor self;\n Tensor other;\n std::tie(self, other) = _expand_other_as(self_, other_);\n return map_nested_tensor(\n [&alpha](Tensor s, Tensor o) {{ return at::{op}(s, o, alpha); }}, self, other);\n}}\n\"\"\"\nBINARY_INPLACE_OP = \"\"\"\nTensor & NestedTensor_{op}__Tensor(Tensor & self_, const Tensor & other_) {{\n at::Tensor self;\n at::Tensor other;\n std::tie(self, other) = _expand_other_as(self_, other_);\n apply_nested_tensor(\n [](Tensor& tensor, const Tensor other) {{ tensor.{op}_(other); return tensor;}},\n self,\n other);\n return self_;\n}}\n\"\"\"\nBINARY_INPLACE_OP_DEFAULT = \"\"\"\nTensor & NestedTensor_{op}_(Tensor & self_, const Tensor & other_) {{\n at::Tensor self;\n at::Tensor other;\n std::tie(self, other) = _expand_other_as(self_, other_);\n apply_nested_tensor(\n [](Tensor& tensor, const Tensor other) {{ tensor.{op}_(other); return tensor;}},\n self,\n other);\n return self_;\n}}\n\"\"\"\nBINARY_INPLACE_OP_SCALAR = \"\"\"\nTensor & NestedTensor_{op}__Tensor(Tensor & self_, const Tensor & other_, const Scalar& alpha) {{\n at::Tensor self;\n at::Tensor other;\n std::tie(self, other) = _expand_other_as(self_, other_);\n apply_nested_tensor(\n [&alpha](Tensor& tensor, const Tensor other) {{ tensor.{op}_(other, alpha); return tensor;}},\n self,\n other);\n return self_;\n}}\n\"\"\"\nBINARY_OUT_OP = \"\"\"\nTensor & NestedTensor_{op}_out(\nconst Tensor & self, \nconst Tensor & other,\nTensor & out) {{\n TORCH_CHECK(\n is_nested_tensor_impl(out),\n \"NT binary out variant requires NT as out argument.\");\n TORCH_CHECK(\n is_nested_tensor_impl(out, self, other),\n \"binary_out doesn't support non-NT arguments.\")\n apply_nested_tensor(\n [](Tensor& self, Tensor& other, Tensor& out) {{\n return at::{op}_out(self, other, out);\n }},\n self, other, out);\n return out;\n}}\n\"\"\"\nBINARY_OUT_OP_SCALAR = \"\"\"\nTensor & NestedTensor_{op}_out(\nconst Tensor & self, \nconst Tensor & other,\nconst Scalar& alpha,\nTensor & out) {{\n TORCH_CHECK(\n is_nested_tensor_impl(out),\n \"NT binary out variant requires NT as out argument.\");\n TORCH_CHECK(\n is_nested_tensor_impl(out, self, other),\n \"binary_out doesn't support non-NT arguments.\")\n apply_nested_tensor(\n [&alpha](Tensor& self, Tensor& other, Tensor& out) {{\n return at::{op}_out(out, self, other, alpha);\n }},\n self, other, out);\n return out;\n}}\n\"\"\"\nBINARY_SCALAR_OP = \"\"\"\nTensor NestedTensor_{op}_Scalar(const Tensor & self, const Scalar & other) {{\nreturn self;\n}}\n\"\"\"\nBINARY_INPLACE_SCALAR_OP = \"\"\"\nTensor & NestedTensor_{op}__Scalar(Tensor & self, const 
Scalar & other) {{\nreturn self;\n}}\n\"\"\"\nBINARY_TEMPLATES = [\n BINARY_OP,\n BINARY_INPLACE_OP,\n BINARY_OUT_OP,\n BINARY_SCALAR_OP,\n BINARY_INPLACE_SCALAR_OP\n]\n\nREGISTRATION_HEADER = \"\"\"\nTORCH_LIBRARY_IMPL(aten, NestedTensor, m) {\n\"\"\"\nREGISTRATION_FOOTER = \"\"\"\n}\n\"\"\"\n\nFOOTER = \"\"\"\n} // namespace at\n\"\"\"\n\n\ndef print_file(template_map):\n print(HEADER, end='')\n for k, v in template_map.items():\n print(v)\n print(REGISTRATION_HEADER, end='')\n for k, v in template_map.items():\n reg = \"nt_impl(m, \\\"\"\n reg += k\n reg += \"\\\", NestedTensor_\"\n reg += k.replace('.', '_')\n reg += \");\"\n print(reg)\n print(REGISTRATION_FOOTER, end='')\n print(FOOTER, end='')\n\n\ndef parse_registration_declarations(path):\n with open(path) as f:\n import hashlib\n path_hash = hashlib.md5(f.read().encode('utf-8')).hexdigest()\n # Based on PT GH master commit hash bd3c63aeeb\n if path_hash != \"b1200869a8c0b75d7fdb91d6c0f5569b\":\n raise RuntimeError(\"RegistrationDeclarations file changed again.\")\n with open(path) as f:\n lines = f.read().split(\"\\n\")\n ops = []\n for line in lines:\n if \"//\" in line:\n declaration, schema_dict = line.split(\"//\")\n if declaration.strip() != '':\n schema_dict = eval(schema_dict)\n schema = schema_dict['schema']\n assert schema[:6] == \"aten::\"\n ops.append((declaration, schema[6:]))\n return ops\n\n\ndef get_binary_functions():\n return [\n 'add',\n 'mul',\n 'sub',\n 'div',\n 'pow',\n 'atan2',\n 'remainder',\n ]\n\n\nTEMPLATE_MAP = {\n \"mul.Tensor\": BINARY_OP,\n \"mul.Tensor\": BINARY_OP,\n \"mul_.Tensor\": BINARY_INPLACE_OP,\n \"mul.out\": BINARY_OUT_OP,\n \"mul.Scalar\": BINARY_SCALAR_OP,\n \"mul_.Scalar\": BINARY_INPLACE_SCALAR_OP\n}\n\n\ndef create_template_map(ops):\n template_map = {}\n for op in ops:\n op_reg, op_args = op[1].split(\"(\", 1)\n op_args = \"(\" + op_args\n variant = None\n if \".\" in op_reg:\n op_name, variant = op_reg.split(\".\", 1)\n else:\n op_name = op_reg\n for b in get_binary_functions():\n if op_name == b:\n if variant is None:\n template_map[op_reg] = BINARY_OP_DEFAULT.format(op=b)\n if variant == \"Tensor\":\n if \"Scalar & alpha\" in op[0]:\n template_map[op_reg] = BINARY_OP_SCALAR.format(op=b)\n else:\n template_map[op_reg] = BINARY_OP.format(op=b)\n if variant == \"out\":\n if \"Scalar & alpha\" in op[0]:\n template_map[op_reg] = BINARY_OUT_OP_SCALAR.format(op=b)\n else:\n template_map[op_reg] = BINARY_OUT_OP.format(op=b)\n if op_name == b + \"_\":\n if variant is None:\n template_map[op_reg] = BINARY_INPLACE_OP_DEFAULT.format(op=b)\n if variant == \"Tensor\":\n if \"Scalar & alpha\" in op[0]:\n template_map[op_reg] = BINARY_INPLACE_OP_SCALAR.format(op=b)\n else:\n template_map[op_reg] = BINARY_INPLACE_OP.format(op=b)\n return template_map\n\n\nif __name__ == \"__main__\":\n import sys\n import os\n if not os.path.exists(sys.argv[1]):\n raise RuntimeError(\"Must provide path as argument\")\n path = os.path.abspath(sys.argv[1])\n ops = parse_registration_declarations(path)\n template_map = create_template_map(ops)\n print_file(template_map)\n", "id": "2228262", "language": "Python", "matching_score": 1.3031655550003052, "max_stars_count": 229, "path": "nestedtensor/csrc/scripts/binaryops.py" }, { "content": "import nestedtensor\nimport torch\nimport argparse\nimport time\nimport random\nimport pprint\n\nEMBED_DIM = 128\n\nSEED = 0\n\n\ndef gen_tensor():\n globals()['SEED'] += 1\n # return torch.tensor([globals()['SEED']])\n return torch.rand(EMBED_DIM).to(device='cuda')\n\n\ndef 
gen_clusters(num_clusters, size_range):\n\n def gen_cluster(num_entries):\n return [gen_tensor() for _ in range(num_entries)]\n\n return [gen_cluster(random.randint(*size_range)) for _ in range(num_clusters)]\n\n\ndef gen_algorithm_naive(keys, sub_clusters):\n # For-loops over vectors\n def _naive():\n results = []\n for sub_cluster, key in zip(sub_clusters, keys):\n sub_cluster_results = []\n for cluster in sub_cluster:\n sub_cluster_results.append(\n [torch.dot(key, entry).item() for entry in cluster])\n results.append(sub_cluster_results)\n return results\n return _naive\n\ndef gen_algorithm_mv(keys, sub_clusters):\n # For-loops over vectors and matrices\n new_sub_clusters = []\n for sub_cluster in sub_clusters:\n new_sub_cluster = [torch.stack(cluster) for cluster in sub_cluster]\n new_sub_clusters.append(new_sub_cluster)\n sub_clusters = new_sub_clusters\n def _mv():\n results = []\n for sub_cluster, key in zip(sub_clusters, keys):\n sub_cluster_results = []\n for cluster in sub_cluster:\n sub_cluster_results.append(torch.mv(cluster, key))\n results.append(sub_cluster_results)\n return results\n return _mv\n\ndef gen_algorithm_nested_mv(keys, sub_clusters):\n # For-loops over vectors and matrices\n new_sub_clusters = []\n for sub_cluster in sub_clusters:\n new_sub_cluster = [torch.tensor(list(map(list, cluster))) for cluster in sub_cluster]\n new_sub_clusters.append(new_sub_cluster)\n nested_sub_clusters = nestedtensor.nested_tensor(sub_clusters).to_tensor(2)\n nested_keys = nestedtensor.nested_tensor(keys)\n def _nested_mv():\n return torch.mv(nested_sub_clusters, nested_keys)\n return _nested_mv\n\ndef gen_algorithm_nested_jit_mv(keys, sub_clusters):\n # For-loops over vectors and matrices\n new_sub_clusters = []\n for sub_cluster in sub_clusters:\n new_sub_cluster = []\n for cluster in sub_cluster:\n new_sub_cluster.append(torch.stack(cluster))\n new_sub_clusters.append(new_sub_cluster)\n nested_sub_clusters = nestedtensor.as_nested_tensor(new_sub_clusters)\n nested_keys = nestedtensor.as_nested_tensor(keys)\n\n @nestedtensor._C.jit_tensorwise()\n @torch.jit.script\n def my_fun(x, y):\n return torch.mv(x, y)\n\n def _nested_jit_mv():\n return my_fun(nested_sub_clusters, nested_keys)\n return _nested_jit_mv\n\n\ndef print_results(results, keys, sub_clusters, print_details=False):\n if print_details:\n for i, sub_cluster in enumerate(sub_clusters):\n print(\"\\n\\u001b[31msub cluster {} count {} total number of entries {}\\u001b[0m\".format(\n i, len(sub_cluster), sum(map(len, sub_cluster))))\n pprint.pprint(sub_cluster)\n print(\"\\nkeys\")\n pprint.pprint(keys)\n print(\"\")\n\n for i, result in enumerate(results):\n print(\n \"result scores for \\u001b[31msub cluster {} and key {}\\u001b[0m\".format(i, i))\n pprint.pprint(result)\n\ndef benchmark_fn(fn, run_time = 15.0):\n times = []\n fn()\n t = 0.0\n while (t < run_time):\n ti = time.time()\n fn()\n torch.cuda.synchronize()\n ti = time.time() - ti\n t += ti\n times.append(ti)\n times = torch.tensor(times) * 1e6\n return \"fn {:<15} avg(us): {:10.4f} std(us): {:10.4f} num_runs: {}\".format(fn.__name__, times.mean().item(), times.std().item(), len(times))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--print-results', dest='print_results', action='store_true')\n args = parser.parse_args()\n # NOTE: This dodging creating these subclusters from a single set of clusters\n # This additional memory pressure might be crucial\n keys = [gen_tensor()] * 16\n clusters = gen_clusters(16, 
(16,16))\n sub_clusters = [[clusters[random.randint(0, 15)]] * 8 for _ in range(16)]\n\n # Two keys for now\n # Simulating some overlap\n\n sub_clusters = [clusters[:3], clusters[2:]]\n\n # Get algorithm\n gen_results_naive = gen_algorithm_naive(keys, sub_clusters)\n gen_results_mv = gen_algorithm_mv(keys, sub_clusters)\n gen_results_nested_mv = gen_algorithm_nested_mv(keys, sub_clusters)\n # gen_results_nested_jit_mv = gen_algorithm_nested_jit_mv(keys, sub_clusters)\n\n print(benchmark_fn(gen_results_nested_mv))\n print(benchmark_fn(gen_results_naive))\n print(benchmark_fn(gen_results_mv))\n # print(benchmark_fn(gen_results_nested_jit_mv))\n # import cProfile, pstats, io\n # pr = cProfile.Profile()\n # pr.enable()\n # pr.disable()\n # s = io.StringIO()\n # sortby = 'tottime'\n # ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n # ps.print_stats()\n # print(s.getvalue())\n # print(benchmark_fn(gen_results_nested_mv))\n\n if args.print_results:\n print('naive')\n print_results(gen_results_naive(), keys, sub_clusters)\n print('\\nmv')\n print_results(gen_results_mv(), keys, sub_clusters)\n print('\\nnested_mv')\n print_results(gen_results_nested_mv(), keys, sub_clusters)\n", "id": "10285318", "language": "Python", "matching_score": 1.7318692207336426, "max_stars_count": 229, "path": "benchmarks/nearest_neighbors.py" }, { "content": "import torch\nimport nestedtensor\nimport utils\nimport time\n\n\n@nestedtensor._C.jit_tensorwise()\[email protected]\ndef f(i, w):\n return torch.conv2d(i, w)\n\ndef loop_f(inp1, w):\n for inp in inp1:\n torch.conv2d(inp, w)\n\n\nif __name__ == \"__main__\":\n w = torch.randn(64, 3, 9, 9).cuda()\n inp1 = list(torch.randn(128, 1, 3, 16, 16).cuda().unbind())\n inp3 = nestedtensor.as_nested_tensor(inp1)._impl\n # print(sum(inp.numel() for inp in inp1))\n # print(inp3.numel())\n\n fc = nestedtensor._C.jit_tensorwise()(torch.conv2d)\n\n t0 = time.time()\n count = 0\n while(time.time() - t0 < 5.0):\n r2 = fc(inp3, w)\n torch.cuda.synchronize()\n count += 1\n print(\"jit: \" + str(count))\n\n t0 = time.time()\n count = 0\n while(time.time() - t0 < 5.0):\n loop_f(inp1, w)\n torch.cuda.synchronize()\n count += 1\n print(\"for loop: \" + str(count))\n\n \n # print(r.nested_size())\n\n # na = nestedtensor._C.jit_tensorwise()(torch.mul)\n\n # print(\"111\")\n # out = nestedtensor.as_nested_tensor([torch.randn(1, 2)])\n # print(na(\n # nestedtensor.as_nested_tensor([torch.randn(1, 2)])._impl,\n # 4.0,\n # ))\n # print(\"222\")\n # print('out')\n # print(out)\n\n # nv = nestedtensor._C.jit_tensorwise()(torch.mv)\n # print(nv(\n # nestedtensor._C._ListNestedTensor([torch.randn(1, 2)]),\n # nestedtensor._C._ListNestedTensor([torch.randn(2)]),\n # ))\n\n # print(\"333\")\n # print(na(\n # torch.randn(1, 2),\n # torch.randn(1, 2),\n # ))\n # print(\"444\")\n", "id": "5061951", "language": "Python", "matching_score": 0.5355833172798157, "max_stars_count": 229, "path": "benchmarks/jit_tensorwise.py" }, { "content": "__version__ = '0.1.4+5b45731'\ngit_version = '5b457313bfb6578b43d76282b321657bf85ee1b3'\nfrom nestedtensor import _C\nif hasattr(_C, 'CUDA_VERSION'):\n cuda = _C.CUDA_VERSION\n", "id": "151516", "language": "Python", "matching_score": 0.19441989064216614, "max_stars_count": 0, "path": "nestedtensor/version.py" } ]
2.341916
FlyerInk
[ { "content": "\"\"\"\nSlot Read/Write Example to demonstrate encrypted and unencrypted transfers\n\"\"\"\n# (c) 2015-2018 Microchip Technology Inc. and its subsidiaries.\n#\n# Subject to your compliance with these terms, you may use Microchip software\n# and any derivatives exclusively with Microchip products. It is your\n# responsibility to comply with third party license terms applicable to your\n# use of third party software (including open source software) that may\n# accompany Microchip software.\n#\n# THIS SOFTWARE IS SUPPLIED BY MICROCHIP \"AS IS\". NO WARRANTIES, WHETHER\n# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED\n# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A\n# PARTICULAR PURPOSE. IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT,\n# SPECIAL, PUNITIVE, INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE\n# OF ANY KIND WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF\n# MICROCHIP HAS BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE\n# FORESEEABLE. TO THE FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL\n# LIABILITY ON ALL CLAIMS IN ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED\n# THE AMOUNT OF FEES, IF ANY, THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR\n# THIS SOFTWARE.\n\nfrom cryptoauthlib import *\nfrom cryptoauthlib.device import *\nfrom common import *\nimport time\n\n# Slot 4 IO Encryption key\nENC_KEY = bytearray([\n 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,\n 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,\n 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,\n 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11\n])\n\nIO_KEY = bytearray([\n 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,\n 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,\n 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,\n 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22\n])\n\nread_write_config = {\n 'ATSHA204A': {'clear': 8, 'encrypted': 3},\n 'ATECC508A': {'clear': 8, 'encrypted': 9},\n 'ATECC608A': {'clear': 8, 'encrypted': 5}\n}\n\n\ndef read_write(iface='hid', device='ecc', **kwargs):\n ATCA_SUCCESS = 0x00\n\n # Loading cryptoauthlib(python specific)\n load_cryptoauthlib()\n\n # Get the target default config\n cfg = eval('cfg_at{}a_{}_default()'.format(atca_names_map.get(device), atca_names_map.get(iface)))\n\n # Set interface parameters\n if kwargs is not None:\n for k, v in kwargs.items():\n icfg = getattr(cfg.cfg, 'atca{}'.format(iface))\n setattr(icfg, k, int(v, 16))\n\n # Basic Raspberry Pi I2C check\n if 'i2c' == iface and check_if_rpi():\n cfg.cfg.atcai2c.bus = 1\n\n # Initialize the stack\n assert atcab_init(cfg) == ATCA_SUCCESS\n\n # Check device type\n info = bytearray(4)\n assert atcab_info(info) == ATCA_SUCCESS\n dev_name = get_device_name(info)\n dev_type = get_device_type_id(dev_name)\n\n # Reinitialize if the device type doesn't match the default\n if dev_type != cfg.devtype:\n cfg.dev_type = dev_type\n assert atcab_release() == ATCA_SUCCESS\n time.sleep(1)\n assert atcab_init(cfg) == ATCA_SUCCESS\n\n slots = read_write_config.get(dev_name)\n if slots is None:\n raise ValueError('No slot configuration for {}'.format(dev_name))\n\n # Read the config to find some setup values\n config_data = bytearray(128)\n assert ATCA_SUCCESS == atcab_read_config_zone(config_data)\n if dev_name == 'ATSHA204A':\n config = Atsha204aConfig.from_buffer(config_data[:88])\n elif dev_name == 'ATECC508A':\n config = Atecc508aConfig.from_buffer(config_data)\n elif dev_name == 'ATECC608A':\n config = 
Atecc608aConfig.from_buffer(config_data)\n else:\n raise ValueError('Unsupported device {}'.format(dev_name))\n\n # Find the write key slot for the encrypted write slot\n write_key_slot = config.SlotConfig[slots['encrypted']].WriteKey\n\n write_data = bytearray(32)\n read_data = bytearray(32)\n\n print('\\nGeneraing data using RAND command')\n assert atcab_random(write_data) == ATCA_SUCCESS\n print(' Generated data:')\n print(pretty_print_hex(write_data, indent=' '))\n\n # Writing a data to slot\n print('\\nWrite command:')\n print(' Writing data to slot {}'.format(slots['clear']))\n assert atcab_write_zone(2, slots['clear'], 0, 0, write_data, 32) == ATCA_SUCCESS\n print(' Write Success')\n\n # Reading the data in the clear from slot\n print('\\nRead command:')\n print(' Reading data stored in slot {}'.format(slots['clear']))\n assert atcab_read_zone(2, slots['clear'], 0, 0, read_data, 32) == ATCA_SUCCESS\n print(' Read data:')\n print(pretty_print_hex(read_data, indent=' '))\n\n # Compare the read data to the written data\n print('\\nVerifing read data matches written data:')\n print(' Data {}!'.format('Matches' if (read_data == write_data) else 'Does Not Match'))\n\n # Writing IO protection key. This key is used as IO encryption key.\n print('\\nWriting IO Protection Secret')\n assert atcab_write_enc(write_key_slot, 0, IO_KEY, ENC_KEY, 1, 0) == ATCA_SUCCESS\n\n print('\\nGeneraing data using RAND command')\n assert atcab_random(write_data) == ATCA_SUCCESS\n print(' Generated data:')\n print(pretty_print_hex(write_data, indent=' '))\n\n # Writing a key to slot '1' through encrypted write\n print('\\nEncrypted Write Command:')\n print(' Writing data to slot {}'.format(slots['encrypted']))\n assert atcab_write_enc(slots['encrypted'], 0, write_data, IO_KEY, write_key_slot) == ATCA_SUCCESS\n print(' Write Success')\n\n # Reading the key in plain text from slot '10'\n print('\\nEncrypted Read Command:')\n print(' Reading data stored in slot {}'.format(slots['encrypted']))\n assert atcab_read_enc(slots['encrypted'], 0, read_data, IO_KEY, write_key_slot) == ATCA_SUCCESS\n print(' Read data:')\n print(pretty_print_hex(read_data, indent=' '))\n\n # Compare the read data to the written data\n print('\\nVerifing read data matches written data:')\n print(' Data {}!'.format('Matches' if (read_data == write_data) else 'Does Not Match'))\n\n # Free the library\n atcab_release()\n\n\nif __name__ == '__main__':\n parser = setup_example_runner(__file__)\n args = parser.parse_args()\n\n print('\\nBasic Read/Write Example')\n read_write(args.iface, args.device, **parse_interface_params(args.params))\n print('\\nDone')\n", "id": "804954", "language": "Python", "matching_score": 6.215433597564697, "max_stars_count": 0, "path": "python/examples/read_write.py" }, { "content": "\"\"\"\nDevice Info Retrieval Example\n\"\"\"\n# (c) 2015-2018 Microchip Technology Inc. and its subsidiaries.\n#\n# Subject to your compliance with these terms, you may use Microchip software\n# and any derivatives exclusively with Microchip products. It is your\n# responsibility to comply with third party license terms applicable to your\n# use of third party software (including open source software) that may\n# accompany Microchip software.\n#\n# THIS SOFTWARE IS SUPPLIED BY MICROCHIP \"AS IS\". NO WARRANTIES, WHETHER\n# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED\n# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A\n# PARTICULAR PURPOSE. 
IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT,\n# SPECIAL, PUNITIVE, INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE\n# OF ANY KIND WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF\n# MICROCHIP HAS BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE\n# FORESEEABLE. TO THE FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL\n# LIABILITY ON ALL CLAIMS IN ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED\n# THE AMOUNT OF FEES, IF ANY, THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR\n# THIS SOFTWARE.\n\nfrom cryptoauthlib import *\nfrom common import *\n\n\ndef info(iface='hid', device='ecc', **kwargs):\n ATCA_SUCCESS = 0x00\n\n # Loading cryptoauthlib(python specific)\n load_cryptoauthlib()\n\n # Get the target default config\n cfg = eval('cfg_at{}a_{}_default()'.format(atca_names_map.get(device), atca_names_map.get(iface)))\n\n # Set interface parameters\n if kwargs is not None:\n for k, v in kwargs.items():\n icfg = getattr(cfg.cfg, 'atca{}'.format(iface))\n setattr(icfg, k, int(v, 16))\n\n # Basic Raspberry Pi I2C check\n if 'i2c' == iface and check_if_rpi():\n cfg.cfg.atcai2c.bus = 1\n\n # Initialize the stack\n assert atcab_init(cfg) == ATCA_SUCCESS\n print('')\n\n # Request the Revision Number\n info = bytearray(4)\n assert atcab_info(info) == ATCA_SUCCESS\n print('\\nDevice Part:')\n print(' ' + get_device_name(info))\n\n # Request the Serial Number\n serial_number = bytearray(9)\n assert atcab_read_serial_number(serial_number) == ATCA_SUCCESS\n print('\\nSerial number: ')\n print(pretty_print_hex(serial_number, indent=' '))\n\n # Read the configuration zone\n config_zone = bytearray(128)\n assert atcab_read_config_zone(config_zone) == ATCA_SUCCESS\n\n print('\\nConfiguration Zone:')\n #print(pretty_print_c_hex(config_zone, indent=' '))\n print(pretty_print_hex(config_zone, indent=' '))\n\n # Check the device locks\n print('\\nCheck Device Locks')\n is_locked = AtcaReference(False)\n assert atcab_is_locked(0, is_locked) == ATCA_SUCCESS\n config_zone_locked = bool(is_locked.value)\n print(' Config Zone is %s' % ('locked' if config_zone_locked else 'unlocked'))\n\n assert atcab_is_locked(1, is_locked) == ATCA_SUCCESS\n data_zone_locked = bool(is_locked.value)\n print(' Data Zone is %s' % ('locked' if data_zone_locked else 'unlocked'))\n\n # Load the public key\n if 'ecc' == device and data_zone_locked:\n print('\\nLoading Public key\\n')\n public_key = bytearray(64)\n assert atcab_get_pubkey(0, public_key) == ATCA_SUCCESS\n print(pretty_print_hex(public_key, indent=' '))\n print(convert_ec_pub_to_pem(public_key))\n\n # Free the library\n atcab_release()\n\n\nif __name__ == '__main__':\n parser = setup_example_runner(__file__)\n args = parser.parse_args()\n\n info(args.iface, args.device, **parse_interface_params(args.params))\n print('\\nDone')\n", "id": "5493817", "language": "Python", "matching_score": 4.096518516540527, "max_stars_count": 0, "path": "python/examples/info.py" }, { "content": "from cryptoauthlib import *\nfrom common import *\nimport time\nimport hashlib\n\n# Safe input if using python 2\ntry: input = raw_input\nexcept NameError: pass\n\n# Example rootKey, store in Slot0\nrootkey = bytearray.fromhex (\n '00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00'\n '00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00')\n\ndef test_mac(iface='hid', device='ecc', i2c_addr=None, keygen=True, **kwargs):\n ATCA_SUCCESS = 0x00\n\n # Loading cryptoauthlib(python specific)\n load_cryptoauthlib()\n\n # Get the target default config\n cfg = 
eval('cfg_at{}a_{}_default()'.format(atca_names_map.get(device), atca_names_map.get(iface)))\n\n # Set interface parameters\n if kwargs is not None:\n for k, v in kwargs.items():\n icfg = getattr(cfg.cfg, 'atca{}'.format(iface))\n setattr(icfg, k, int(v, 16))\n\n # Basic Raspberry Pi I2C check\n if 'i2c' == iface and check_if_rpi():\n cfg.cfg.atcai2c.bus = 1\n\n # Initialize the stack\n assert atcab_init(cfg) == ATCA_SUCCESS\n\n # Check device type\n info = bytearray(4)\n assert atcab_info(info) == ATCA_SUCCESS\n dev_name = get_device_name(info)\n dev_type = get_device_type_id(dev_name)\n\n # Reinitialize if the device type doesn't match the default\n if dev_type != cfg.devtype:\n cfg.dev_type = dev_type\n assert atcab_release() == ATCA_SUCCESS\n time.sleep(1)\n assert atcab_init(cfg) == ATCA_SUCCESS\n\n # Request the Serial Number\n serial_number = bytearray(9)\n assert atcab_read_serial_number(serial_number) == ATCA_SUCCESS\n print('Serial number: ')\n print(pretty_print_hex(serial_number, indent=' '))\n \n # Check the device locks\n print('Check Device Locks')\n is_locked = AtcaReference(False)\n assert atcab_is_locked(0, is_locked) == ATCA_SUCCESS\n config_zone_locked = bool(is_locked.value)\n print(' Config Zone is %s' % ('locked' if config_zone_locked else 'unlocked'))\n\n assert atcab_is_locked(1, is_locked) == ATCA_SUCCESS\n data_zone_locked = bool(is_locked.value)\n print(' Data Zone is %s' % ('locked' if data_zone_locked else 'unlocked'))\n\n # Run a nonce command to get a random data\n seed_in = bytearray.fromhex(\n '22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22 22')\n randout = bytearray(32)\n assert atcab_nonce_rand(seed_in, randout) == ATCA_SUCCESS\n print('Challenge: ')\n print(pretty_print_hex(randout, indent=' '))\n\n # Run a MAC command with Slot0, Slot0 have programmed with customers secure key\n digest = bytearray(32)\n assert atcab_mac(0x01, 0, 0, digest) == ATCA_SUCCESS\n print('MAC Digest: ')\n print(pretty_print_hex(digest, indent=' '))\n \n # Get Tempkey from randout\n hashBytes = bytearray.fromhex('16 00 00')\n inputdata = randout + seed_in + hashBytes\n sha256 = hashlib.sha256()\n sha256.update(inputdata)\n tempkey = sha256.digest()\n print('Tempkey: ')\n print(pretty_print_hex(tempkey, indent=' '))\n\n # Get Host MAC from RootKey + SN + SN Pad + ...\n macbytes = bytearray.fromhex(\n '08 01 00 00 00 00 00 00'\n '00 00 00 00 00 00 00 EE'\n '00 00 00 00 01 23 00 00')\n inputdata = rootkey + tempkey + macbytes\n sha256 = hashlib.sha256()\n sha256.update(inputdata)\n sw_digest = sha256.digest()\n print('SW Digest: ')\n print(pretty_print_hex(sw_digest, indent=' '))\n\n if sw_digest == digest:\n print('MAC Verify Success!\\n')\n else:\n print('MAC Verify Fail!\\n')\n\n atcab_release()\n\nif __name__ == '__main__':\n parser = setup_example_runner(__file__)\n parser.add_argument('--i2c', help='I2C Address (in hex)')\n parser.add_argument('--gen', default=True, help='Generate new keys')\n args = parser.parse_args()\n\n if args.i2c is not None:\n args.i2c = int(args.i2c, 16)\n\n print('\\nTest MAC Starting...\\n')\n test_mac(args.iface, args.device, args.i2c, args.gen, **parse_interface_params(args.params))\n", "id": "943619", "language": "Python", "matching_score": 1.866866946220398, "max_stars_count": 0, "path": "python/examples/mac.py" }, { "content": "\"\"\" Common helper functions for cryptoauthlib examples \"\"\"\nimport argparse\nimport os\nimport base64\nimport sys\n\n# Maps common name to the specific name used internally\natca_names_map = {'i2c': 
'i2c', 'hid': 'kithid', 'sha': 'sha20x', 'ecc': 'eccx08'}\n\ntry:\n FileNotFoundError\nexcept NameError:\n FileNotFoundError = IOError\n\n\ndef setup_example_runner(module):\n \"\"\"\n Common helper function that sets up the script entry for all examples\n \"\"\"\n example = os.path.basename(module).split('.')[0]\n\n try:\n with open(example + '.md', 'r') as f:\n details = f.read()\n except FileNotFoundError:\n details = example.upper() + ' Example'\n\n parser = argparse.ArgumentParser(description=details, \n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n parser.add_argument('-i', '--iface', default='hid', choices=['i2c', 'hid'], help='Interface type (default: hid)')\n parser.add_argument('-d', '--device', default='ecc', choices=['ecc', 'sha'], help='Device type (default: ecc)')\n parser.add_argument('-p', '--params', nargs='*', help='Interface Parameters in the form key=value')\n\n return parser\n\n\ndef parse_interface_params(list):\n \"\"\"\n Parse a variable list of key=value args into a dictionary suitable for kwarg usage\n \"\"\"\n return {} if list is None else dict([s.split('=') for s in list])\n\ndef pretty_print_c_hex(a, l=16, indent=''):\n \"\"\"\n Format a list/bytes/bytearray object into a formatted ascii hex string\n \"\"\"\n s = ''\n a = bytearray(a)\n for x in range(0, len(a), l):\n s += indent + ''.join(['0x%02X, ' % y for y in a[x:x+l]]) + '\\n'\n return s\n\ndef pretty_print_hex(a, l=16, indent=''):\n \"\"\"\n Format a list/bytes/bytearray object into a formatted ascii hex string\n \"\"\"\n lines = []\n a = bytearray(a)\n for x in range(0, len(a), l):\n lines.append(indent + ' '.join(['{:02X}'.format(y) for y in a[x:x+l]]))\n return '\\n'.join(lines)\n\n\ndef convert_ec_pub_to_pem(raw_pub_key):\n \"\"\"\n Convert to the key to PEM format. Expects bytes\n \"\"\"\n public_key_der = bytearray.fromhex('3059301306072A8648CE3D020106082A8648CE3D03010703420004') + raw_pub_key\n public_key_b64 = base64.b64encode(public_key_der).decode('ascii')\n public_key_pem = (\n '-----BEGIN PUBLIC KEY-----\\n'\n + '\\n'.join(public_key_b64[i:i + 64] for i in range(0, len(public_key_b64), 64)) + '\\n'\n + '-----END PUBLIC KEY-----'\n )\n return public_key_pem\n\n\ndef check_if_rpi():\n \"\"\"\n Does a basic check to see if the script is running on a Raspberry Pi\n \"\"\"\n is_rpi = False\n try:\n with open('/sys/firmware/devicetree/base/model', 'r') as f:\n if f.readline().startswith('Raspberry'):\n is_rpi = True\n except FileNotFoundError:\n is_rpi = False\n\n return is_rpi\n", "id": "256143", "language": "Python", "matching_score": 0.9726772904396057, "max_stars_count": 0, "path": "python/examples/common.py" } ]
2.981693
Chensien-cse
[ { "content": "num=10\nnum=20\n\nnum=30\n", "id": "7166629", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "a.py" } ]
0
aby2s
[ { "content": "from setuptools import setup\nfrom setuptools import find_packages\n\n\nsetup(name='harmonium',\n version='0.0.2',\n description='Framework for building RBM',\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/aby2s/harmonium',\n download_url='https://github.com/aby2s/harmonium',\n license='Apache 2.0',\n install_requires=['tensorflow','numpy', 'Pillow', 'scikit-learn'],\n packages=find_packages())\n", "id": "8144447", "language": "Python", "matching_score": 0.26312360167503357, "max_stars_count": 7, "path": "setup.py" }, { "content": "import argparse\nimport gzip\nimport os\nimport pickle\nimport numpy as np\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import StandardScaler\n\nfrom harmonium.optimizers import cd, pcd\nfrom harmonium.rbm import RBMModel, RBMLayer\nfrom harmonium.rbm_utils import save_weights, save_hidden_state\nimport tensorflow as tf\nimport urllib.request as request\nimport sys\nfrom tensorflow.python import debug as tf_debug\n\nfrom harmonium.regularizers import SparsityTarget\n\n\ndef load_mnist(data_path):\n if not os.path.exists(data_path):\n os.makedirs(data_path)\n\n dataset = os.path.join(data_path, 'mnist.pkl.gz')\n\n if not os.path.isfile(dataset):\n print('loading data... ')\n request.urlretrieve('http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz', dataset)\n print('loading data complete... ')\n\n with gzip.open(dataset, 'rb') as f:\n try:\n train_set, valid_set, test_set = pickle.load(f, encoding='latin1')\n except:\n train_set, valid_set, test_set = pickle.load(f)\n\n return [train_set[0], valid_set[0], test_set[0]]\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Example of RBM training on MNIST dataset. You can tune some parameters via command line.')\n\n parser.add_argument('--hidden_units', action=\"store\", dest=\"hidden_units\", default=100, type=int,\n help='Number of hidden units', required=False)\n\n parser.add_argument('--visible_bias', action=\"store_true\", dest=\"visible_bias\", default=True,\n help='Use visible bias', required=False)\n\n parser.add_argument('--hidden_bias', action=\"store_true\", dest=\"hidden_bias\", default=True,\n help='Use hidden bias', required=False)\n\n parser.add_argument('--visible_activation', action=\"store\", dest=\"visible_activation\", default='sigmoid',\n choices=['sigmoid', 'linear'], help='Visible units type', required=False)\n\n parser.add_argument('--hidden_activation', action=\"store\", dest=\"hidden_activation\", default='sigmoid',\n choices=['sigmoid', 'relu'], help='Hidden units type', required=False)\n\n parser.add_argument('--output_folder', action=\"store\", dest=\"output_folder\", default='./output',\n help='Folder to store outputs, hidden activations and weights', required=False)\n\n parser.add_argument('--mnist_folder', action=\"store\", dest=\"mnist_folder\", default='./data',\n help='Folder containing mnist dataset. 
If not present, it will be downloaded automatically',\n required=False)\n\n parser.add_argument('--tfdebug', action=\"store\", dest=\"tfdebug\", default=None, choices=['cli', 'tensorboard'],\n help='Use debug session wrapper: cli, tensorboard or none', required=False)\n\n parser.add_argument('--tbserver', action=\"store\", dest=\"tbserver\", default='localhost:2333',\n help='TensorBoard server address to use with TensorBoardDebugWrapperSession',\n required=False)\n\n params = parser.parse_args(sys.argv[1:])\n\n if not os.path.exists(params.output_folder):\n os.makedirs(params.output_folder)\n\n train_set, valid_set, test_set = load_mnist(params.mnist_folder)\n\n if params.visible_activation == 'linear':\n scaler = StandardScaler()\n train_set = scaler.fit_transform(train_set)\n valid_set = scaler.transform(valid_set)\n test_set = scaler.transform(test_set)\n\n n_hidden = params.hidden_units\n n_visible = 784\n\n with tf.Session() as session:\n if params.tfdebug == 'cli':\n session = tf_debug.LocalCLIDebugWrapperSession(session)\n elif params.tfdebug == 'tensorboard':\n session = tf_debug.TensorBoardDebugWrapperSession(session, params.tbserver)\n\n rbm = RBMModel(visible=RBMLayer(activation=params.visible_activation, units=n_visible,\n use_bias=params.visible_bias, sampled=False),\n hidden=RBMLayer(activation=params.hidden_activation, units=n_hidden,\n use_bias=params.hidden_bias, sampled=True),\n session=session)\n\n rbm.compile(pcd(1, lr=1e-2), kernel_regularizer=SparsityTarget(l=0.9, p=0.01))\n\n visualisation_set = valid_set[np.random.randint(len(valid_set), size=400)]\n for i in range(10):\n rbm.fit(train_set, batch_size=128, nb_epoch=10, verbose=2)\n\n weights = rbm.get_weights()\n save_weights(os.path.join(params.output_folder, 'weights{}.jpg'.format(i)), weights, shape=(28, 28), tile=(10, 10), spacing=(1, 1))\n visualisation_inference = rbm.generate(visualisation_set, sampled=False)\n\n if params.visible_activation == 'linear':\n visualisation_inference = scaler.inverse_transform(visualisation_inference)\n\n hidden_state = rbm.hidden_state(visualisation_inference, sampled=True)\n save_weights(os.path.join(params.output_folder, 'output{}.jpg'.format(i)),\n visualisation_inference.T, shape=(28, 28), tile=(20, 20), spacing=(1, 1))\n\n save_hidden_state(os.path.join(params.output_folder, 'hidden{}.jpg'.format(i)), hidden_state)\n bias = np.array([0]) # rbm.visible.get_bias()\n print(\"RBM, epoch {}, GENERATE ERROR: {}, MAX WEIGHT: {}, MIN WEIGHT {}, MEDIAN WEIGHT {}, MAX BIAS {}, MIN BIAS {}, MEDIAN BIAS {}\".\n format(i, mean_squared_error(visualisation_set, visualisation_inference), np.max(weights), np.min(weights),\n np.median(weights), np.max(bias), np.min(bias), np.median(bias)))\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "id": "11367245", "language": "Python", "matching_score": 3.795428514480591, "max_stars_count": 7, "path": "examples/mnist_example.py" }, { "content": "import tensorflow as tf\nimport numpy as np\nimport collections\nimport pickle\nfrom harmonium.core import sample_bernoulli, sample_gaussian\n\n\nclass RBMLayer(object):\n activations = {'sigmoid': tf.nn.sigmoid, 'linear': None, 'relu': tf.nn.relu}\n #samplers = {'sigmoid': sample_bernoulli, 'linear': sample_gaussian, 'relu': lambda x: sample_gaussian(relu=True)}\n samplers = {'sigmoid': sample_bernoulli, 'linear': lambda x: x, 'relu': lambda x: x}\n\n def __init__(self, units,\n activation=None,\n use_bias=False,\n bias=None,\n sampled=False):\n \"\"\"\n\n :param units: int, number of units\n :param 
activation: string, 'sigmoid', 'linear' or 'relu'\n :param use_bias: boolean, flag to use bias, if false bias set to zero and never updated\n :param bias: 1d-array, bias initial value, if None, bias initialized with zeros\n :param name: string, layer name\n :param sampled: boolean,\n \"\"\"\n self.units = units\n self.use_bias = use_bias\n self.default_sampled = sampled\n self.bias_initializer = bias\n\n\n if activation in self.activations:\n self.activation = self.activations[activation]\n self.sampler = self.samplers[activation]\n self.binary = activation == \"sigmoid\"\n else:\n raise ValueError('Unknown activation identifier {}'.format(activation))\n\n self.session = None\n\n def initialize(self, session, name='layer'):\n self.session = session\n self.name = name\n if self.use_bias:\n self.bias = tf.get_variable(name=name + '_bias', shape=(self.units,),\n initializer=tf.zeros_initializer() if self.bias_initializer is None\n else tf.constant_initializer(self.bias_initializer))\n\n def call(self, input, weights, transpose_weights=False, sampled=None):\n sampled = self.default_sampled if sampled is None else sampled\n\n kernel = tf.matmul(input, weights, transpose_b=transpose_weights, name='{}_kernel'.format(self.name))\n if self.use_bias:\n kernel = tf.add(kernel, self.bias)\n\n return self.nonlinearity(kernel, sampled)\n\n def nonlinearity(self, kernel, sampled):\n output = kernel if self.activation is None else self.activation(kernel, name='{}_activation'.format(self.name))\n if sampled:\n output = self.sampler(output)\n return output\n\n\n\n def get_bias(self):\n return self.session.run(self.bias)\n\n\n\n\n\n\nclass RBMModel(object):\n def __init__(self, visible, hidden, session=None, weights=None, weights_stddev=0.01, scope='RBM'):\n \"\"\"\n :param visible: RBMLayer, visible layer\n :param hidden: RBMLayer, hidden layer\n :param weights: 2d-array, weights for initialization\n :param weights_stddev: float, if weights aren't provided, RBM weights are initialized with\n gaussian random values with mean=0 and stddev=weights_stddev\n \"\"\"\n\n self.session = tf.Session() if session is None else session\n self.hidden = hidden\n self.visible = visible\n\n with tf.variable_scope(scope) as scope:\n self.W = tf.get_variable(name='weights', shape=(self.visible.units, self.hidden.units),\n initializer=tf.random_normal_initializer(stddev=weights_stddev) if weights is None\n else tf.constant_initializer(weights))\n\n self.visible.initialize(self.session, name='visible')\n self.hidden.initialize(self.session, name='hidden')\n self.input = tf.placeholder(\"float\", [None, self.visible.units], name='input')\n\n self.scope = scope\n\n\n\n def energy(self, visible_state, hidden_state, scope='energy'):\n with tf.variable_scope(scope):\n visible_state = tf.stop_gradient(visible_state, name=\"visible_state\")\n hidden_state = tf.stop_gradient(hidden_state, name=\"hidden_state\")\n energy = -tf.reduce_mean(tf.reduce_sum(tf.multiply(tf.matmul(visible_state, self.W, name='visible_weights'),\n hidden_state, name='weights_hidden')\n , axis=1, name='energy_sum'), name=\"batch_energy_mean\")\n\n if self.visible.use_bias:\n if self.visible.binary:\n energy = tf.add(energy, -tf.reduce_mean(\n tf.reduce_sum(tf.multiply(self.visible.bias, visible_state, name='visible_bias_energy'), axis=1)))\n else:\n v = visible_state - self.visible.bias\n energy = tf.add(energy, tf.reduce_mean(tf.reduce_sum(tf.multiply(v, v) / 2, axis=1)))\n\n\n if self.hidden.use_bias:\n if self.hidden.binary:\n energy = tf.add(energy, 
-tf.reduce_mean(\n tf.reduce_sum(tf.multiply(self.hidden.bias, hidden_state, name='hidden_bias_energy'), axis=1)))\n else:\n h = hidden_state - self.hidden.bias\n energy = tf.add(energy, tf.reduce_mean(tf.reduce_sum(tf.multiply(h, h) / 2, axis=1)))\n\n return energy\n\n def burn_in(self, visible_state=None, hidden_state=None, n=1, sampled=None):\n assert n > 0, 'Number of steps to burn in should be greater than zero'\n if hidden_state is None:\n hidden_state = self.hidden.call(visible_state, self.W, sampled=sampled)\n burned_in_hidden_state = hidden_state\n for i in range(n+1):\n burned_in_visible_state = self.visible.call(burned_in_hidden_state, self.W, transpose_weights=True,\n sampled=sampled)\n burned_in_hidden_state = self.hidden.call(burned_in_visible_state, self.W, sampled=sampled)\n return [burned_in_visible_state, burned_in_hidden_state]\n\n def get_weights(self):\n \"\"\"\n :return: 2d-array, RBM weights\n \"\"\"\n return self.session.run(self.W)\n\n\n\n def compile(self, optimizer,\n metrics=None, kernel_regularizer=None, bias_regularizer=None):\n \"\"\"\n :param optimizer: optimizer instance, supports only cd instance\n :param metrics: unsupported\n :param config: config to initialize TensorFlow session\n :param unstack: boolean. This option allows to train very large RBMs. You can switch it to true, if you get\n OOM. Never do it otherwise, because it makes training really slow.\n :param kernel_regularizer: available l1/l2 regularizers or None\n :param bias_regularizer: available l1/l2 regularizers or None\n \"\"\"\n\n\n self.summary_writer = tf.summary.FileWriter('./summary', self.session.graph)\n\n with tf.variable_scope(self.scope):\n with tf.name_scope(self.scope.original_name_scope):\n self.optimizer = optimizer(self)\n\n [energy, update] = self.optimizer.get_cost_update(\n self.input)\n\n self.energy_val = energy\n self.cost_update, self.cost = tf.metrics.mean(energy)\n self.update = update\n\n if kernel_regularizer:\n self.kernel_regularizer = kernel_regularizer(self, self.W)\n else:\n self.kernel_regularizer = None\n\n if bias_regularizer is not None:\n self.visible_bias_regularizer = bias_regularizer(self, self.visible.bias)\n self.hidden_bias_regularizer = bias_regularizer(self, self.hidden.bias)\n else:\n self.visible_bias_regularizer = None\n self.hidden_bias_regularizer = None\n\n self.kernel_regularizer = None\n self.visible_bias_regularizer = None\n self.hidden_bias_regularizer = None\n\n self.session.run(tf.global_variables_initializer())\n\n def fit(self, x, batch_size=32, nb_epoch=10, verbose=1, validation_data=None, shuffle=False):\n \"\"\"\n Do RBM fitting on provided training set\n :param x: 2d-array, training set\n :param batch_size: int, minibatch size\n :param nb_epoch: int, number of epochs\n :param verbose: 0 for no output, 1 for output per minibatch, 2 for output per epoch\n :param validation_data: 2d-array, validation data (unused right now)\n :param shuffle: boolean, flag to shuffle training data every epoch\n \"\"\"\n if verbose > 0:\n print(\"Fitting RBM on {} samples with {} batch size and {} epochs\".format(len(x), batch_size, nb_epoch))\n\n session_run = [self.update, self.cost_update, self.cost]\n\n if self.kernel_regularizer is not None:\n session_run.append(self.kernel_regularizer)\n\n if self.visible_bias_regularizer is not None:\n session_run.append(self.visible_bias_regularizer)\n\n if self.hidden_bias_regularizer is not None:\n session_run.append(self.hidden_bias_regularizer)\n\n\n samples_num = len(x)\n index_array = 
np.arange(samples_num)\n\n batches_num = int(len(x) / batch_size) + (1 if len(x) % batch_size > 0 else 0)\n\n for j in range(nb_epoch):\n if verbose > 0:\n self.log(\"Epoch {}/{}\", j + 1, nb_epoch)\n\n if shuffle:\n np.random.shuffle(index_array)\n\n batches = [(i * batch_size, min(samples_num, (i + 1) * batch_size)) for i in range(0, batches_num)]\n free_energy = 0\n self.session.run([tf.local_variables_initializer()])\n for batch_indices in batches:\n batch = x[index_array[batch_indices[0]:batch_indices[1]]]\n\n res = self.session.run(session_run, feed_dict={self.input: batch})\n\n free_energy = res[2]\n\n if verbose == 1:\n self.log('{}/{} free energy: {}'.format(batch_indices[1], len(x), free_energy))\n if verbose > 0:\n self.log('Epoch complete, free energy {}'.format(free_energy))\n\n if verbose > 0:\n self.log('Fitting completed')\n\n def generate(self, x, n=1, sampled=None):\n \"\"\"\n Returns visible state after applying n Gibbs sampling steps\n :param x: 2d-array, visible unit states\n :param n: int, number of Gibbs sampling steps\n :param sampled: boolean, if true, do sampling from units states\n :return: 2d-array, generated visible state\n \"\"\"\n visible, _ = self.burn_in(self.input, n=n, sampled=sampled)\n return self.session.run(visible, feed_dict={self.input: x})\n\n def hidden_state(self, x, sampled=None):\n \"\"\"\n Returns hidden state after applying n Gibbs sampling steps\n :param x: 2d-array, visible unit states\n :param n: int, number of Gibbs sampling steps\n :param sampled: boolean, if true, do sampling from units states\n :return: 2d-array, generated visible state\n \"\"\"\n hidden = self.hidden.call(self.input, self.W, sampled=sampled)\n return self.session.run(hidden, feed_dict={self.input: x})\n\n def log(self, str, *args):\n print(str.format(*args))\n", "id": "6368674", "language": "Python", "matching_score": 2.7806735038757324, "max_stars_count": 7, "path": "harmonium/rbm.py" }, { "content": "import tensorflow as tf\nimport collections\n\ndef sample_bernoulli(probability):\n return tf.where(probability - tf.random_uniform(tf.shape(probability)) > 0.0,\n tf.ones_like(probability), tf.zeros_like(probability))\n\ndef sample_gaussian(mean, stddev=1.0, relu=False):\n sample = mean+tf.random_normal(tf.shape(mean), stddev=stddev)\n if relu:\n return tf.nn.relu(sample)\n else:\n return sample\n\n", "id": "5097335", "language": "Python", "matching_score": 0.27153468132019043, "max_stars_count": 7, "path": "harmonium/core.py" }, { "content": "import datetime\n\nimport gc\nimport tensorflow as tf\nimport skimage.io as io\nimport numpy as np\nfrom tqdm import tqdm\nimport itertools\nimport resnet_model\nfrom im_classes import IM_CLASSES\nfrom PIL import Image\nimport cv2\nimport sys\nimport glob\nimport os\n\nIMAGENET_MEANS = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 3], name='img_mean')\n\n\ndef transform_image(image):\n image = tf.image.resize_images(tf.image.decode_jpeg(tf.read_file(image), channels=3, dct_method='INTEGER_ACCURATE'),\n size=(224, 224))\n return image - IMAGENET_MEANS\n\n\ndef transform_ds(x):\n keys_to_features = {'score': tf.FixedLenFeature([], tf.int64),\n 'mask': tf.FixedLenFeature([], tf.string),\n 'image': tf.FixedLenFeature([], tf.string)}\n parsed_features = tf.parse_single_example(x, keys_to_features)\n image = transform_image(parsed_features['image'])\n masks = tf.reshape(tf.decode_raw(parsed_features['mask'], tf.int8), shape=[224, 224])\n return {'score': tf.cast(parsed_features['score'], tf.float32), 'mask': 
masks, 'image': image}\n\n\nclass SharpMask(resnet_model.Model):\n mask_size = 224\n types = {'score': tf.float32, 'mask': tf.int8, 'image': tf.float32}\n shapes = {'score': tf.TensorShape([None]),\n 'mask': tf.TensorShape([None, mask_size, mask_size]),\n 'image': tf.TensorShape([None, mask_size, mask_size, 3])}\n\n def __init__(self, train_path, validation_path, session=None, resnet_ckpt=None, summary_path=None,\n checkpoint_path=None, batch_size=32):\n super(SharpMask, self).__init__(\n resnet_size=50,\n bottleneck=True,\n num_classes=1001,\n num_filters=64,\n kernel_size=7,\n conv_stride=2,\n first_pool_size=3,\n first_pool_stride=2,\n second_pool_size=7,\n second_pool_stride=1,\n block_sizes=[3, 4, 6, 3],\n block_strides=[1, 2, 2, 2],\n final_size=2048,\n version=resnet_model.DEFAULT_VERSION,\n data_format=None,\n dtype=resnet_model.DEFAULT_DTYPE\n )\n if session is None:\n self.sess = tf.Session()\n else:\n self.sess = session\n\n it_structure = tf.data.Iterator.from_structure(self.types, self.shapes)\n self.iterator = it_structure.get_next()\n\n self.image_placeholder = tf.placeholder_with_default(\"\", shape=())\n\n self.image_input = self.iterator['image']\n self.score_target = self.iterator['score']\n self.seg_target = self.iterator['mask']\n\n self.score_placeholder = tf.placeholder_with_default([1.0], (1,))\n self.mask_placeholder = tf.placeholder_with_default(tf.ones((1, self.mask_size, self.mask_size), dtype=tf.int8),\n (1, self.mask_size, self.mask_size))\n dummy_ds = tf.data.Dataset.from_tensor_slices(\n {'image': tf.expand_dims(transform_image(self.image_placeholder), 0),\n 'score': self.score_placeholder, 'mask': self.mask_placeholder}).map(\n lambda x: {'score': tf.expand_dims(x['score'], 0), 'mask': tf.expand_dims(x['mask'], 0),\n 'image': tf.expand_dims(x['image'], 0)})\n self.placeholder_init_op = it_structure.make_initializer(dummy_ds)\n\n if train_path is not None:\n self.train_ds = self._create_dataset(train_path, batch_size)\n self.training_init_op = it_structure.make_initializer(self.train_ds)\n\n if validation_path is not None:\n self.validation_ds = self._create_dataset(validation_path, batch_size)\n self.validation_init_op = it_structure.make_initializer(self.validation_ds)\n\n if summary_path is not None:\n self.summary_writer = tf.summary.FileWriter(summary_path, self.sess.graph)\n\n if not os.path.exists(checkpoint_path):\n os.makedirs(checkpoint_path)\n\n self.checkpoint_file = os.path.join(checkpoint_path, 'sharpmask.ckpt')\n\n self.resnet_output = self(self.image_input, False)\n\n if resnet_ckpt is not None:\n saver = tf.train.Saver()\n saver.restore(self.sess, resnet_ckpt)\n\n self.block_layers = [self.sess.graph.get_tensor_by_name(\"resnet_model/block_layer{}:0\".format(i + 1)) for i in\n range(4)]\n\n self.training_mode = tf.placeholder_with_default(True, shape=())\n\n with tf.variable_scope(\"deepmask_trunk\"):\n trunk = tf.layers.conv2d(self.block_layers[-1], 512, (1, 1), activation=tf.nn.relu,\n data_format=self.data_format)\n trunk = tf.layers.flatten(trunk)\n trunk = tf.layers.dense(trunk, 512)\n self.sess.run(\n tf.variables_initializer(tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='deepmask_trunk')))\n\n with tf.variable_scope(\"segmentation_branch\"):\n seg_predictions = tf.layers.dense(trunk, 56 * 56)\n seg_predictions = tf.reshape(seg_predictions, [-1, 56, 56, 1])\n self.dm_seg_prediction = tf.squeeze(tf.image.resize_bilinear(seg_predictions, [224, 224]), 3)\n\n self.sess.run(\n 
tf.variables_initializer(tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='segmentation_branch')))\n\n with tf.variable_scope(\"score_branch\"):\n score_predictions = tf.layers.dropout(trunk, rate=0.5, training=self.training_mode)\n score_predictions = tf.layers.dense(score_predictions, 1024, activation=tf.nn.relu)\n score_predictions = tf.layers.dropout(score_predictions, rate=0.5, training=self.training_mode)\n self.score_predictions = tf.layers.dense(score_predictions, 1, name='score_out')\n\n self.sess.run(\n tf.variables_initializer(tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='score_branch')))\n\n #self.saver = tf.train.Saver()\n\n k = 32\n with tf.variable_scope(\"refinement\"):\n M = tf.layers.dense(trunk, k*7*7, name='vertical_0')\n M = tf.reshape(M, [-1, k, 7, 7]) if self.data_format == \"channels_first\" else tf.reshape(M, [-1, 7, 7, k])\n\n for i in range(1, 5):\n ki = int(k/2**(i-1))\n knext = int(ki/2)\n\n F = self.block_layers[4-i]\n S = tf.layers.conv2d(F, 64 if i < 4 else 32, (3, 3), padding='SAME',\n activation=tf.nn.relu, data_format=self.data_format,\n name='horizontal_{}_64'.format(i))\n\n S = tf.layers.conv2d(S, ki, (3, 3), padding='SAME',\n activation=tf.nn.relu, data_format=self.data_format,\n name='horizontal_{}_{}'.format(i, ki))\n S = tf.layers.conv2d(S, knext, (3, 3), padding='SAME', data_format=self.data_format,\n name='horizontal_{}_{}'.format(i, knext))\n\n M = tf.layers.conv2d(M, k/2**(i-1), (3, 3), padding='SAME',\n activation=tf.nn.relu, data_format=self.data_format,\n name='vertical_{}_{}'.format(i, ki))\n M = tf.layers.conv2d(M, knext, (3, 3), padding='SAME', data_format=self.data_format,\n name='vertical_{}_{}'.format(i, knext))\n\n M = tf.nn.relu(S + M)\n if self.data_format == \"channels_first\":\n M = tf.transpose(M, perm=[0, 2, 3, 1])\n M = tf.image.resize_bilinear(M, [M.shape[1] * 2, M.shape[2] * 2])\n M = tf.transpose(M, perm=[0, 3, 1, 2])\n else:\n M = tf.image.resize_bilinear(M, [M.shape[1] * 2, M.shape[2] * 2])\n\n refinement_out = tf.layers.conv2d(M, 1, (3, 3), padding='SAME',\n data_format=self.data_format, name='refinement_out')\n if self.data_format == \"channels_first\":\n refinement_out = tf.transpose(refinement_out, perm=[0, 2, 3, 1])\n\n refinement_out = tf.image.resize_bilinear(refinement_out,\n [refinement_out.shape[1] * 2, refinement_out.shape[2] * 2])\n refinement_out = tf.squeeze(refinement_out, axis=3)\n self.refinement_prediction = refinement_out\n\n\n self.sess.run(\n tf.variables_initializer(tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='refinement')))\n\n with tf.variable_scope(\"metrics\"):\n score_metric_prediction = tf.where(self.score_predictions > 0.0,\n tf.ones_like(self.score_predictions),\n -tf.ones_like(self.score_predictions))\n self.score_accuracy_metric, self.score_accuracy_update = tf.metrics.accuracy(self.score_target,\n score_metric_prediction)\n\n self.dm_seg_iou_metric, self.dm_seg_iou_update = self._create_seg_metrics(self.dm_seg_prediction)\n self.sm_seg_iou_metric, self.sm_seg_iou_update = self._create_seg_metrics(\n self.refinement_prediction)\n\n self.saver = tf.train.Saver()\n\n def restore(self):\n self.saver.restore(self.sess, self.checkpoint_file)\n\n def fit_deepmask(self, epochs=25, lr=0.001, score_factor=1.0 / 32, weight_decay=0.00005):\n with tf.variable_scope(\"deepmask_training\"):\n score_loss, segmentation_loss = self._binary_regression_loss(self.dm_seg_prediction,\n score_factor=score_factor)\n\n lr_var = tf.constant(lr) # tf.train.inverse_time_decay(lr, 
global_step, 1,weight_decay)\n weight_loss, weight_vars = self._weight_decay()\n weight_decay_opt = tf.train.GradientDescentOptimizer(learning_rate=weight_decay)\n weight_decay_opt_op = weight_decay_opt.minimize(weight_loss, var_list=weight_vars)\n opt = tf.train.MomentumOptimizer(learning_rate=lr_var, momentum=0.9, use_nesterov=True)\n opt_op = opt.minimize(segmentation_loss+score_loss)\n\n self.sess.run(\n tf.variables_initializer(tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='deepmask_training')))\n\n self._fit_cycle(epochs, lr_var,\n progress_ops_dict={'segmentation_loss': segmentation_loss, 'score_loss': score_loss,\n 'segmentation_iou': self.dm_seg_iou_metric,\n 'score_accuracy': self.score_accuracy_metric},\n opt_ops=[opt_op, weight_decay_opt_op],\n metric_update_ops=[self.dm_seg_iou_update, self.score_accuracy_update])\n\n print('Deep mask fit cycle completed')\n\n def fit_sharpmask(self, epochs=25, lr=0.001, weight_decay=0.00005):\n with tf.variable_scope(\"sharpmask_training\"):\n _, segmentation_loss = self._binary_regression_loss(self.refinement_prediction)\n\n global_step = tf.Variable(initial_value=0)\n lr_var = tf.constant(lr)\n\n segmentation_opt = tf.train.MomentumOptimizer(learning_rate=lr_var, momentum=0.9, use_nesterov=True)\n segmentation_opt_op = segmentation_opt.minimize(segmentation_loss, global_step=global_step,\n var_list=tf.get_collection(\n key=tf.GraphKeys.GLOBAL_VARIABLES,\n scope='refinement'))\n\n self.sess.run(\n tf.variables_initializer(tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope='sharpmask_training')))\n\n self._fit_cycle(epochs, lr_var,\n progress_ops_dict={'segmentation_loss': segmentation_loss,\n 'segmentation_iou': self.sm_seg_iou_metric},\n opt_ops=[segmentation_opt_op],\n metric_update_ops=[self.sm_seg_iou_update])\n\n print('Sharp mask fit cycle completed')\n\n def deepmask_validation(self):\n self._run_validation({'segmentation_iou': self.dm_seg_iou_metric, 'score_accuracy': self.score_accuracy_metric},\n metric_update_ops=[self.dm_seg_iou_update, self.score_accuracy_update])\n\n def sharpmask_validation(self):\n self._run_validation({'segmentation_iou': self.sm_seg_iou_metric},\n metric_update_ops=[self.sm_seg_iou_update])\n\n def eval_sharpmask(self, eval_source, eval_target):\n self._eval_prediction(eval_source, eval_target, self.refinement_prediction)\n\n def eval_deepmask(self, eval_source, eval_target):\n self._eval_prediction(eval_source, eval_target, self.dm_seg_prediction)\n\n def _create_seg_metrics(self, seg_predictions):\n mask_indices = tf.where(self.score_target > 0)\n seg_metric_prediction = tf.gather(seg_predictions, mask_indices)\n seg_metric_prediction = tf.where(seg_metric_prediction > 0.0, tf.ones_like(seg_metric_prediction),\n tf.zeros_like(seg_metric_prediction))\n seg_mask = tf.gather(self.seg_target, mask_indices)\n seg_mask = tf.where(seg_mask > 0, tf.ones_like(seg_mask), tf.zeros_like(seg_mask))\n return tf.metrics.mean_iou(seg_mask, seg_metric_prediction, 2)\n\n def _eval_prediction(self, eval_source, eval_target, seg_predictions, threshold=-1.0):\n self.sess.run([self.placeholder_init_op],\n feed_dict={self.image_placeholder: eval_source, self.training_mode: False})\n score_predictions, seg_predictions = self.sess.run([self.score_predictions, seg_predictions])\n\n print('Predicted score is {}'.format(score_predictions[0]))\n\n eval_image = io.imread(eval_source)\n mask = np.where(seg_predictions[0] > threshold, 255, 0)\n mask = np.expand_dims(mask, axis=2).astype(np.uint8)\n mask = 
cv2.resize(mask, (eval_image.shape[1], eval_image.shape[0]))\n mask = Image.fromarray(mask)\n mask = mask.convert('RGB')\n\n eval_image = Image.fromarray(eval_image)\n eval_image = eval_image.convert('RGB')\n\n target_img = Image.blend(eval_image, mask, 0.5)\n target_img.save(eval_target)\n\n print('Image with the mask applied stored at {}'.format(eval_target))\n\n def _eval_resnet(self, eval_source):\n self.sess.run([self.placeholder_init_op], feed_dict={self.image_placeholder: eval_source})\n prediction = self.sess.run([self.resnet_output])\n return IM_CLASSES[np.argmax(prediction[0])]\n\n def _create_dataset(self, data_path, batch_size):\n tfrecord_files = glob.glob(os.path.join(data_path, '*.tfrecord'))\n dataset = tf.data.TFRecordDataset(tfrecord_files, buffer_size=1572864000)\n dataset = dataset.shuffle(20000)\n dataset = dataset.map(transform_ds, num_parallel_calls=20)\n dataset = dataset.batch(32)\n\n return dataset\n\n def _binary_regression_loss(self, seg_predictions, score_factor=1.0 / 32):\n mask_target = tf.cast(self.seg_target, tf.float32)\n segmentation_loss = tf.reduce_mean(\n (1.0 + self.score_target) / 2.0 * tf.reduce_mean(tf.log(1.0 + tf.exp(-seg_predictions * mask_target)),\n axis=[1, 2]))\n score_loss = tf.reduce_mean(tf.log(1.0 + tf.exp(-self.score_target * self.score_predictions))) * score_factor\n return score_loss, segmentation_loss\n\n def _weight_decay(self, scopes=['deepmask_trunk', 'segmentation_branch', 'score_branch']):\n weights = list(itertools.chain(*[tf.get_collection(key=tf.GraphKeys.GLOBAL_VARIABLES, scope=scope) for scope in scopes]))\n weights = list(filter(lambda x: 'kernel' in x.name, weights))\n weights_norm = tf.reduce_sum(input_tensor=tf.stack([tf.nn.l2_loss(i) for i in weights]),\n name='weights_norm')\n\n return weights_norm, weights\n\n def _run_validation(self, progress_ops_dict, metric_update_ops, validation_steps_count=None):\n progress_ops_names, progress_ops = zip(*progress_ops_dict.items())\n progress_ops = list(progress_ops)\n\n validation_ops = metric_update_ops + progress_ops\n\n pbar = tqdm(total=validation_steps_count, desc='Validation', file=sys.stdout)\n counter = 0\n\n self.sess.run(tf.local_variables_initializer())\n self.sess.run(self.validation_init_op)\n\n while True:\n try:\n progress = self.sess.run(validation_ops, feed_dict={self.training_mode: False})[-len(progress_ops):]\n counter += 1\n pbar.update()\n pbar.set_description('Validation ({})'.format(\n ', '.join(['{}={}'.format(name, val) for name, val in zip(progress_ops_names, progress)])))\n except tf.errors.OutOfRangeError as oe:\n break\n\n result = {name: value for name, value in zip(progress_ops_names, progress)}\n result['total_steps'] = counter\n\n return result\n\n def _fit_cycle(self, epochs, lr_var, progress_ops_dict, opt_ops, metric_update_ops):\n progress_ops_names, progress_ops = zip(*progress_ops_dict.items())\n training_ops = opt_ops + metric_update_ops + list(progress_ops)\n\n train_steps_per_epoch = None\n validation_steps_per_epoch = None\n\n for e in range(epochs):\n tic = datetime.datetime.now()\n lr = self.sess.run([lr_var, self.training_init_op, tf.local_variables_initializer()])[0]\n\n print()\n tqdm.write(\"----- Epoch {}/{} ; learning rate {} -----\".format(e + 1, epochs, lr))\n pbar = tqdm(total=train_steps_per_epoch, desc='Training', file=sys.stdout)\n train_steps_per_epoch = 0\n\n while True:\n try:\n progress = self.sess.run(training_ops)[-len(progress_ops):]\n pbar.update()\n pbar.set_description('Training ({})'.format(\n ', 
'.join(['{}={}'.format(name, val) for name, val in zip(progress_ops_names, progress)])))\n train_steps_per_epoch += 1\n except tf.errors.OutOfRangeError as oe:\n break\n\n del pbar\n validation_results = self._run_validation(progress_ops_dict, metric_update_ops, validation_steps_per_epoch)\n training_report = ', '.join(\n ['Training {}={}'.format(name, val) for name, val in zip(progress_ops_names, progress)])\n validation_report = ', '.join(\n ['Validation {}={}'.format(name, val) for name, val in validation_results.items()])\n validation_steps_per_epoch = validation_results['total_steps']\n self.saver.save(self.sess, self.checkpoint_file)\n gc.collect()\n toc = datetime.datetime.now()\n tqdm.write(\n \"----- Epoch {} finished in {} -- {}. {}\".format(e + 1, toc - tic, training_report, validation_report))\n", "id": "4120958", "language": "Python", "matching_score": 3.634791135787964, "max_stars_count": 35, "path": "sharpmask.py" }, { "content": "from collections import deque\n\nfrom PIL import Image, ImageDraw\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom pycocotools.coco import COCO\nfrom tqdm import tqdm\nimport sys\nimport os\nimport argparse\n\n\ndef _int_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\nclass RecordCreator(object):\n def __init__(self, data_path, max_file_size=150000):\n self.data_path = data_path\n self.max_file_size = max_file_size\n\n def create_data(self, target_dir, data_type):\n coco = COCO('{}/annotations/instances_{}.json'.format(self.data_path, data_type))\n\n if not os.path.exists(target_dir):\n os.mkdir(target_dir)\n\n file_pattern = os.path.join(target_dir, 'coco_part{}.tfrecord')\n\n tfrecord_id = 0\n imgIds = coco.getImgIds()\n writer = tf.python_io.TFRecordWriter(file_pattern.format(tfrecord_id))\n\n balance = 0\n total_samples = 0\n pbar = tqdm(total=len(imgIds), desc='Creating record file')\n\n negative_queue = deque()\n no_anns = 0\n\n for i, id in enumerate(imgIds):\n img = coco.loadImgs(id)[0]\n if int((i + 1) % self.max_file_size) == 0:\n tfrecord_id += 1\n tfrecord_file = file_pattern.format(tfrecord_id)\n tqdm.write('Creating new tfrecord file id {}, name {}'.format(tfrecord_id, tfrecord_file))\n writer = tf.python_io.TFRecordWriter(tfrecord_file)\n\n im_path = '{}/images/{}/{}'.format(self.data_path, data_type, img['file_name'])\n\n annIds = coco.getAnnIds(imgIds=[id], iscrowd=0)\n anns = coco.loadAnns(annIds)\n score = 0\n if len(anns) == 0:\n no_anns += 1\n\n for ann in anns:\n score = self.get_score(ann, img)\n if score > 0:\n mask = Image.new('F', (img['width'], img['height']), color=-1)\n segs = list(zip(*[iter(ann['segmentation'][0])] * 2))\n ImageDraw.Draw(mask).polygon(segs, outline=1, fill=1)\n mask = np.asarray(mask)\n mask = cv2.resize(mask, (224, 224))\n mask = np.where(mask == -1.0, -1, 1).astype(np.int8)\n\n feature = {'score': _int_feature(score),\n 'image': _bytes_feature(im_path.encode()),\n 'mask': _bytes_feature(mask.tostring())}\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n writer.write(example.SerializeToString())\n total_samples += 1\n balance += score\n break\n\n\n\n if score < 0:\n feature = {'score': _int_feature(score),\n 'image': _bytes_feature(im_path.encode()),\n 'mask': _bytes_feature(mask.tostring())}\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n\n negative_queue.append(example)\n\n while 
balance > 0 and len(negative_queue) > 0:\n example = negative_queue.pop()\n writer.write(example.SerializeToString())\n total_samples += 1\n balance -= 1\n\n pbar.update()\n pbar.set_description(\n 'Creating record file (total samples created={}, balance={})'.format(total_samples, balance))\n\n print(i)\n tqdm.write('tfrecord file created, total samples {}, balance {}, {} images without annotation'.format(total_samples, balance, no_anns))\n\n def get_score(self, ann, img):\n ann_ratio = ann['area'] / (img['width'] * img['height'])\n\n ann_center = (int(ann['bbox'][0] + ann['bbox'][2] / 2), int(ann['bbox'][1] + ann['bbox'][3] / 2))\n ann_center_bounds = (range(int(img['width'] / 4), int(img['width'] - img['width'] / 4)),\n range(int(img['height'] / 4), int(img['height'] - img['height'] / 4)))\n ann_centered = ann_center[0] in ann_center_bounds[0] and ann_center[1] in ann_center_bounds[1]\n\n ann_br = (int(ann['bbox'][0] + ann['bbox'][2]), int(ann['bbox'][1] + ann['bbox'][3]))\n ann_fully_contained = ann['bbox'][0] > 0 and ann['bbox'][1] > 0 and \\\n ann_br[0] < img['width'] and ann_br[1] < img['height']\n\n return 1 if ann['iscrowd'] == 0 and ann_ratio > 0.05 and ann_centered and ann_fully_contained else -1\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Use this util to prepare tfrecord files before training DeepMask/SharpMask')\n\n parser.add_argument('--coco_path', action='store', dest='coco_path',\n help='A path to downloaded and unzipped coco dataset', required=True)\n parser.add_argument('--train_path', action=\"store\", dest=\"train_path\",\n help='A path to folder where to put train set tfrecord files', required=True)\n parser.add_argument('--validation_path', action=\"store\", dest=\"validation_path\",\n help='A path to folder where to put validation set tfrecord files', required=True)\n parser.add_argument('--max_per_file', action=\"store\", dest=\"max_per_file\",\n type=int, default=70000, help='Max number of samples per single tfrecord file')\n\n params = parser.parse_args(sys.argv[1:])\n rc = RecordCreator(data_path=params.coco_path)\n print('Preparing validation data')\n rc.create_data(params.validation_path, 'val2017')\n\n print('Preparing train data')\n rc.create_data(params.train_path, 'train2017')\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "id": "8353856", "language": "Python", "matching_score": 3.068716287612915, "max_stars_count": 35, "path": "prepare_data.py" }, { "content": "import sys\nimport argparse\nfrom sharpmask import SharpMask\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='DeepMask/SharpMask TensorFlow implementation')\n\n parser.add_argument('--model', action=\"store\", dest=\"model\", default='all',\n choices=['all', 'deepmask', 'sharpmask'],\n help='The model: deepmask, sharpmask or both',\n required=False)\n\n parser.add_argument('--restore', action=\"store_true\", dest=\"restore\",\n help='Restore model from checkpoint', required=False)\n\n parser.add_argument('--train', action=\"store_true\", dest=\"train\",\n help='Run the model training. 
For sharpmask model only refinement layers training is performed.',\n required=False)\n\n parser.add_argument('--validate', action=\"store_true\", dest=\"validate\",\n help='Run explicit validation of selected model on validation set provided in the validation_path argument',\n required=False)\n\n parser.add_argument('--evaluate', action=\"store_true\", dest=\"evaluate\",\n help='Evaluate the model applying mask on an image provided by the eval_source argument',\n required=False)\n\n parser.add_argument('--train_path', action=\"store\", dest=\"train_path\",\n help='A path to folder containing train set tfrecord files', required=False)\n\n parser.add_argument('--validation_path', action=\"store\", dest=\"validation_path\",\n help='A path to folder containing validation set tfrecord files', required=False)\n\n parser.add_argument('--resnet_ckpt', action=\"store\", dest=\"resnet_ckpt\",\n help='A path to pretrained resnet-50 checkpoint', required=False)\n\n parser.add_argument('--summary_path', action=\"store\", dest=\"summary_path\", help='A path to store model summary',\n required=False)\n\n parser.add_argument('--checkpoint_path', action=\"store\", dest=\"checkpoint_path\",\n help='A path to store model checkpoint',\n required=False)\n\n parser.add_argument('--eval_source', action=\"store\", dest=\"eval_source\", help='Source image for evalutation',\n required=False)\n\n parser.add_argument('--eval_target', action=\"store\", dest=\"eval_target\",\n help='Target file name for image with applied mask', required=False)\n\n params = parser.parse_args(sys.argv[1:])\n\n if not (params.train or params.validate or params.evaluate):\n print('To run the model at least one option from train, validate or evaluate should be chosen.',\n file=sys.stderr)\n parser.print_help(sys.stderr)\n sys.exit(-1)\n\n model = SharpMask(train_path=params.train_path, validation_path=params.validation_path,\n resnet_ckpt=params.resnet_ckpt, summary_path=params.summary_path,\n checkpoint_path=params.checkpoint_path)\n\n if params.restore:\n model.restore()\n\n if params.train:\n if params.model == 'deepmask' or params.train == 'all':\n model.fit_deepmask()\n if params.model == 'sharpmask' or params.model == 'all':\n model.fit_sharpmask()\n\n if params.validate:\n if params.model == 'deepmask':\n model.deepmask_validation()\n if params.model == 'sharpmask':\n model.sharpmask_validation()\n\n if params.evaluate:\n if params.model == 'deepmask':\n model.eval_deepmask(params.eval_source, params.eval_target)\n if params.model == 'sharpmask':\n model.eval_sharpmask(params.eval_source, params.eval_target)\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "id": "6688034", "language": "Python", "matching_score": 0.023149436339735985, "max_stars_count": 35, "path": "run_model.py" }, { "content": "import numpy as np\nfrom PIL import Image\n\n\ndef tile_weights(X, img_shape, tile_shape, tile_spacing=(0, 0)):\n columns = img_shape[0]*img_shape[1]\n rows = tile_shape[0]*tile_shape[1]\n shape = X.shape\n data = np.pad(X, ((0, rows-shape[0]), (0, columns-shape[1])), mode='constant', constant_values=((0, 0), (0, 0)))\n out = np.zeros(((img_shape[0]+tile_spacing[0])*tile_shape[0]-tile_spacing[0], (img_shape[1]+tile_spacing[1])*tile_shape[1]-tile_spacing[1]))\n for x, y in np.ndindex(tile_shape):\n img = data[y*tile_shape[0]+x].reshape(img_shape)\n out[x*(img_shape[0]+tile_spacing[0]):x*(img_shape[0]+tile_spacing[0])+img_shape[0],\n y * (img_shape[1] + tile_spacing[1]):y * (img_shape[1] + tile_spacing[1]) + img_shape[1]]=img\n return 
out.astype(np.uint8)\n\n\ndef save_weights(file, weights, shape=None, tile=None, spacing=(1,1)):\n \"\"\"\n Saves weights as tiled image, where each tile represents hidden units weight. If number of hidden units or number\n of visible units doesn't fit into shape do automatic padding with black pixels. Automatically scales weights to\n (0,255) range.\n :param file: string, file name to save image\n :param weights: 2d-array, weights matrix\n :param shape: tuple, image shape\n :param tile: tiles shape\n :param spacing: spacing between tiles\n \"\"\"\n weights = np.transpose(weights)\n current_min = weights.min()\n current_max = weights.max()\n weights = 255 * (weights - current_min) / (current_max - current_min)\n image = Image.fromarray(\n tile_weights(\n X=weights,\n img_shape=shape,\n tile_shape=tile,\n tile_spacing=spacing\n )\n )\n image.save(file)\n\n\ndef save_hidden_state(file, hidden_state):\n \"\"\"\n Just save hidden_state matrix to file as image\n :param file: string, file name to save image\n :param hidden_state: 2d array, hidden unit states across batch\n \"\"\"\n hidden_state = np.transpose(hidden_state)\n current_min = hidden_state.min()\n current_max = hidden_state.max()\n hidden_state = 255 * (hidden_state - current_min) / (current_max - current_min)\n image = Image.fromarray(hidden_state.astype(np.uint8))\n image.save(file)\n", "id": "7457224", "language": "Python", "matching_score": 1.0874255895614624, "max_stars_count": 7, "path": "harmonium/rbm_utils.py" }, { "content": "import tensorflow as tf\n\nclass CD(object):\n def __init__(self, model, n=1, lr=0.1, momentum=None):\n self.model = model\n self.n = n\n self.lr = lr\n self.momentum = momentum\n\n def get_cost_update(self, sample):\n positive_visible_state = sample\n positive_hidden_state = self.model.hidden.call(positive_visible_state, self.model.W)\n negative_visible_state, negative_hidden_state = self.sample_negative(positive_visible_state, positive_hidden_state)\n data_energy = self.model.energy(positive_visible_state, positive_hidden_state, scope='data_energy')\n model_energy = self.model.energy(negative_visible_state, negative_hidden_state, scope='model_energy')\n loss = tf.subtract(data_energy, model_energy, name='loss')\n\n if self.momentum:\n self.optimizer = tf.train.MomentumOptimizer(self.lr, self.momentum)\n else:\n self.optimizer = tf.train.GradientDescentOptimizer(self.lr, name='optimizer')\n\n update = self.optimizer.minimize(loss)\n return [data_energy, update]\n\n def sample_negative(self, visible, hidden):\n return self.model.burn_in(visible, hidden_state=hidden, n=self.n)\n\n\nclass PCD(CD):\n def __init__(self, model, n=1, lr=0.1, momentum=None):\n self.visible_negative = None\n self.hidden_negative = None\n\n super(PCD, self).__init__(model, n, lr, momentum)\n\n def sample_negative(self, visible, hidden):\n if self.visible_negative is None:\n self.visible_negative = visible\n\n\n [visible_negative, hidden_negative] = self.model.burn_in(self.visible_negative, hidden_state=self.hidden_negative, n=self.n)\n self.visible_negative = visible_negative\n self.hidden_negative = hidden_negative\n return [visible_negative, hidden_negative]\n\ndef cd(n=1, lr=0.1, momentum=None):\n \"\"\"\n Creates contrastive divergence optimizer\n :param lr: float, learning rate\n :param n: int, number of Gibbs sampling steps\n :return: contrastive divergence optimizer\n \"\"\"\n return lambda model: CD(model, n=n, lr=lr, momentum=momentum)\n\ndef pcd(n=1, lr=0.1, momentum=None):\n \"\"\"\n Creates contrastive divergence 
optimizer\n :param lr: float, learning rate\n :param n: int, number of Gibbs sampling steps\n :return: contrastive divergence optimizer\n \"\"\"\n return lambda model: PCD(model, n=n, lr=lr, momentum=momentum)", "id": "2049595", "language": "Python", "matching_score": 0.8883467316627502, "max_stars_count": 7, "path": "harmonium/optimizers.py" }, { "content": "import tensorflow as tf\n\nclass Regularizer(object):\n def __init__(self, l):\n self.l = l\n\n def __call__(self, model, learnable):\n raise NotImplementedError\n\n\nclass L2(Regularizer):\n \"\"\"\n Creates L2 regularizer\n :param l: regularization coefficient\n :return:\n \"\"\"\n def __init__(self, l):\n super(L2, self).__init__(l)\n\n def __call__(self, model, learnable):\n return learnable.assign_add(- self.l * learnable)\n\n\nclass L1(Regularizer):\n \"\"\"\n Creates L1 regularizer\n :param l: regularization coefficient\n :return:\n \"\"\"\n def __init__(self, l):\n super(L1, self).__init__(l)\n\n def __call__(self, model, learnable):\n return learnable.assign(tf.where(tf.abs(learnable) > self.l, learnable - self.l * tf.sign(learnable),\n tf.zeros(tf.shape(learnable))))\n\n\nclass SparsityTarget(Regularizer):\n def __init__(self, l, p):\n \"\"\"\n Creates sparsity target regularizer\n :param l: regularization coefficient\n :param p: sparsity target\n :return:\n \"\"\"\n super(SparsityTarget, self).__init__(l)\n self.p = p\n\n def __call__(self, model, learnable):\n q = tf.reduce_mean(model.hidden.call(model.input, model.W), 0)\n return learnable.assign(tf.add(learnable, self.l * (self.p-q)))", "id": "4038718", "language": "Python", "matching_score": 0.4346286356449127, "max_stars_count": 7, "path": "harmonium/regularizers.py" } ]
0.987886
fojek
[ { "content": "import os\nimport time\n\ni = 1\n\nprint 'Module video demarre.'\n\nwhile 1:\n\t# Film de 5 minutes 300000\n\tvideo = 'raspivid -t 300000 -o ' + str(i) + '.h264'\n\tos.system(video)\n\n\tphoto = 'raspistill -o ' + str(i) + '.jpg'\n\tos.system(photo)\t\n\n\ttime.sleep(2)\n\t\n\tif(i<5):\n\t\ti += 1\n\telse:\n\t\ti = 1\n", "id": "585894", "language": "Python", "matching_score": 1.004078984260559, "max_stars_count": 0, "path": "vid.py" }, { "content": "import os,sys,time\n\ndef enigme():\n\tprint \"C'est plus grand que Dieu,\"\n\tprint \"Et plus mechant que le diable.\"\n\tprint \"Les pauvres l'ont; les riches en ont besoin.\"\n\tprint \"Si vous le mangez, vous mourrez.\"\n\tprint \n\tprint \"Qu'est-ce que c'est?\"\n\tprint\n\ttest = raw_input(\"Reponse: \")\n\n\ttime.sleep(1)\n\t\n\treturn test\n\nos.system(\"clear\")\ndelais = 5\nwhile(True):\n\ttest = enigme()\n\tif(test == 'rien' or test == 'Rien'):\n\t\tprint\n\t\tprint\n\t\tprint\t\n\t\twhile(True):\n\t\t\tos.system(\"clear\")\n\t\t\ttest = raw_input(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\t\\t\\t\\tRYTHME MONTREAL\")\n\telse:\n\t\ttempsrestant = delais\n\t\twhile(tempsrestant > 0):\n\t\t\tos.system(\"clear\")\n\t\t\tprint '\\n\\n\\n\\t\\tErreur.\\n\\n\\t\\tReinitialisation dans ', tempsrestant,' secondes.'\n\t\t\ttime.sleep(1)\n\t\t\ttempsrestant -= 1\n\t\t\tos.system(\"clear\")\n\t\tdelais += 10\n\t\tos.system(\"clear\")\n", "id": "9002271", "language": "Python", "matching_score": 0.09164939820766449, "max_stars_count": 0, "path": "escape.py" }, { "content": "#!/usr/bin/python\n\nimport Adafruit_BMP.BMP085 as BMP085\nimport sqlite3 as lite\nimport sys\nimport time\nimport os \nimport datetime\n\nsensor = BMP085.BMP085()\n\ndef insereDB(t, p, a, sa):\n\tcon = lite.connect('sputnik.db')\n\tcur = con.cursor() \n\t\n\trequete = 'create table courant (altitue'\n\t\n\trequete = 'insert into science (time, temperature, pression, altitude, sl_pres) values (\"' + str(datetime.datetime.now()) + '\",' + str(t) + ',' + str(p) + ',' + str(a) + ',' + str(sa) + ');'\n\n\tcur.execute(requete)\n\n\tcon.commit()\n\t\n\n\nprint 'Science Monsta pret.'\n\n# Boucle principale\nwhile 1:\n \n\t\ttemp = sensor.read_temperature()\n\t\tpres = sensor.read_pressure()\n\t\talt = sensor.read_altitude()\n\t\tsl_pres = sensor.read_sealevel_pressure()\n\t\t\n\t\tinsereDB(temp, pres, alt, sl_pres)\n\t\ttime.sleep(5)\n", "id": "221408", "language": "Python", "matching_score": 1.9203370809555054, "max_stars_count": 0, "path": "scMonsta.py" }, { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sqlite3 as lite\nimport sys\n\ncon = None\n\ntry:\n con = lite.connect('sputnik.db')\n \n time = 13\n latitude = 25.13324\n longitude = -28.45544\n \n cur = con.cursor() \n \n for i in range(0, 5):\n\t\t\n\t\ttime += 1\n\t\tlatitude += 0.5\n\t\tlongitude -= 0.5\n\t\t\n\t\trequete = 'insert into position (time, latitude, longitude) values (' + str(time) + ',' + str(latitude) + ',' + str(longitude) + ');' \n \n\t\tcur.execute(requete)\n\t\t\n\t\t#cur = con.cursor() \n\t\tcur.execute('SELECT * from position')\n\t\t\n\t\tdata = cur.fetchall()\n\t\t\n\t\tprint \"===========\"\n\t\tfor row in data:\n\t\t\tfor col in row:\n\t\t\t\tprint col\n \nexcept lite.Error, e:\n \n print \"Error %s:\" % e.args[0]\n sys.exit(1)\n \nfinally:\n \n if con:\n\t\tcon.commit()\n\t\tcon.close()\n", "id": "12690457", "language": "Python", "matching_score": 1.8646689653396606, "max_stars_count": 0, "path": "testSQLite.py" }, { "content": "# Pinout pour le GPS / RPi:\n# 1 
2\n# 3 4\n# 5 6\n# 7 8\n# 9 10\n#\n# Vcc sur pin 1 (3.3V)\n# Gnd sur pin 9 (Gnd)\n# Rxd sur pin 8 (Txd)\n# Txd sur pin 10 (Rxd)\n\nimport serial\nimport sqlite3 as lite\nimport sys\nimport gammu\nimport time\nimport os \nimport datetime\n\ndef insereDB(test):\n\tcon = lite.connect('sputnik.db')\n\tcur = con.cursor() \n\t\n\tif(test == True):\n\t\ttime = str(infoGPS['heure']) + ':' + str(infoGPS['minute']) + ':' + str(infoGPS['seconde'])\n\t\trequete = 'insert into position (time, latitude, longitude, altitude) values (\"' + time + '\",' + str(infoGPS['lat_angle']) + ',' + str(infoGPS['lon_angle']) + \",\" + str(altitude()) + ');' \n\telse:\n\t\trequete = 'insert into position (time, altitude) values (\"' + str(datetime.datetime.now()) + '\",' + str(altitude()) + ');'\n\n\tprint requete\n\tcur.execute(requete)\n\n\trequete = 'select time from position;'\n\n\tresultat = cur.execute(requete)\n\t\n\tnb = 0\t\n\n\tfor row in resultat:\n\t\tnb += 1\n\t\n\tprint 'Nombre d enregistrements : ' + str(nb)\n\n\tcon.commit()\n\t\n# Retourne la derniere altitude connue\ndef altitude():\n\tcon = lite.connect('sputnik.db')\n\tcur = con.cursor() \n\trequete = 'select altitude from science order by time desc;'\n\tresultat = cur.execute(requete)\n\tresult = list()\n\tfor row in resultat:\n\t\tresult.append(row)\n\t\t\n\treturn result[0][0]\n\ndef getInfo( str ):\n\n # Separe aux ','\n infos = str.split(',')\n\n # Verifie la validite des coordonnees\n if len(infos[2]) == 0:\n return 0\n \n # Heure\n infoGPS['heure'] = int(infos[1][0:2])\n infoGPS['minute'] = int(infos[1][2:4])\n infoGPS['seconde'] = int(infos[1][4:6])\n\n # Latitude, longitude, altitude\n infoGPS['lat_angle'] = infos[2]\n infoGPS['lat_coord'] = infos[3]\n infoGPS['lon_angle'] = infos[4]\n infoGPS['lon_coord'] = infos[5]\n infoGPS['altitude'] = infos[11]\n\n return 1\n\n### Video init (background process)\nos.system(\"python vid.py &\")\n\n### ScienceMonsta (background process)\nos.system(\"python scMonsta.py &\")\n\n### GPS init\ngps = serial.Serial('/dev/ttyAMA0',9600, timeout=5)\n\ninfoGPS = { 'heure' : 0,\n 'minute' : 0,\n 'seconde' : 0,\n 'lat_angle' : 0,\n 'lat_coord' : '',\n 'lon_angle' : 0,\n 'lon_coord' : '',\n 'altitude' : 0}\n \nif not gps.isOpen():\n\tprint 'Erreur : impossible d ouvrir le port serie.'\n\texit()\n\nprint 'Connection GPS etablie.'\n\n### 3G init\n# Create object for talking with phone\nstate_machine = gammu.StateMachine()\n\n# Load config file\nstate_machine.ReadConfig()\n\n# Connect to the phone\nstate_machine.Init()\n\nmessage = {\n 'Text': 'Sputnik 11 pret au decollage.',\n 'SMSC': {'Location': 1},\n 'Number': '18195708580',\n}\n\nstate_machine.SendSMS(message)\n\n# Boucle principale\nwhile 1:\n data = gps.readline()\n \n if len(data) > 6:\n\n type = data[0:6]\n \n if type == \"$GPGGA\":\n if(getInfo(data[6:len(data)])):\n print 'Nouvelles coordonnees : '\n print infoGPS\n insereDB(True)\n message['Text'] = 'Position : ' + infoGPS['lat_angle'] + ',' + infoGPS['lon_angle'] + ', altitude : ' + str(altitude)\n state_machine.SendSMS(message)\n time.sleep(30)\n else:\n\t\t\t\tprint 'Pas de connection satellite.'\n\t\t\t\tinsereDB(False)\n\t\t\t\ttime.sleep(30)\n", "id": "9562355", "language": "Python", "matching_score": 3.151928663253784, "max_stars_count": 0, "path": "GPS.py" }, { "content": "#!/usr/bin/env python\n# Sample script to show how to send SMS\n\nfrom __future__ import print_function\nimport gammu\nimport sys\n\n# Create object for talking with phone\nstate_machine = gammu.StateMachine()\n\n# Load config 
file\nstate_machine.ReadConfig()\n\n# Connect to the phone\nstate_machine.Init()\n\n# Prepare message data\n# We tell that we want to use first SMSC number stored in phone\n# NOTE: lat/lon are not defined in this standalone sample; placeholder values keep it runnable\nlat = '0.0'\nlon = '0.0'\n\nmessage = {\n    'Text': 'Position: ' + lat + ',' + lon,\n    'SMSC': {'Location': 1},\n    'Number': '18195708580',\n}\n\n# Actually send the message\nstate_machine.SendSMS(message)\n\nwhile True:\n\ttest = state_machine.GetNextSMS(1, 0, 1)\n\t\n\tif test:\n\t\tprint(test)\n\t\tstate_machine.DeleteSMS(0)\n\t\n\t\n", "id": "8507387", "language": "Python", "matching_score": 1.287919521331787, "max_stars_count": 0, "path": "sms.py" }, { "content": "# commKoyo.py\n# ------------\n# Gestion de la communication avec Koyo, via libkoyo\n# ------------\n# \n# A faire :\n# - Ajouter la fonction d'ecriture des sorties\n#\n# ------------\n\n# Importation de la librarie libkoyo\nimport sys\nsys.path.insert(0, '/home/ProjetCabane/lib/Koyo')\n\n# Fonction de lecture des sorties\ndef ReadOut():\n\n\timport Koyo as plc\n\tmyKoyo = plc.Koyo('192.168.0.110')\n\tresult = myKoyo.ReadOutputs()\n\n\tprint 'Python : Readout : ' + result[::-1]\n\n\treturn int(result[::-1],2);\n\n# Fonction de lecture des entrees\ndef ReadIn():\n\n\timport Koyo as plc\n\tmyKoyo = plc.Koyo('192.168.0.110')\n\tresult = myKoyo.ReadInputs()\n\n\tprint 'Python : ReadIn : ' + result[::-1]\n\n\treturn int(result[::-1],2);\n\n# Fonction d'ecriture des sorties\ndef WriteOutput(a,b):\n\tprint 'Python : WriteOutput : ', a, b\n\timport Koyo as plc\n\tmyKoyo = plc.Koyo('192.168.0.110')\n\tmyKoyo.WriteOutput(a, b)\n    #return result;\n", "id": "4939281", "language": "Python", "matching_score": 0.5773502588272095, "max_stars_count": 0, "path": "commKoyo.py" }, { "content": "# Fichier requis pour l'importation du script", "id": "1516983", "language": "Python", "matching_score": 0.1446150690317154, "max_stars_count": 0, "path": "__init__.py" } ]
1.145999
zheyu98
[ { "content": "import argparse\nimport os\nimport sys\nimport time\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom a2c_ppo_acktr.envs import make_vec_envs\nfrom a2c_ppo_acktr.utils import get_render_func, get_vec_normalize\n\nsys.path.append('a2c_ppo_acktr')\n\nparser = argparse.ArgumentParser(description='RL')\nparser.add_argument(\n '--seed', type=int, default=1, help='random seed (default: 1)')\nparser.add_argument(\n '--log-interval',\n type=int,\n default=10,\n help='log interval, one log per n updates (default: 10)')\nparser.add_argument(\n '--env-name',\n default='slackline',\n help='environment to train on (default: PongNoFrameskip-v4)')\nparser.add_argument(\n '--load-dir',\n default='./trained_models/a2c',\n help='directory to save agent logs (default: ./trained_models/)')\nparser.add_argument(\n '--non-det',\n action='store_true',\n default=False,\n help='whether to use a non-deterministic policy')\nargs = parser.parse_args()\n\nargs.det = not args.non_det\n\nenv = make_vec_envs(\n args.env_name,\n args.seed + 1000,\n 1,\n None,\n None,\n device='cpu',\n allow_early_resets=False)\n\n# Get a render function\nrender_func = get_render_func(env)\n\n# We need to use the same statistics for normalization as used in training\nactor_critic, obs_rms = \\\n torch.load(os.path.join(args.load_dir, args.env_name + \".pt\"),\n map_location='cpu')\n\nvec_norm = get_vec_normalize(env)\nif vec_norm is not None:\n vec_norm.eval()\n vec_norm.obs_rms = obs_rms\n\nrecurrent_hidden_states = torch.zeros(1,\n actor_critic.recurrent_hidden_state_size)\nmasks = torch.zeros(1, 1)\n\nobs = env.reset()\n\nif render_func is not None:\n render_func('human')\n\ncount = 1\nob = obs.clone().detach() \nt = 0\nob = np.append(ob,t)[np.newaxis, :]\n\nwhile count:\n with torch.no_grad():\n value, action, _, recurrent_hidden_states = actor_critic.act(\n obs, recurrent_hidden_states, masks, deterministic=args.det)\n\n # Obser reward and next obs\n obs, reward, done, _ = env.step(action)\n\n masks.fill_(0.0 if done else 1.0)\n\n if render_func is not None:\n render_func('human')\n time.sleep(0.03)\n\n t += 0.01\n cc = obs.clone().detach() \n cc = np.append(cc, t)\n cc = cc[np.newaxis, :]\n ob = np.append(ob, cc, axis=0)\n if done:\n count = 0\nprint(np.mean(np.rad2deg(ob[:,2])))\nprint(np.mean(np.rad2deg(ob[:,3])))\nprint(np.std(np.rad2deg(ob[:,2])))\nprint(np.std(np.rad2deg(ob[:,3])))\nplt.plot(ob[:,-1], np.rad2deg(ob[:,2]),'b',label='phib')\nplt.plot(ob[:,-1], np.rad2deg(ob[:,3]),color='orange',label='phit')\nplt.xlim(0,4)\nplt.xlabel('Time in s')\nplt.ylabel('Angles in deg')\nplt.title('phib and phit with small initial deviation')\nplt.legend(loc='lower left')\nplt.grid()\nplt.show()\n\n \n\n", "id": "11917669", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "A2C_Controller/enjoy.py" }, { "content": "'''\nAuthor: <NAME>\nDate: 2021-03-25 21:52:42\nLastEditTime: 2021-05-07 13:51:39\nLastEditors: Please set LastEditors\nDescription: In User Settings Edit\nFilePath: /origin/home/zheyu/Desktop/Deep_Learning/together/IAM-Reproduce/plot_results.py\n'''\nfrom stable_baselines3.common import results_plotter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pickle\n\n# #COMMENT\n# Plot all reward monitors of processes using SB3\n# log_dir = '/tmp/gym'\n# results_plotter.plot_results(\n# [log_dir], 4e6, results_plotter.X_TIMESTEPS, \"Warehouse\")\n# #END COMMENT\n\n#COMMENT \n# Plot manually stored mean rewards\nwith open('./logmean_rewards.txt', 'rb') as f:\n 
mean_episode_rewards = pickle.load(f)\n\nmean_episode_rewards = np.array(mean_episode_rewards)\n# timesteps = HERE * mean_log_interval * processes * num_step\ntimesteps = np.arange(mean_episode_rewards.shape[0]) * 10 * 16 * 5/ 1e6\n# print(timesteps.shape)\n\n# EWMA\nrho = 0.995 # Rho value for smoothing\n\ns_prev = 0 # Initial value ewma value\n\n# Empty arrays to hold the smoothed data\newma, ewma_bias_corr = np.empty(0), np.empty(0)\n\nfor i,y in enumerate(mean_episode_rewards):\n \n # Variables to store smoothed data point\n s_cur = 0\n s_cur_bc = 0\n\n s_cur = rho * s_prev + (1-rho) * y\n s_cur_bc = s_cur / (1-rho**(i+1))\n \n # Append new smoothed value to array\n ewma = np.append(ewma,s_cur)\n ewma_bias_corr = np.append(ewma_bias_corr,s_cur_bc)\n\n s_prev = s_cur\n\n# plt.scatter(timesteps, mean_episode_rewards, s=3) # Plot the noisy data in gray\n# plt.plot(timesteps, ewma, 'r--', linewidth=3) # Plot the EWMA in red \nplt.plot(timesteps, ewma_bias_corr, 'g--', linewidth=2) # Plot the EWMA with bias correction in green\n# plt.plot(timesteps, data_clean, 'orange', linewidth=3) # Plot the original data in orange\nplt.xlabel('Timesteps[1e6]')\nplt.ylabel('Mean reward')\nplt.title('Slacklining Robot')\n# plt.xlim([0,2])\n# plt.ylim([26,42])\nplt.grid()\nplt.show()\n\n\n\n# plt.plot(timesteps, mean_episode_rewards, color='magenta')\n# plt.xlabel('Timesteps[1e6]')\n# plt.ylabel('Mean reward')\n# plt.title('Warehouse with IAM')\n# plt.xlim([0,4])\n# plt.ylim([26,42])\n# plt.grid()\n# plt.show()\n# #END COMMENT", "id": "10712810", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "A2C_Controller/plot_results_single.py" } ]
0
JoyChou93
[ { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\n api\n ~~~\n\n Implements API Server and Interface\n\n :author: Feei <<EMAIL>>\n :homepage: https://github.com/wufeifei/cobra\n :license: MIT, see LICENSE for more details.\n :copyright: Copyright (c) 2017 Feei. All rights reserved\n\"\"\"\nimport errno\nimport json\nimport multiprocessing\nimport os\nimport socket\nimport subprocess\nimport threading\nimport time\nimport traceback\n\nimport requests\nfrom flask import Flask, request, render_template\nfrom flask_restful import Api, Resource\nfrom werkzeug.urls import url_unquote\n\nfrom . import cli\nfrom .cli import get_sid\nfrom .config import Config, running_path, package_path\nfrom .engine import Running\nfrom .log import logger\nfrom .utils import allowed_file, secure_filename, PY2, split_branch\n\ntry:\n # Python 3\n import queue\nexcept ImportError:\n # Python 2\n import Queue as queue\n\nq = queue.Queue()\napp = Flask(__name__, static_folder='templates/asset')\nrunning_host = '0.0.0.0'\nrunning_port = 5000\n\n\ndef producer(task):\n q.put(task)\n\n\ndef consumer():\n while True:\n task = q.get()\n p = multiprocessing.Process(target=cli.start, args=task)\n p.start()\n p.join()\n q.task_done()\n\n\nclass AddJob(Resource):\n @staticmethod\n def post():\n data = request.json\n if not data or data == \"\":\n return {\"code\": 1003, \"msg\": \"Only support json, please post json data.\"}\n\n target = data.get(\"target\")\n formatter = data.get(\"formatter\")\n output = data.get(\"output\")\n rule = data.get(\"rule\")\n\n is_valid_key = key_verify(data=data)\n\n if is_valid_key is not True:\n return is_valid_key\n\n if not target or target == \"\":\n return {\"code\": 1002, \"msg\": \"URL cannot be empty.\"}\n\n if not formatter or formatter == '':\n formatter = 'json'\n if not output or output == '':\n output = ''\n if not rule or rule == '':\n rule = ''\n\n # Report All Id\n a_sid = get_sid(target, True)\n running = Running(a_sid)\n\n # Write a_sid running data\n running.init_list(data=target)\n\n # Write a_sid running status\n data = {\n 'status': 'running',\n 'report': ''\n }\n running.status(data)\n\n if isinstance(target, list):\n for t in target:\n # Scan\n arg = (t, formatter, output, rule, a_sid)\n producer(task=arg)\n\n result = {\n 'msg': 'Add scan job successfully.',\n 'sid': a_sid,\n 'total_target_num': len(target),\n }\n else:\n arg = (target, formatter, output, rule, a_sid)\n producer(task=arg)\n result = {\n 'msg': 'Add scan job successfully.',\n 'sid': a_sid,\n 'total_target_num': 1,\n }\n\n return {\"code\": 1001, \"result\": result}\n\n\nclass JobStatus(Resource):\n @staticmethod\n def post():\n data = request.json\n if not data or data == \"\":\n return {\"code\": 1003, \"msg\": \"Only support json, please post json data.\"}\n\n sid = data.get(\"sid\")\n\n is_valid_key = key_verify(data=data)\n if is_valid_key is not True:\n return is_valid_key\n\n if not sid or sid == \"\":\n return {\"code\": 1002, \"msg\": \"sid is required.\"}\n\n sid = str(data.get(\"sid\")) # 需要拼接入路径,转为字符串\n running = Running(sid)\n if running.is_file() is not True:\n data = {\n 'code': 1004,\n 'msg': 'scan id does not exist!',\n 'sid': sid,\n 'status': 'no such scan',\n 'report': ''\n }\n return data\n else:\n result = running.status()\n r_data = running.list()\n if result['status'] == 'running':\n ret = True\n result['still_running'] = dict()\n for s_sid, git in r_data['sids'].items():\n if Running(s_sid).is_file(True) is False:\n result['still_running'].update({s_sid: git})\n ret = False\n if 
ret:\n result['status'] = 'done'\n running.status(result)\n data = {\n 'msg': 'success',\n 'sid': sid,\n 'status': result.get('status'),\n 'report': request.url_root + result.get('report'),\n 'still_running': result.get('still_running'),\n 'total_target_num': r_data.get('total_target_num'),\n 'not_finished': int(r_data.get('total_target_num')) - len(r_data.get('sids'))\n + len(result.get('still_running')),\n }\n return {\"code\": 1001, \"result\": data}\n\n\nclass FileUpload(Resource):\n @staticmethod\n def post():\n \"\"\"\n Scan by uploading compressed files\n :return:\n \"\"\"\n if 'file' not in request.files:\n return {'code': 1002, 'result': \"File can't empty!\"}\n file_instance = request.files['file']\n if file_instance.filename == '':\n return {'code': 1002, 'result': \"File name can't empty!\"}\n if file_instance and allowed_file(file_instance.filename):\n filename = secure_filename(file_instance.filename)\n dst_directory = os.path.join(package_path, filename)\n file_instance.save(dst_directory)\n # Start scan\n a_sid = get_sid(dst_directory, True)\n data = {\n 'status': 'running',\n 'report': ''\n }\n Running(a_sid).status(data)\n try:\n cli.start(dst_directory, None, 'stream', None, a_sid=a_sid)\n except Exception as e:\n traceback.print_exc()\n code, result = 1001, {'sid': a_sid}\n return {'code': code, 'result': result}\n else:\n return {'code': 1002, 'result': \"This extension can't support!\"}\n\n\nclass ResultData(Resource):\n @staticmethod\n def post():\n \"\"\"\n pull scan result data.\n :return:\n \"\"\"\n data = request.json\n if not data or data == \"\":\n return {\"code\": 1003, \"msg\": \"Only support json, please post json data.\"}\n\n s_sid = data.get('sid')\n if not s_sid or s_sid == \"\":\n return {\"code\": 1002, \"msg\": \"sid is required.\"}\n\n s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid))\n if not os.path.exists(s_sid_file):\n return {'code': 1002, 'msg': 'No such target.'}\n\n with open(s_sid_file, 'r') as f:\n scan_data = json.load(f)\n if scan_data.get('code') == 1001:\n scan_data = scan_data.get('result')\n else:\n return {\n 'code': scan_data.get('code'),\n 'msg': scan_data.get('msg'),\n }\n\n rule_filter = dict()\n for vul in scan_data.get('vulnerabilities'):\n rule_filter[vul.get('id')] = vul.get('rule_name')\n\n return {\n 'code': 1001,\n 'result': {\n 'scan_data': scan_data,\n 'rule_filter': rule_filter,\n }\n }\n\n\nclass ResultDetail(Resource):\n @staticmethod\n def post():\n \"\"\"\n get vulnerable file content\n :return:\n \"\"\"\n data = request.json\n if not data or data == \"\":\n return {'code': 1003, 'msg': 'Only support json, please post json data.'}\n\n sid = data.get('sid')\n file_path = url_unquote(data.get('file_path'))\n\n if not sid or sid == '':\n return {\"code\": 1002, \"msg\": \"sid is required.\"}\n\n if not file_path or file_path == '':\n return {'code': 1002, 'msg': 'file_path is required.'}\n\n s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=sid))\n if not os.path.exists(s_sid_file):\n return {'code': 1002, 'msg': 'No such target.'}\n\n with open(s_sid_file, 'r') as f:\n target_directory = json.load(f).get('result').get('target_directory')\n\n if not target_directory or target_directory == '':\n return {'code': 1002, 'msg': 'No such directory'}\n\n if PY2:\n file_path = map(secure_filename, [path.decode('utf-8') for path in file_path.split('/')])\n else:\n file_path = map(secure_filename, [path for path in file_path.split('/')])\n\n filename = target_directory\n for _dir in 
file_path:\n filename = os.path.join(filename, _dir)\n if os.path.exists(filename):\n extension = guess_type(filename)\n if is_text(filename):\n with open(filename, 'r') as f:\n file_content = f.read()\n else:\n file_content = 'This is a binary file.'\n else:\n return {'code': 1002, 'msg': 'No such file.'}\n\n return {'code': 1001, 'result': {'file_content': file_content, 'extension': extension}}\n\n\nclass Search(Resource):\n @staticmethod\n def post():\n \"\"\"\n Search specific rule.\n :return:\n \"\"\"\n data = request.json\n if not data or data == \"\":\n return {'code': 1003, 'msg': 'Only support json, please post json data.'}\n\n sid = data.get('sid')\n if not sid or sid == '':\n return {'code': 1002, 'msg': 'sid is required.'}\n\n rule_id = data.get('rule_id')\n if not rule_id or rule_id == '':\n return {'code': 1002, 'msg': 'rule_id is required.'}\n\n scan_list_file = os.path.join(running_path, '{sid}_list'.format(sid=sid))\n if not os.path.exists(scan_list_file):\n return {'code': 1002, 'msg': 'No such sid.'}\n\n with open(scan_list_file, 'r') as f:\n scan_list = json.load(f)\n\n if not isinstance(rule_id, list):\n rule_id = [rule_id]\n\n search_data = list()\n for s_sid in scan_list.get('sids').keys():\n target, branch = split_branch(scan_list.get('sids').get(s_sid))\n search_result = search_rule(s_sid, rule_id)\n cvi_count = list(search_result.values())\n if int(cvi_count[0]) > 0:\n search_data.append({\n 'target_info': {\n 'sid': s_sid,\n 'target': target,\n 'branch': branch,\n },\n 'search_result': search_result,\n })\n\n return {\n 'code': 1001,\n 'result': search_data,\n }\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef summary():\n a_sid = request.args.get(key='sid')\n key = Config(level1=\"cobra\", level2=\"secret_key\").value\n if a_sid is None:\n return render_template(template_name_or_list='index.html',\n key=key)\n\n status_url = 'http://{host}:{port}/api/status'.format(host=running_host, port=running_port)\n post_data = {\n 'key': key,\n 'sid': a_sid,\n }\n headers = {\n \"Content-Type\": \"application/json\",\n }\n r = requests.post(url=status_url, headers=headers, data=json.dumps(post_data))\n try:\n scan_status = json.loads(r.text)\n except ValueError as e:\n return render_template(template_name_or_list='error.html',\n msg='Check scan status failed: {0}'.format(e))\n\n if scan_status.get('code') != 1001:\n return render_template(template_name_or_list='error.html',\n msg=scan_status.get('msg'))\n else:\n if scan_status.get('result').get('status') == 'running':\n still_running = scan_status.get('result').get('still_running')\n for s_sid, target_str in still_running.items():\n target, branch = split_branch(target_str)\n still_running[s_sid] = {'target': target,\n 'branch': branch}\n else:\n still_running = dict()\n\n scan_status_file = os.path.join(running_path, '{sid}_status'.format(sid=a_sid))\n\n scan_list = Running(a_sid).list()\n\n start_time = os.path.getctime(filename=scan_status_file)\n start_time = time.localtime(start_time)\n start_time = time.strftime('%Y-%m-%d %H:%M:%S', start_time)\n\n total_targets_number = scan_status.get('result').get('total_target_num')\n not_finished_number = scan_status.get('result').get('not_finished')\n\n total_vul_number, critical_vul_number, high_vul_number, medium_vul_number, low_vul_number = 0, 0, 0, 0, 0\n rule_num = dict()\n rules = dict()\n targets = list()\n\n for s_sid, target_str in scan_list.get('sids').items():\n if s_sid not in still_running:\n target_info = dict()\n\n # 分割项目地址与分支,默认 master\n target, branch 
= split_branch(target_str)\n\n target_info.update({\n 'sid': s_sid,\n 'target': target,\n 'branch': branch,\n })\n s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid))\n with open(s_sid_file, 'r') as f:\n s_sid_data = json.load(f)\n if s_sid_data.get('code') != 1001:\n continue\n else:\n s_sid_data = s_sid_data.get('result')\n total_vul_number += len(s_sid_data.get('vulnerabilities'))\n\n target_info.update({'total_vul_number': len(s_sid_data.get('vulnerabilities'))})\n target_info.update(s_sid_data)\n\n targets.append(target_info)\n\n for vul in s_sid_data.get('vulnerabilities'):\n if 9 <= int(vul.get('level')) <= 10:\n critical_vul_number += 1\n elif 6 <= int(vul.get('level')) <= 8:\n high_vul_number += 1\n elif 3 <= int(vul.get('level')) <= 5:\n medium_vul_number += 1\n elif 1 <= int(vul.get('level')) <= 2:\n low_vul_number += 1\n\n try:\n rule_num[vul.get('rule_name')] += 1\n except KeyError:\n rule_num[vul.get('rule_name')] = 1\n\n rules[vul.get('id')] = vul.get('rule_name')\n\n return render_template(template_name_or_list='summary.html',\n total_targets_number=total_targets_number,\n not_finished_number=not_finished_number,\n start_time=start_time,\n targets=targets,\n a_sid=a_sid,\n total_vul_number=total_vul_number,\n critical_vul_number=critical_vul_number,\n high_vul_number=high_vul_number,\n medium_vul_number=medium_vul_number,\n low_vul_number=low_vul_number,\n rule_num=rule_num,\n rules=rules,\n running=still_running,)\n\n\ndef key_verify(data):\n key = Config(level1=\"cobra\", level2=\"secret_key\").value\n _key = data.get(\"key\")\n\n if _key == key:\n return True\n elif not _key or _key == \"\":\n return {\"code\": 1002, \"msg\": \"Key cannot be empty.\"}\n elif not _key == key:\n return {\"code\": 4002, \"msg\": \"Key verify failed.\"}\n else:\n return {\"code\": 4002, \"msg\": \"Unknown key verify error.\"}\n\n\ndef is_text(fn):\n msg = subprocess.Popen(['file', fn], stdout=subprocess.PIPE).communicate()[0]\n return 'text' in msg.decode('utf-8')\n\n\ndef guess_type(fn):\n import mimetypes\n extension = mimetypes.guess_type(fn)[0]\n if extension:\n \"\"\"text/x-python or text/x-java-source\"\"\"\n # extension = extension.split('/')[1]\n extension = extension.replace('-source', '')\n else:\n extension = fn.split('/')[-1].split('.')[-1]\n\n custom_ext = {\n 'html': 'htmlmixed',\n 'md': 'markdown',\n }\n if custom_ext.get(extension) is not None:\n extension = custom_ext.get(extension)\n\n return extension.lower()\n\n\ndef search_rule(sid, rule_id):\n \"\"\"\n Search specific rule name in scan data.\n :param sid: scan data id\n :param rule_id: a list of rule name\n :return: {rule_name1: num1, rule_name2: num2}\n \"\"\"\n scan_data_file = os.path.join(running_path, '{sid}_data'.format(sid=sid))\n search_result = dict.fromkeys(rule_id, 0)\n if not os.path.exists(scan_data_file):\n return search_result\n\n with open(scan_data_file, 'r') as f:\n scan_data = json.load(f)\n\n if scan_data.get('code') == 1001 and len(scan_data.get('result').get('vulnerabilities')) > 0:\n for vul in scan_data.get('result').get('vulnerabilities'):\n if vul.get('id') in rule_id:\n search_result[vul.get('id')] += 1\n return search_result\n else:\n return search_result\n\n\ndef start(host, port, debug):\n logger.info('Start {host}:{port}'.format(host=host, port=port))\n api = Api(app)\n\n api.add_resource(AddJob, '/api/add')\n api.add_resource(JobStatus, '/api/status')\n api.add_resource(FileUpload, '/api/upload')\n api.add_resource(ResultData, '/api/list')\n 
api.add_resource(ResultDetail, '/api/detail')\n api.add_resource(Search, '/api/search')\n\n # consumer\n threads = []\n for i in range(5):\n threads.append(threading.Thread(target=consumer, args=()))\n\n for i in threads:\n i.setDaemon(daemonic=True)\n i.start()\n\n try:\n global running_port, running_host\n running_host = host if host != '0.0.0.0' else '127.0.0.1'\n running_port = port\n app.run(debug=debug, host=host, port=int(port), threaded=True, processes=1)\n except socket.error as v:\n if v.errno == errno.EACCES:\n logger.critical('[{err}] must root permission for start API Server!'.format(err=v.strerror))\n exit()\n else:\n logger.critical('{msg}'.format(msg=v.strerror))\n\n logger.info('API Server start success')\n", "id": "2829708", "language": "Python", "matching_score": 0, "max_stars_count": 2, "path": "cobra/api.py" } ]
0
tengerye
[ { "content": "from models import models\n\n# utility params\nfig_mode = None\nembed_plot_epoch=10\n\n# model params\nuse_gpu = True\ndataset_mean = (0.5, 0.5, 0.5)\ndataset_std = (0.5, 0.5, 0.5)\n\nbatch_size = 512\nepochs = 1000\ngamma = 10\ntheta = 1\n\n# path params\ndata_root = './data'\n\nmnist_path = data_root + '/MNIST'\nmnistm_path = data_root + '/MNIST_M'\nsvhn_path = data_root + '/SVHN'\nsyndig_path = data_root + '/SynthDigits'\n\nsave_dir = './experiment'\n\n\n# specific dataset params\nextractor_dict = {'MNIST_MNIST_M': models.Extractor(),\n 'SVHN_MNIST': models.SVHN_Extractor(),\n 'SynDig_SVHN': models.SVHN_Extractor()}\n\nclass_dict = {'MNIST_MNIST_M': models.Class_classifier(),\n 'SVHN_MNIST': models.SVHN_Class_classifier(),\n 'SynDig_SVHN': models.SVHN_Class_classifier()}\n\ndomain_dict = {'MNIST_MNIST_M': models.Domain_classifier(),\n 'SVHN_MNIST': models.SVHN_Domain_classifier(),\n 'SynDig_SVHN': models.SVHN_Domain_classifier()}\n", "id": "12820388", "language": "Python", "matching_score": 0, "max_stars_count": 167, "path": "train/params.py" } ]
0
Las-Desire
[ { "content": "# -*- coding:utf-8 -*-\nimport unittest\nimport re\n\nclass JsonValidateCase(unittest.TestCase):\n\n def setUp(self):\n json_data_file_path = './data/events.json'\n json_schema_file_path = './test/json-schema.json'\n\n import json\n self.json_data = json.loads(self.get_json_from_file(json_data_file_path))\n self.json_schema = json.loads(self.get_json_from_file(json_schema_file_path))\n\n def get_json_from_file(self, path):\n with open(path, 'r') as f:\n return self.clean_json(f.read())\n\n\n def clean_json(self, string):\n string = re.sub(\",[ \\t\\r\\n]+}\", \"}\", string)\n string = re.sub(\",[ \\t\\r\\n]+\\]\", \"]\", string)\n return string\n\n\n def test_data_json(self):\n from jsonschema import validate, ValidationError\n try:\n validate(self.json_data, self.json_schema)\n except Exception as e:\n self.fail(str(e))\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "8285110", "language": "Python", "matching_score": 0, "max_stars_count": 88, "path": "test/test.py" }, { "content": "# -*- coding:utf-8 -*-\nimport json\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom collections import OrderedDict\n\n\ndef sorting_dict_by_key(src_dict, reverse=False):\n return _sorting_dict(src_dict=src_dict, sorting_index=0, reverse=reverse)\n\n\ndef sorting_dict_by_value(src_dict, reverse=False):\n return _sorting_dict(src_dict=src_dict, sorting_index=1, reverse=reverse)\n\n\ndef _sorting_dict(src_dict, sorting_index=0, reverse=False):\n return dict(OrderedDict(sorted(src_dict.items(), key=lambda t: t[sorting_index], reverse=reverse)))\n\n\njson_data_file_path = './data/events.json'\n\nevents = None\n\nwith open(json_data_file_path, 'r') as f:\n events = f.read()\n\nif events:\n events_json = json.loads(events, encoding=\"utf-8\")\n event_list = events_json['events']\n\n tag_result = defaultdict(lambda: 0)\n date_result = defaultdict(lambda: 0)\n month_result = defaultdict(lambda: 0)\n week_day_result = defaultdict(lambda: 0)\n st_hour_result = defaultdict(lambda: 0)\n ed_hour_result = defaultdict(lambda: 0)\n url_list = []\n\n for e in event_list:\n st = datetime.strptime(e['start'], \"%Y-%m-%d %H:%M:%S\")\n ed = datetime.strptime(e['end'], \"%Y-%m-%d %H:%M:%S\")\n\n # tag\n tag_list = e['tags'].split(',')\n for tag_key in tag_list:\n tag_key = tag_key.strip().lower()\n tag_result[tag_key] += 1\n\n # keys : date & weekday & month & st & ed\n week_day_key = st.weekday()\n date_key = st.strftime('%Y-%m-%d')\n month_key = st.strftime('%m')\n st_hour_key = st.strftime('%H')\n ed_hour_key = ed.strftime('%H')\n\n # date\n date_result[date_key] += 1\n\n # month\n month_result[month_key] += 1\n\n # week_day_result\n week_day_result[week_day_key] += 1\n\n # st_hour_result\n st_hour_result[st_hour_key] += 1\n\n # ed_hour_result\n ed_hour_result[ed_hour_key] += 1\n\n if e.get('url', None):\n url_list.append(e['url'])\n\n # sorting\n tag_result = sorting_dict_by_value(src_dict=tag_result, reverse=True)\n date_result = sorting_dict_by_key(src_dict=date_result, reverse=True)\n month_result = sorting_dict_by_key(src_dict=month_result, reverse=False)\n week_day_result = sorting_dict_by_key(src_dict=week_day_result, reverse=False)\n st_hour_result = sorting_dict_by_key(src_dict=st_hour_result, reverse=False)\n ed_hour_result = sorting_dict_by_key(src_dict=ed_hour_result, reverse=False)\n hour_keys = [\"%02d\" % i for i in range(0, 24)]\n\n for hour_key in hour_keys:\n if hour_key not in st_hour_result:\n st_hour_result[hour_key] = 0\n if hour_key not in ed_hour_result:\n 
ed_hour_result[hour_key] = 0\n\n statistic = {\n 'tag': tag_result,\n 'date': date_result,\n 'month': month_result,\n 'weekday': week_day_result,\n 'st_hour': st_hour_result,\n 'ed_hour': ed_hour_result\n }\n\n sorted_tags = sorted(tag_result.keys())\n tags = {\n 'tags': sorted_tags\n }\n sorted_url = sorted(url_list)\n urls = {\n 'url': sorted_url\n }\n\n write_file_path = './data/statistic.json'\n with open(write_file_path, 'w') as f:\n f.write(json.dumps(statistic, ensure_ascii=False, indent=4))\n\n write_file_path = './data/tags.json'\n with open(write_file_path, 'w') as f:\n f.write(json.dumps(tags, ensure_ascii=False, indent=4))\n\n write_file_path = './data/url.json'\n with open(write_file_path, 'w') as f:\n f.write(json.dumps(urls, ensure_ascii=False, indent=4))\n", "id": "1877935", "language": "Python", "matching_score": 3.3705339431762695, "max_stars_count": 88, "path": "script/event_statistic.py" }, { "content": "# -*- coding:utf-8 -*-\nimport json\nfrom datetime import datetime\njson_data_file_path = './data/events.json'\n\nevents = None\n\nwith open(json_data_file_path, 'r') as f:\n events = f.read()\n\nif events:\n events_json = json.loads(events, encoding=\"utf-8\")\n event_list = events_json['events']\n\n classified_events = dict()\n\n for e in event_list:\n st = datetime.strptime(e['start'], \"%Y-%m-%d %H:%M:%S\")\n key = st.strftime('%Y-%m')\n if key not in classified_events:\n classified_events[key] = dict()\n classified_events[key]['events'] = list()\n classified_events[key]['events'].append(e)\n\n for k, v in classified_events.items():\n write_file_path = './data/events-' + k + '.json'\n with open(write_file_path, 'w') as f:\n f.write(json.dumps(v, ensure_ascii=False, indent=4))\n", "id": "6931293", "language": "Python", "matching_score": 2.419788360595703, "max_stars_count": 88, "path": "script/event_seperator.py" } ]
2.419788
RossWilliamson
[ { "content": "import threading\nfrom time import sleep\nimport RPi.GPIO as GPIO\nimport logging\n\nlogging.basicConfig()\n\nclass hvacControl(threading.Thread):\n def __init__(self, temperature,\n update_rate = 10,\n hysterisis = 0.5):\n threading.Thread.__init__(self)\n self.logger = logging.getLogger('HVACControl')\n self.logger.setLevel(logging.DEBUG)\n \n self.temps = temperature\n self.update_rate = update_rate\n self.hysterisis = hysterisis\n #Set stupid setpoints \n self.heater_setpoint = -1e9\n self.cooling_setpoint = 1e9\n\n self.heat_max = 27\n self.cool_min = 10\n\n self.ATTIC_FAN = 22\n self.VENT_FAN = 18\n self.AC_UNIT = 23\n self.HEATER_UNIT = 24 \n\n self.stop_event = threading.Event()\n self.mutex = threading.Lock()\n \n self.setup_pins()\n self.start_safety_timer()\n\n def __del__(self):\n self.logger.info(\"Deleting Myself\")\n self.stop_event.set()\n \n def setup_pins(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n\n GPIO.setup(self.ATTIC_FAN, GPIO.OUT)\n GPIO.setup(self.VENT_FAN, GPIO.OUT)\n GPIO.setup(self.AC_UNIT, GPIO.OUT)\n GPIO.setup(self.HEATER_UNIT, GPIO.OUT) \n\n #And make sure they are all false\n GPIO.output(self.ATTIC_FAN, True) #wired up wrong\n GPIO.output(self.VENT_FAN, False)\n GPIO.output(self.AC_UNIT, False)\n GPIO.output(self.HEATER_UNIT, False)\n \n def start_safety_timer(self):\n self.logger.debug(\"Starting safety timer\")\n self.s_thread = threading.Thread(target=self.safety_timer)\n self.s_thread.deamon = True\n self.s_thread.start()\n \n def safety_timer(self):\n # Hard coded timer for compressor safety\n # No varibles here for time do not change\n self.ok_to_start = False\n for i in xrange(5):\n t_time = 5-i\n str = \"Time remaining to start %i\" % t_time\n self.logger.debug(str)\n sleep(60) \n self.ok_to_start = True\n self.logger.debug(\"OK TO START\")\n\n def start_fan_timer(self):\n self.logger.debug(\"Starting Fan timer\")\n self.f_thread = threading.Thread(target=self.fan_timer)\n self.f_thread.deamon = True\n self.f_thread.start()\n\n def fan_timer(self):\n #Time to wait before turning fan off\n #Hard coded to 90 seconds but not a safety isse\n sleep(60):\n GPIO.output(self.VENT_FAN, False)\n\n def set_heater_setpoint(self, temp):\n if temp > self.heat_max:\n self.logger.warn(\"Setting to max temp\")\n self.heater_setpoint = temp\n self.heater_on = self.heater_setpoint\n self.heater_off = self.heater_setpoint + self.hysterisis\n\n def set_cooling_setpoint(self, temp):\n if temp < self.cool_min:\n self.logger.warn(\"Setting to min temp\")\n self.cooling_setpoint = temp\n self.cooling_on = self.cooling_setpoint\n self.cooling_off = self.cooling_setpoing - self.hysterisis\n\n def set_state(self, state):\n if state == \"Cool\":\n self.state = state\n elif state == \"Heat\":\n self.state = state\n elif state == \"Fan\":\n self.state = state\n else:\n state = \"Off\"\n\n def AC_Switch(self, state):\n if state is True:\n GPIO.output(self.VENT_FAN, True)\n GPIO.output(self.AC_UNIT, True)\n else:\n GPIO.output(self.AC_UNIT, False)\n self.start_fan_timer()\n self.start_safety_timer()\n\n def heater_Switch(self, state):\n if state is True:\n GPIO.output(self.VENT_FAN, True)\n GPIO.output(self.HEATER_UNIT, True)\n else:\n GPIO.output(self.HEATER_UNIT, False)\n self.start_fan_timer()\n self.start_safety_timer()\n\n def vent_fan_Switch(self, state):\n if state is True:\n GPIO.output(self.VENT_FAN, True):\n else:\n GPIO.output(self.VENT_FAN, False):\n\n\n def state_machine(self):\n room_temp = self.temps.get_room_t()\n if self.state == 
\"Cool\":\n if room_temp > self.cooling_on:\n self.AC_Switch(True):\n elif room_temp < self.cooling_off:\n self.AC_Switch(False):\n\n elif self.state == \"Heat\":\n if room_temp < self.heating_on:\n self.heater_Switch(True):\n elif room_temp > self.heating_off:\n self.heater_Switch(False):\n\n elif self.state == \"Fan\":\n self.vent_fan_Switch(True)\n\n else:\n self.AC_Switch(False):\n self.heater_Switch(False):\n\n def run(self):\n self.stop_event.clear()\n while not self.stop_event.isSet():\n #Only run if we are not in a safety state\n if self.ok_to_start is True:\n self.state_machine()\n sleep(self.read_rate)\n\n self.mutex.release()\n", "id": "7859306", "language": "Python", "matching_score": 2.886942148208618, "max_stars_count": 0, "path": "python/hvacControl.py" }, { "content": "import RPi.GPIO as GPIO\nimport ephem\nimport logging\nimport datetime as dt\n\nlogging.basicConfig()\n\n#Simple class for sun data\nclass sunData():\n def __init__(self,\n lat=34.213869,\n lon =-118.161517,\n elev = 480,\n twilight = \"sensible\",\n horizon = None):\n\n self.logger = logging.getLogger(\"SunData\")\n self.logger.setLevel(logging.DEBUG)\n\n self.twilight_def = {\"sensible\" : -3,\n \"civil\" : -6,\n \"nautical\" : -12,\n \"astronomical\" : -18}\n\n self.meadows = ephem.Observer()\n self.meadows.lat = str(lat)\n self.meadows.lon = str(lon)\n self.meadows.elev = elev\n\n # No custom horizon so use standard definitaion\n if horizon is None:\n self.twilight = twilight\n self.meadows.horizon = str(self.twilight_def[twilight])\n else:\n self.twilight = \"custom\"\n self.meadows.horizon = str(horizon)\n\n def get_sunrise(self):\n self.meadows.date = ephem.now()\n if self.sunup() is True:\n sunrise = self.meadows.previous_rising(ephem.Sun(), use_center=True)\n else:\n sunrise = self.meadoews.next_rising(ephem.Sun(), use_center=True)\n return ephem.localtime(sunrise)\n\n def get_sunset(self):\n self.meadows.date = ephem.now()\n if self.sunup() is True:\n sunset = self.meadows.next_setting(ephem.Sun(), use_center=True)\n else:\n sunset = self.meadows.previous_setting(ephem.Sun(), use_center=True)\n return ephem.localtime(sunset)\n\n def sunup(self):\n # This calculates if the sun as above virtual horizon\n # useful as we need to know if it's day or night\n self.meadows.date = ephem.now()\n sun = ephem.Sun(self.meadows)\n sun.compute(self.meadows)\n sunup = (sun.alt > self.meadows.horizon)\n return sunup\n\n\nclass lightingZone():\n def __init__(self, name, pin):\n self.logger = logging.getLogger('LightingZone')\n self.logger.setLevel(logging.DEBUG)\n\n self.sundata = sunData()\n\n self.name = name\n self.pin = pin\n GPIO.setup(self.pin, GPIO.OUT)\n\n # Inputs are inverted as sinks current\n self.OFF = True\n self.ON = False\n\n self.modes = {\"Auto\" : 0,\n \"Timed\" : 1,\n \"Manual\" : 3,\n \"On\" : 4,\n \"Off\" : 5}\n\n self.start_mode = self.modes[\"Auto\"] #sunset\n self.stop_mode = self.modes[\"Manual\"] # 11pm\n self.start_time = None\n self.stop_time = None\n self.start_duration = None\n self.stop_duration = dt.timedelta(hours=2)\n\n self.manual_on = dt.time(19,30,00)\n self.manual_off = dt.time(23,00,00)\n\n self.lights_on = False\n self.update_times()\n\n def set_timer(self,\n start = None,\n stop = None):\n # Let's try a single function here\n # It either are None or gibberish then ignore\n # Do modes\n\n if start == \"Auto\":\n self.start_mode = self.modes[\"Auto\"]\n elif start == \"On\":\n self.start_mode = self.modes[\"On\"]\n elif start == \"Off\":\n self.start_mode = 
self.modes[\"Off\"]\n elif isinstance(start, dt.time):\n self.start_mode = self.modes[\"Manual\"]\n self.manual_on = start\n elif isinstance(start, float):\n self.start_mode = self.modes[\"Timed\"]\n self.start_duration = dt.timedelta(hours=start)\n\n if stop == \"Auto\":\n self.stop_mode = self.modes[\"Auto\"]\n elif isinstance(stop, dt.time):\n self.stop_mode = self.modes[\"Manual\"]\n self.manual_off = stop\n elif isinstance(stop, float):\n self.stop_mode = self.modes[\"Timed\"]\n self.stop_duration = dt.timedelta(hours=stop)\n\n self.update_times()\n\n def update_times(self):\n # This actually sets the times for running the lights\n # Should be called periodically\n # note the on and off cases do not use any times\n\n if self.start_mode is self.modes[\"Auto\"]:\n self.start_time = self.sundata.get_sunset()\n elif self.start_mode is self.modes[\"Manual\"]:\n self.start_time = dt.datetime.combine(dt.date.today(),\n self.manual_on)\n elif self.start_mode is self.modes[\"Timed\"]:\n self.start_time = dt.datetime.now() + self.start_duration\n\n if self.stop_mode is self.modes[\"Auto\"]:\n self.stop_time = self.sundata.get_sunrise()\n elif self.stop_mode is self.modes[\"Manual\"]:\n self.stop_time = dt.datetime.combine(dt.date.today(),\n self.manual_off)\n elif self.stop_mode is self.modes[\"Timed\"]:\n self.stop_time = self.start_time + self.stop_duration\n\n # We need to do the sanity check and make sure the stop_time\n # is after the start_time - should only be an issue when\n # using manual mode\n\n t_delta = self.stop_time - self.start_time\n if t_delta.total_seconds() < 0:\n self.stop_time = self.stop_time + dt.timedelta(days=1)\n\n self.set_lights()\n\n def set_lights(self):\n current_time = dt.datetime.now()\n print(current_time)\n print(self.start_time)\n print(self.stop_time)\n if self.start_mode is self.modes[\"On\"]:\n GPIO.output(self.pin, self.ON)\n self.lights_on = True\n elif self.start_mode is self.modes[\"Off\"]:\n GPIO.output(self.pin, self.OFF)\n self.lights_on = False\n\n elif (current_time > self.start_time) and (current_time < self.stop_time):\n GPIO.output(self.pin,self.ON)\n print(\"TURNING ON\")\n else:\n GPIO.output(self.pin,self.OFF)\n print(\"TURNING OFF\")\n\n\nclass lightingControl():\n def __init__(self,zones=None):\n self.logger = logging.getLogger('LightingControl')\n self.logger.setLevel(logging.DEBUG)\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n\n # We could load infor from file etc but just setup\n # A default zone and pin list here that can be added\n # Upon at a later date if required\n\n if zones is None:\n # Dictionary is NAME: pin\n zones = {\"PATH\" : 2,\n \"FRONT\" : 3,\n \"SPARE\" : 4,\n \"ALLEY\" : 7}\n\n self.setup_zones(zones)\n self.local_settings()\n self.set_lights()\n\n def setup_zones(self, zones):\n self.zones = {}\n for name, pin in zones.items():\n self.zones[name] = lightingZone(name, pin)\n\n def set_lights(self):\n for name,zone in self.zones.items():\n self.logger.debug(name)\n zone.update_times()\n\n def local_settings(self):\n # This is to setup the local profile\n # It should really be a config file but seen\n # as it's just me I'll put them in here\n # Path comes on at civil twilight, turns off at 11pm\n # All other zones are off\n\n self.zones[\"PATH\"].set_timer(\"Auto\", dt.time(23,00,00))\n #self.zones[\"PATH\"].set_timer(\"Auto\", \"Auto\")\n self.zones[\"FRONT\"].set_timer(\"Off\")\n self.zones[\"SPARE\"].set_timer(\"Off\")\n self.zones[\"ALLEY\"].set_timer(\"Off\")\n", "id": "7608452", "language": "Python", 
"matching_score": 2.227903366088867, "max_stars_count": 0, "path": "home/lightingControl.py" }, { "content": "#!/usr/bin/env python\nfrom home import lightingControl as lc\nimport logging\n\nfrom twisted.protocols.basic import LineReceiver\nfrom twisted.internet.protocol import ServerFactory\nfrom twisted.internet import task\nfrom twisted.internet import reactor\n\nlogging.basicConfig()\n\nclass lightingProtocol(LineReceiver):\n def __init__(self):\n self.logger = logging.getLogger('LightingProtocol')\n self.logger.setLevel(logging.DEBUG)\n\n def lineReceived(self, line):\n sline = line.split()\n if sline[0] == \"read\":\n self.logger.debug(\"Sending Data\")\n\nclass lightingServer(ServerFactory):\n protocol = lightingProtocol\n\n def __init__(self):\n self.p = \"poop\"\n\n\nif __name__ == \"__main__\":\n lights = lc.lightingControl()\n light_loop = task.LoopingCall(lights.set_lights)\n light_loop.start(30)\n\n reactor.listenTCP(50000, lightingServer())\n reactor.run()\n", "id": "1277983", "language": "Python", "matching_score": 0.9023962616920471, "max_stars_count": 0, "path": "bin/lightingServer.py" }, { "content": "import urllib2\nimport json\n\n\ndef collect_pws(pws=\"KCAALTAD10\", \n key=\"c0464283a5ab0417\"):\n\n \"\"\"Collects weather data from personel station and\n returns info that is useful - default \n\n Parameters\n ----------\n pws : string\n This is the personell weather station ID\n key : string\n This is my weather underground API key\n\n Returns\n -------\n w_data : dictionary\n Dictionary of extracted useful data\n\n Notes\n -----\n\n The Free API key only allows a connection 10 times per \n minute and 500 calls a day. Seen as it will be on all\n the time don't call more than once every 5 minutes\n\n \"\"\"\n url_str = \"http://api.wunderground.com/api/%s/conditions/q/pws:%s.json\" % (key, pws)\n\n f = urllib2.urlopen(url_str)\n json_string = f.read()\n parsed_json = json.loads(json_string)\n f.close()\n\n parsed_json = parsed_json['current_observation']\n w_data = {}\n w_data[\"temperature\"] = parsed_json['temp_c']\n w_data[\"humidity\"] = float(parsed_json['relative_humidity'][:-1])\n w_data[\"pressure\"] = float(parsed_json['pressure_mb'])\n w_data[\"wind_dir\"] = parsed_json['wind_degrees']\n w_data[\"wind_mph\"] = parsed_json['wind_mph']\n w_data[\"precip_hour\"] = float(parsed_json['precip_1hr_metric'])\n w_data[\"precip_day\"] = float(parsed_json['precip_today_metric'])\n\n return w_data\n\n", "id": "6680834", "language": "Python", "matching_score": 1.369567632675171, "max_stars_count": 0, "path": "python/wunderground.py" }, { "content": "import MySQLdb\nimport threading\nfrom datetime import datetime\nfrom time import sleep\nfrom wunderground import collect_pws\nimport logging\n\nlogging.basicConfig()\n\nclass get_temps(threading.Thread):\n def __init__(self, read_rate = 10):\n threading.Thread.__init__(self)\n self.logger = logging.getLogger('GetTemps')\n self.logger.setLevel(logging.DEBUG)\n \n self.read_rate = read_rate\n self.attic_t_dev = \"28-000004a82b4a\"\n self.room_t_dev = \"28-000004a82a59\"\n self.attic_t_sum = 0\n self.attic_t_count = 0\n self.room_t_sum = 0\n self.room_t_count = 0\n self.attic_t = 0\n self.room_t = 0\n\n self.stop_event = threading.Event()\n self.mutex = threading.Lock()\n\n self.init_temps()\n\n def __del__(self):\n self.logger.info(\"Deleting Myself\")\n self.stop_event.set()\n \n def run(self):\n self.stop_event.clear()\n while not self.stop_event.isSet():\n self.collect_temps()\n sleep(self.read_rate)\n\n def read_temp(self, 
id, celcius=True):\n tmp_str = \"/sys/bus/w1/devices/\" + id + \"/w1_slave\"\n tfile = open(tmp_str)\n tmp_txt = tfile.read()\n tfile.close()\n\n lines = tmp_txt.split(\"\\n\")\n if lines[0].find(\"YES\") > 0:\n tt = float((lines[1].split(\" \")[9])[2:])\n tt /= 1000\n if celcius is False:\n return ctof(tt)\n else:\n return tt\n else:\n return False\n\n def collect_temps(self):\n self.mutex.acquire()\n attic_t = self.read_temp(self.attic_t_dev)\n room_t = self.read_temp(self.room_t_dev)\n\n if attic_t is not False:\n self.attic_t_sum += attic_t\n self.attic_t_count += 1\n if room_t is not False:\n self.room_t_sum += room_t\n self.room_t_count += 1\n \n self.mutex.release()\n\n def set_data(self):\n #This sets the room_t and attic_t to latest average\n #which can be read from the member variables\n self.mutex.acquire()\n if self.attic_t_count != 0 :\n self.attic_t = self.attic_t_sum*1.0/self.attic_t_count\n if self.room_t_count != 0:\n self.room_t = self.room_t_sum*1.0/self.room_t_count\n self.attic_t_sum = 0\n self.room_t_sum = 0\n self.attic_t_count = 0\n self.room_t_count = 0\n self.mutex.release()\n\n def init_temps(self):\n attic_t = self.read_temp(self.attic_t_dev)\n room_t = self.read_temp(self.room_t_dev)\n while attic_t is False:\n attic_t = self.read_temp(self.attic_t_dev)\n while room_t is False:\n room_t = self.read_temp(self.room_t_dev)\n\n self.attic_t = attic_t\n self.room_t = room_t\n\n def ctof(self,c):\n return c*9/5 + 32\n\n def ftoc(self,f):\n return (f-32)*5/9.0\n\n\nclass collect_data(threading.Thread):\n def __init__(self, lograte = 5):\n threading.Thread.__init__(self)\n self.daemon = True\n self.lograte = lograte*60.0\n self.conn = MySQLdb.connect(host = \"192.168.1.2\", \n user = \"hvac_master\",\n passwd = \"<PASSWORD>\",\n db = \"hvac\")\n self.cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)\n self.temp_collector = get_temps()\n self.ac_on = False\n self.attic_fan_on = False\n\n self.command_buffer = []\n \n self.stop_event = threading.Event()\n self.mutex = threading.Lock()\n self.temp_collector.daemon = True\n self.temp_collector.start()\n\n def run(self):\n self.stop_event.clear()\n #self.temp_collector.start()\n while not self.stop_event.isSet():\n self.temp_collector.set_data()\n self.attic_t = self.temp_collector.attic_t\n self.room_t = self.temp_collector.room_t\n try:\n self.outside_data = collect_pws()\n except:\n print \"outside temp failed\"\n print self.attic_t, self.room_t, self.outside_data[\"temperature\"]\n self.populate_db()\n sleep(self.lograte)\n\n def populate_db(self):\n now = datetime.now()\n tmp_str = \"\"\"INSERT INTO data_logs (timestamp, attic_fan1_on, attic_fan2_on, central_fan_on, heating_on, ac_on, attic_temp, inside_temp, outside_temp, outside_humidity, outside_pressure, wind_dir, wind_speed, precip_hour, precip_day) VALUES ('%s',%i,0,0,0,%i,%f,%f,%f, %f, %f, %f, %f, %f, %f)\"\"\" % (now,self.attic_fan_on,self.ac_on,self.attic_t,self.room_t, self.outside_data[\"temperature\"], self.outside_data[\"humidity\"], self.outside_data[\"pressure\"], self.outside_data[\"wind_dir\"], self.outside_data[\"wind_mph\"], self.outside_data[\"precip_hour\"], self.outside_data[\"precip_day\"])\n self.cursor.execute(tmp_str)\n self.conn.commit()\n\n def __del__(self):\n self.stop_event.set()\n", "id": "1104278", "language": "Python", "matching_score": 2.800424098968506, "max_stars_count": 0, "path": "python/hvacCommunicator.py" }, { "content": "#!/usr/bin/env python\nimport logging\nimport sqlite3\nimport time\nfrom datetime import datetime\nfrom 
scipy.constants import convert_temperature as ct\n\nimport nest\n\nlogging.basicConfig()\nlogger = logging.getLogger(\"HomeLogger\")\nlogger.setLevel(logging.DEBUG)\n\nSBF_DB = \"/home/rw247/smadata/SBFspot.db\"\n\nCLIENT_ID = '1bd5b3d7-59da-44ef-a22a-a1c13757a701'\nCLIENT_SECRET = '<KEY>'\nACCESS_TOKEN = '<PASSWORD>'\n\nnapi = nest.Nest(client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n access_token_cache_file=ACCESS_TOKEN)\nnest_device = napi.structures[0].thermostats[0]\n\ndef get_nest_info():\n napi._bust_cache()\n temperature = nest_device.temperature\n if nest_device.temperature_scale == 'F':\n temperature = ct(temperature, \"F\", \"C\")\n\n humidity = nest_device.humidity\n\n logger.debug(\"Temp: %0.1fC\" % temperature)\n logger.debug(\"Humidity: %0.1f%%\" % humidity)\n\n return(temperature, humidity)\n\ndef write_to_db():\n n_t, n_h = get_nest_info()\n attic_t = 21.0\n attic_fan = False\n\n tmp_time = datetime.now()\n unix_time = int(tmp_time.strftime('%s'))\n\n conn = sqlite3.connect(SBF_DB)\n c = conn.cursor()\n sql_str = \"INSERT INTO HomeData VALUES (%i,%f,%f,%f,%i)\" % (\n unix_time, n_t, n_h, attic_t, attic_fan\n )\n logger.debug(sql_str)\n c.execute(sql_str)\n\n conn.commit()\n conn.close()\n\nif __name__ == \"__main__\":\n write_to_db()\n", "id": "7856635", "language": "Python", "matching_score": 2.18005633354187, "max_stars_count": 0, "path": "bin/nestlogger.py" } ]
2.20398
acollu
[ { "content": "import sys\n\nimport sqlite3\nfrom sqlite3 import Error\n\nclass Sqlite3Interface:\n def __init__(self, database_name):\n self.connection = self.connect(database_name)\n\n def __del__(self):\n self.connection.close()\n\n def connect(self, database_name):\n try:\n connection = sqlite3.connect(database_name)\n return connection\n except Error:\n print(Error)\n sys.exit()\n\n def execute(self, cmd, cmd_type='read', data=None):\n cursor = self.connection.cursor()\n if cmd_type == \"commit\":\n cursor.execute(cmd)\n self.connection.commit()\n elif cmd_type == \"commit_many\" and data is not None:\n cursor.executemany(cmd, data)\n self.connection.commit()\n elif cmd_type == \"fetch\":\n cursor.execute(cmd)\n data = cursor.fetchall()\n return data\n else:\n raise TypeError\n", "id": "6417936", "language": "Python", "matching_score": 1.3312392234802246, "max_stars_count": 0, "path": "sqlite_dbint/sqlite3_interface.py" }, { "content": "import itertools\nimport random\nimport unittest\nfrom ...sqlite_dbint.sqlite_dbint import SqliteDatabaseInterface\n\n# class under test\ndb = SqliteDatabaseInterface(\"test_database.db\")\n\nclass TestSqliteDatabaseInterface(unittest.TestCase):\n\n # Testing call\n\n def test_call(self):\n for valid_field in mapping.data_mapping:\n self.assertEqual(mapping(valid_field), mapping.data_mapping[valid_field])\n with self.assertRaises(KeyError):\n for non_valid_field in [10, \"a non valid field\", 5.5, None, \"\"]:\n mapping(non_valid_field)\n\n # Public method testing\n\n def test_is_valid_field(self):\n for valid_field in mapping.data_mapping:\n self.assertTrue(mapping.is_valid_field(valid_field))\n for non_valid_field in [10, \"a non valid field\", 5.5, None, \"\"]:\n self.assertFalse(mapping.is_valid_field(non_valid_field))\n\n def test_get_mapping(self):\n for valid_field in mapping.data_mapping:\n self.assertEqual(mapping.get_mapping(valid_field), mapping.data_mapping[valid_field])\n with self.assertRaises(KeyError):\n for non_valid_field in [10, \"a non valid field\", 5.5, None, \"\"]:\n mapping.get_mapping(non_valid_field)\n\n\n# class under test\nfmp = Fmp(\"../../../../../json_data/\")\n\n\nclass TestFmp(unittest.TestCase):\n # Testing data structures\n\n def test_data(self):\n self.assertEqual(fmp.relative_urls.keys(), fmp.data_structures.keys())\n self.assertEqual(fmp.relative_urls.keys(), fmp.periods.keys())\n for datafield in fmp.mapping.data_mapping.values():\n self.assertTrue(fmp._datafield_exists_in(datafield, fmp.data_structures))\n for document_name in fmp.periods:\n self.assertTrue(all([period in [\"annual\", \"quarter\"] for period in fmp.periods[document_name]]))\n\n # Testing private methods\n\n def test_current_data(self):\n for symbol, price in fmp.current_data.items():\n self.assertIsInstance(symbol, str)\n self.assertIsInstance(price, float)\n\n def test_field_exists_in(self):\n test_data_structure = {\"key_left\": {\"key_upper\": [{\"key\": \"\"}, {\"other_key\": \"\"}]}, \"key_right\": {\"key\": \"\"}}\n for valid_field in [\"key_left\", \"key_right\", \"key_upper\", \"key\", \"other_key\"]:\n self.assertTrue(fmp._field_exists_in(valid_field, test_data_structure))\n for non_valid_field in [10, \"a non valid field\", 5.5, None, \"\"]:\n self.assertFalse(fmp._field_exists_in(non_valid_field, test_data_structure))\n\n def test_datafield_exists_in(self):\n test_data_structure = {\"key_left\": \"\", \"key_upper\": [{\"key\": \"\"}], \"key_right\": [{\"other_key\": \"\"}]}\n for valid_field in [\"key\", \"other_key\"]:\n 
self.assertTrue(fmp._datafield_exists_in(valid_field, test_data_structure))\n for non_valid_field in [10, \"a non valid field\", 5.5, None, \"\", \"key_left\"]:\n self.assertFalse(fmp._datafield_exists_in(non_valid_field, test_data_structure))\n\n # Public method testing\n\n def test_get_symbols(self):\n self.assertEqual(fmp.get_symbols(), sorted(list(fmp.current_data)))\n\n def test_is_available_symbol(self):\n for valid_symbol in fmp.current_data:\n self.assertTrue(fmp.is_available_symbol(valid_symbol))\n for non_valid_symbol in [10, \"a non valid symbol\", 5.5, None, \"\"]:\n self.assertFalse(fmp.is_available_symbol(non_valid_symbol))\n\n def test_get_price(self):\n for valid_symbol in fmp.current_data:\n self.assertEqual(fmp.get_price(valid_symbol), fmp.current_data[valid_symbol])\n with self.assertRaises(UnknownSymbol):\n for non_valid_symbol in [10, \"a non valid symbol\", 5.5, None, \"\"]:\n fmp.get_price(non_valid_symbol)\n\n def test_get_url(self):\n valid_symbol = \"\"\n valid_period = \"\"\n valid_document_name = \"\"\n for i in range(1000):\n valid_symbol = list(fmp.current_data.keys())[random.randint(0, len(fmp.current_data.keys()) - 1)]\n valid_document_name = list(fmp.relative_urls.keys())[random.randint(0, len(fmp.relative_urls.keys()) - 1)]\n valid_period = fmp.periods[valid_document_name][random.randint(0, len(fmp.periods[valid_document_name]) - 1)]\n url = fmp.base_url + fmp.relative_urls[valid_document_name] + valid_symbol + \"?period=\" + valid_period\n self.assertEqual(fmp.get_url(valid_symbol, valid_period, valid_document_name), url)\n with self.assertRaises(UnknownSymbol):\n for non_valid_symbol in [10, \"a non valid symbol\", 5.5, None, \"\"]:\n fmp.get_url(non_valid_symbol, valid_period, valid_document_name)\n fmp.get_url(\"non valid symbol\", \"non valid period\", valid_document_name)\n fmp.get_url(\"non valid symbol\", \"non valid period\", \"non valid document name\")\n fmp.get_url(\"non valid symbol\", valid_period, \"non valid document name\")\n with self.assertRaises(InvalidPeriod):\n for non_valid_period in [10, \"a non valid period\", 5.5, None, \"\"]:\n fmp.get_url(valid_symbol, non_valid_period, valid_document_name)\n fmp.get_url(valid_symbol, \"non valid period\", valid_document_name)\n fmp.get_url(valid_symbol, \"non valid period\", \"non valid document name\")\n with self.assertRaises(UnknownFinancialDocument):\n for non_valid_document_name in [10, \"a non valid symbol\", 5.5, None, \"\"]:\n fmp.get_url(valid_symbol, valid_period, non_valid_document_name)\n\n def test_get_document_name_containing(self):\n datafields_list = [list(fmp.data_structures[document_name][\"financials\"][0].keys()) for document_name in fmp.data_structures if \"financials\" in fmp.data_structures[document_name]]\n datafields = list(itertools.chain(*datafields_list))\n for datafield in datafields:\n document_name = None\n for dname in fmp.data_structures:\n if fmp._field_exists_in(datafield, fmp.data_structures[dname]):\n document_name = dname\n break\n self.assertEqual(fmp.get_document_name_containing(datafield), document_name)\n with self.assertRaises(UnknownDatafield):\n for datafield in [10, \"a non valid datafield\", 5.5, None, \"\"]:\n fmp.get_document_name_containing(datafield)\n\n def test_get_documents_datafields_dict(self):\n datafields_list = [list(fmp.data_structures[document_name][\"financials\"][0].keys()) for document_name in fmp.data_structures if \"financials\" in fmp.data_structures[document_name]]\n datafields = 
sorted(list(itertools.chain(*datafields_list)))\n documents_datafields_dict = fmp.get_documents_datafields_dict(datafields)\n self.assertIsInstance(documents_datafields_dict, dict)\n datafields_output = []\n for document_name in documents_datafields_dict:\n self.assertIsInstance(documents_datafields_dict[document_name], list)\n for datafield in documents_datafields_dict[document_name]:\n self.assertIn(datafield, fmp.data_structures[document_name][\"financials\"][0].keys())\n datafields_output.extend(list(documents_datafields_dict[document_name]))\n datafields_output = sorted(datafields_output)\n self.assertEqual(datafields, datafields_output)\n with self.assertRaises(UnknownDatafield):\n fmp.get_documents_datafields_dict([10, \"a non valid datafield\", 5.5, None, \"\"])\n\n def test_get_document_data_from_url(self):\n pass\n\n def read_document_data_from_file(self):\n pass\n\n def get_periodic_data(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "2083944", "language": "Python", "matching_score": 0.7438313364982605, "max_stars_count": 0, "path": "test/unit/other.py" }, { "content": "import sys\nfrom .sqlite3_interface import Sqlite3Interface\nfrom .field_formatter import FieldFormatter as ff\n\n# Terminology:\n# record: row\n# attribute: column\n\nclass SqliteDatabaseInterface:\n def __init__(self, database_name):\n self.db = Sqlite3Interface(database_name)\n\n def __del__(self):\n del self.db\n\n def get_table_names(self):\n table_names = self.db.execute('SELECT name FROM sqlite_master where type = \"table\"', \"fetch\")\n table_names = [str(table_name[0]) for table_name in table_names]\n return table_names\n\n def is_table(self, table_name, record_format):\n existing_table_names = self.get_table_names()\n if table_name not in existing_table_names:\n return False\n record_format_keys = [pair[0] for pair in record_format] \n record_format_key_types = [pair[1] for pair in record_format] \n table_attributes = self.get_attributes(table_name)\n table_attribute_types = self.get_attribute_types(table_name=table_name, add_primary_key_flag=True)\n return record_format_keys == table_attributes and record_format_key_types == table_attribute_types\n\n def get_attributes(self, table_name):\n table_info = self.db.execute('PRAGMA table_info(' + ff.format_table_name(table_name) + ')', \"fetch\")\n attributes = [attribute_info[1] for attribute_info in table_info]\n return attributes\n\n def get_attribute_types(self, table_name, add_primary_key_flag=False):\n table_info = self.db.execute('PRAGMA table_info(' + ff.format_table_name(table_name) + ')', \"fetch\")\n attribute_types = [attribute_info[2] for attribute_info in table_info]\n if add_primary_key_flag:\n for i, attribute_info in enumerate(table_info):\n if attribute_info[5] == 1:\n attribute_types[i] += \" PRIMARY KEY\"\n break\n return attribute_types\n\n def get_primary_attribute(self, table_name):\n table_info = self.db.execute('PRAGMA table_info(' + ff.format_table_name(table_name) + ')', \"fetch\")\n for i, attribute_info in enumerate(table_info):\n if attribute_info[5] == 1:\n return attribute_info[1]\n\n def get_primary_attribute_position(self, table_name):\n table_info = self.db.execute('PRAGMA table_info(' + ff.format_table_name(table_name) + ')', \"fetch\")\n for i, attribute_info in enumerate(table_info):\n if attribute_info[5] == 1:\n return i\n\n def create_table(self, table_name, record_format, records=[], overwrite=False):\n if self.is_table(table_name, record_format) and not overwrite:\n return\n 
self.drop_table(table_name)\n table_structure = \", \".join([ff.format_attribute(pair[0]) + \" \" + pair[1] for pair in record_format])\n create_table_cmd = \"CREATE TABLE \" + ff.format_table_name(table_name) + \"(\" + table_structure + \")\"\n self.db.execute(create_table_cmd, \"commit\")\n self.__insert_records(table_name, records)\n\n def drop_table(self, table_name):\n drop_table_cmd = \"DROP TABLE if exists \" + ff.format_table_name(table_name)\n self.db.execute(drop_table_cmd, \"commit\")\n\n def drop_all_tables(self):\n for table_name in self.get_table_names():\n self.drop_table(table_name)\n\n def is_record(self, table_name, unique_id_name, unique_id_value):\n unique_id_values = self.select_values(table_name=table_name, condition=None, attributes=[unique_id_name])\n unique_id_values = [value[0] for value in unique_id_values]\n return unique_id_value in unique_id_values\n\n def insert_record(self, table_name, record):\n record_values = [ff.format_value(value) for value in record]\n insert_record_cmd = \"INSERT INTO \" + ff.format_table_name(table_name) + \" VALUES(\" + \", \".join(record_values) + \")\"\n self.db.execute(insert_record_cmd, \"commit\")\n\n def __insert_records(self, table_name, records):\n if not records:\n return\n insert_records_cmd = \"INSERT INTO \" + ff.format_table_name(table_name) + \" VALUES(\" + \",\".join([\"?\" for i in range(0, len(records[0]))]) + \")\"\n self.db.execute(insert_records_cmd, \"commit_many\", records)\n\n def delete_record(self, table_name, condition):\n delete_record_cmd = \"DELETE FROM \" + ff.format_table_name(table_name) + \" \" + ff.format_condition(condition)\n\n def update_values(self, table_name, value, condition=None, attributes=all):\n update_value_cmd = \"UPDATE \" + ff.format_table_name(table_name) + \" SET (\" + ff.format_attributes(attributes) + \") = \" + ff.format_value(value) + \" \" + ff.format_condition(condition)\n self.db.execute(update_value_cmd, \"commit\")\n\n def select_values(self, table_name, condition=None, attributes=all, order_attributes=None, order_type=\"\"):\n attributes = ff.format_attributes(attributes)\n table_name = ff.format_table_name(table_name)\n condition = ff.format_condition(condition)\n order = ff.format_order(order_attributes, order_type)\n select_cmd = \"SELECT \" + attributes + \" FROM \" + table_name + \" \" + condition + \" \" + order\n data = self.db.execute(select_cmd, \"fetch\")\n return data\n\n def replace_data(self, table_name, data_old, data_new, attributes=all):\n record_values = [ff.format_value(value) for value in record]\n replace_data_cmd = \"REPLACE INTO \" + ff.format_table_name(table_name) + \"(\" + ff.format_attributes(attributes) + \") VALUES(\" + ff.format_values([data_old, data_new]) + \")\"\n self.db.execute(replace_data_cmd, \"commit\")\n\n def get_table(self, table_name):\n return self.select_values(table_name)\n\n def count_attributes(self, table_name):\n return len(self.get_attributes(table_name))\n\n def count_records(self, table_name):\n records = self.select_values(table_name)\n return len(records)\n", "id": "1229101", "language": "Python", "matching_score": 3.486128330230713, "max_stars_count": 0, "path": "sqlite_dbint/sqlite_database_interface.py" }, { "content": "from .sqlite_database_interface import SqliteDatabaseInterface\n\nclass SqliteDatabaseDisplay:\n def __init__(self, database_name):\n self.db = SqliteDatabaseInterface(database_name)\n\n def __del__(self):\n self.db\n\n def list_table_names(self):\n table_names = self.db.get_table_names()\n 
print(\"Database - table report\")\n print(\"Table name: number of records\")\n for table_name in table_names:\n number_of_records = self.db.count_records(table_name)\n print(table_name, \": \", str(number_of_records))\n\n def display_table(self, table_name, cell_width=20):\n primary_attribute_position = self.db.get_primary_attribute_position(table_name)\n primary_attribute = self.db.get_primary_attribute(table_name)\n other_attributes = [attribute for attribute in self.db.get_attributes(table_name) if attribute != primary_attribute]\n self.print_horizontal_delimiter(table_name, cell_width)\n self.print_info_line([primary_attribute, *other_attributes], cell_width)\n self.print_horizontal_delimiter(table_name, cell_width)\n for record in self.db.select_values(table_name):\n record = list(record)\n primary_attribute_value = record[primary_attribute_position]\n del record[primary_attribute_position]\n self.print_info_line([primary_attribute_value, *record], cell_width)\n self.print_horizontal_delimiter(table_name, cell_width)\n\n def print_horizontal_delimiter(self, table_name, cell_width):\n line = \"-\" + \"-\".join([\"-\"*cell_width for i in range(0, self.db.count_attributes(table_name))]) + \"-\"\n print(line)\n\n def print_info_line(self, record, cell_width):\n line = \"|\"\n for value in record:\n line += (value if isinstance(value, str) else str(value)).ljust(cell_width, \" \")\n line += \"|\"\n print(line)\n\n#display = SqliteDatabaseDisplay(\"test.db\")\n#display.list_table_names()\n#display.display_table(table_name=\"trial\", cell_width=30)\n", "id": "6391350", "language": "Python", "matching_score": 2.4518630504608154, "max_stars_count": 0, "path": "sqlite_dbint/sqlite_database_display.py" }, { "content": "from sqlite_database_interface import SqliteDatabaseInterface\n\n\ndb = SqliteDatabaseInterface(\"test.db\")\n\ndb.drop_all_tables()\n#db.drop_table(\"trial\")\n\ntable_name = \"trial\"\nrecord_format = [(\"id\", \"integer PRIMARY KEY\"), (\"name\", \"text\")]\ndb.create_table(table_name, record_format)\nrecord = (1, \"JOHN\")\ndb.insert_record(table_name, record)\nrecord = (2, \"ADAM\")\ndb.insert_record(table_name, record)\nrecord = (3, \"MIKE\")\ndb.insert_record(table_name, record)\nprint(db.select_values(table_name, [(\"id\", \"=\", 1)], [\"name\"]))\ndb.update_values(table_name, \"ADAM\", [(\"id\", \"=\", 1)], [\"name\"])\nrecords = db.select_values(table_name)\nprint(db.is_table(table_name, record_format))\nprint(db.count_records(table_name))\n", "id": "4666275", "language": "Python", "matching_score": 3.0312647819519043, "max_stars_count": 0, "path": "sqlite_dbint/template.py" }, { "content": "from sqlite_dbint import SqliteDatabase\n\n\ndb = SqliteDatabase(\"test.db\")\n\ndb.create_table(\"trial\", {\"id\": \"integer PRIMARY KEY\", \"name\": \"text\"})\n#db.recreate_table(\"trial\", {\"id\": \"integer PRIMARY KEY\", \"name\": \"text\"})\n#db.drop_table(\"trial\")\nentry = {\"id\": 1, \"name\": \"JOHN\"}\ndb.insert_entry(\"trial\", entry)\nentry = {\"id\": 2, \"name\": \"ADAM\"}\ndb.insert_entry(\"trial\", entry)\ndb.select_fields(\"trial\", [\"id\", \"=\", \"1\"], [\"name\"])\n#db.update_field_value(\"trial\", 1, \"name\", \"ADAM\")\nentries = db.select_fields(\"trial\")\n#print(db.is_table(\"trial\"))\nprint(db.count_entries(entries))\ndb.drop_all_tables()\n\n\n", "id": "11595698", "language": "Python", "matching_score": 0.2545514702796936, "max_stars_count": 0, "path": "test/unit/test_sqlite_dbint.py" }, { "content": "from .errors import InvalidConditionFormat, 
InvalidConditionSequence, InvalidLogicalOperator, InvalidComparisonMember, InvalidConditionalOperator\n\n\nclass FieldFormatter:\n comparison_operators = [\"=\", \"<>\", \">\", \"<\", \">=\", \"<=\"] # equal, not equal, higher, lower, higher equal, lower equal\n logical_operators = [\"AND\", \"OR\"]\n\n @staticmethod\n def format_table_name(table_name):\n if isinstance(table_name, str):\n return \"`\" + table_name + \"`\"\n else:\n TypeError\n\n @staticmethod\n def format_attribute(attribute):\n if isinstance(attribute, str):\n return \"`\" + attribute + \"`\"\n else:\n TypeError\n\n @staticmethod\n def format_attributes(attributes):\n if attributes is all:\n return \"*\"\n elif isinstance(attributes, list):\n return \", \".join([FieldFormatter.format_attribute(attribute) for attribute in attributes])\n else:\n TypeError\n\n @staticmethod\n def format_value(value):\n if isinstance(value, int):\n return str(value)\n elif isinstance(value, float):\n return str(value)\n elif isinstance(value, str):\n return '\"' + value + '\"'\n else:\n raise TypeError\n\n @staticmethod\n def format_values(values):\n return \", \".join([self.format_value(value) for value in values])\n\n @staticmethod\n def format_condition(condition):\n if condition is None:\n return \"\"\n elif isinstance(condition, list):\n formatted_condition = \"WHERE \"\n for i, member in enumerate(condition):\n if i % 2:\n if i + 1 == len(condition):\n raise InvalidConditionSequence\n if member not in FieldFormatter.logical_operators:\n raise InvalidLogicalOperator\n formatted_condition += \" \" + member + \" \"\n else:\n if len(member) != 3:\n raise InvalidComparisonMember\n if member[1] not in FieldFormatter.comparison_operators:\n raise InvalidConditionalOperator\n member = list(member)\n member[0] = FieldFormatter.format_attribute(member[0])\n member[2] = FieldFormatter.format_value(member[2])\n formatted_condition += \" \".join(member)\n return formatted_condition\n else:\n raise InvalidConditionFormat\n\n @staticmethod\n def format_order(order_attributes, order_type):\n if order_attributes is None:\n return \"\"\n elif isinstance(order_attributes, list):\n return \"ORDER BY \" + self.format_attributes(order_attributes) + \" \" + order_type\n else:\n raise TypeError\n", "id": "6626039", "language": "Python", "matching_score": 5.159536838531494, "max_stars_count": 0, "path": "sqlite_dbint/field_formatter.py" }, { "content": "class InvalidConditionFormat(Exception):\n pass\n\nclass InvalidConditionSequence(Exception):\n pass\n\nclass InvalidLogicalOperator(Exception):\n pass\n\nclass InvalidComparisonMember(Exception):\n pass\n\nclass InvalidConditionalOperator(Exception):\n pass\n", "id": "10724019", "language": "Python", "matching_score": 0.000955771713051945, "max_stars_count": 0, "path": "sqlite_dbint/errors.py" }, { "content": "import inspect\nfrom math import exp, log\nfrom .me_calculator_decorators import argument_checker\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nimport numpy as np\n\n\n# Some parameters that are not settable:\nmortgage_payment_range_min = 30000. # in dollars\nmortgage_duration_range_min = 1. # in years (>=1)\nmortgage_principal_range_min = 100000. # in dollars\nmortgage_interest_rate_range_min = 0.0 # as a fraction of principal\nmortgage_interest_range_min = 0.0 # in dollars\nproperty_value_growth_rate_min = 0.0 # as a fraction of property value\ntime_range_min = 0. 
# in years\nmortgage_payment_range_max = 100000. # in dollars\nmortgage_duration_range_max = 30. # in years\nmortgage_principal_range_max = 1500000. # in dollars\nmortgage_interest_rate_range_max = 0.2 # as a fraction of principal\nmortgage_interest_range_max = 1500000. # in dollars\nproperty_value_growth_rate_max = 0.2 # as a fraction of property value\ntime_range_max = 30. # in years\n\n\nclass MeCalculator:\n def __init__(self, mortgage_payment, mortgage_duration, mortgage_principal, mortgage_interest_rate, escrow_rate, property_value_growth_rate):\n self.mortgage_parameters = {\"mortgage_payment\": [mortgage_payment, mortgage_payment_range_min, mortgage_payment_range_max, \" [$]\"],\n \"mortgage_duration\": [mortgage_duration, mortgage_duration_range_min, mortgage_duration_range_max, \" [years]\"],\n \"mortgage_principal\": [mortgage_principal, mortgage_principal_range_min, mortgage_principal_range_max, \" [$]\"],\n \"mortgage_interest_rate\": [mortgage_interest_rate, mortgage_interest_rate_range_min, mortgage_interest_rate_range_max, \" [fraction of principal]\"],\n \"property_value_growth_rate\": [property_value_growth_rate, property_value_growth_rate_min, property_value_growth_rate_max, \" [fraction of property value]\"],\n \"time\": [None, time_range_min, time_range_max, \" [years]\"]}\n self.mortgage_plottables = {\"mortgage_payment\": [\" [$]\"],\n \"mortgage_duration\": [\" [years]\"],\n \"mortgage_principal\": [\" [$]\"],\n \"mortgage_interest_rate\": [\" [fraction of principal]\"],\n \"mortgage_interest\": [\" [$]\"],\n \"mortgage_escrow\": [\" [$]\"],\n \"mortgage\": [\" [$]\"],\n \"property_value\": [\" [$]\"],\n \"mortgage_principal_residual\": [\" [$]\"],\n \"mortgage_principal_paid\": [\" [$]\"],\n \"mortgage_interest_residual\": [\" [$]\"],\n \"mortgage_interest_paid\": [\" [$]\"],\n \"mortgage_escrow_residual\": [\" [$]\"],\n \"mortgage_escrow_paid\": [\" [$]\"],\n \"mortgage_residual\": [\" [$]\"],\n \"mortgage_paid\": [\" [$]\"]}\n self.functions = me_calculator_functions(escrow_rate, property_value_growth_rate)\n self.plot_colors = ['red', 'blue', 'black', 'green', 'cyan', 'orange']\n\n @argument_checker\n def plot_1d(self, x_parameter, y_plottables):\n plottable_functions = [getattr(self.functions, y_plottable) for y_plottable in y_plottables]\n for i, y_plottable in enumerate(y_plottables):\n selected_function = getattr(self.functions, y_plottable)\n x, y = self.data_1d(selected_function, x_parameter)\n plt.plot(x, y, lw=1.5, color=self.plot_colors[i], label=y_plottable + self.mortgage_plottables[y_plottable][0])\n plt.xlabel(x_parameter + self.mortgage_parameters[x_parameter][3], labelpad=8)\n plt.xticks(fontsize=8)\n plt.yticks(fontsize=8)\n plt.grid()\n plt.legend()\n plt.show()\n\n def data_1d(self, function, x_parameter):\n parameters = inspect.getfullargspec(function).args[1:]\n x_parameter_index = parameters.index(x_parameter)\n parameter_values = [self.mortgage_parameters[parameter][0] for parameter in parameters]\n x = []\n y = []\n x_parameter_range = self.mortgage_parameters[x_parameter][2] - self.mortgage_parameters[x_parameter][1]\n for x_parameter_step in range(0, 1000):\n x_parameter_value = (x_parameter_step * x_parameter_range / 1000.) 
+ self.mortgage_parameters[x_parameter][1]\n parameter_values[x_parameter_index] = x_parameter_value\n plottable = 0.\n try:\n plottable = function(*parameter_values)\n except ValueError:\n continue\n x.append(x_parameter_value)\n y.append(plottable)\n return x, y\n\n @argument_checker\n def plot_2d(self, x_parameter, y_parameter, z_plottable):\n plottable_function = getattr(self.functions, z_plottable)\n parameters = inspect.getfullargspec(plottable_function).args[1:]\n x_parameter_index = parameters.index(x_parameter)\n y_parameter_index = parameters.index(y_parameter)\n parameter_values = [self.mortgage_parameters[parameter][0] for parameter in parameters]\n x_parameter_range = self.mortgage_parameters[x_parameter][2] - self.mortgage_parameters[x_parameter][1]\n y_parameter_range = self.mortgage_parameters[y_parameter][2] - self.mortgage_parameters[y_parameter][1]\n x = np.arange(self.mortgage_parameters[x_parameter][1], self.mortgage_parameters[x_parameter][2], x_parameter_range / 1000.)\n y = np.arange(self.mortgage_parameters[y_parameter][1], self.mortgage_parameters[y_parameter][2], y_parameter_range / 1000.)\n x, y = np.meshgrid(x, y)\n z = np.zeros((1000, 1000))\n for i in range(0, 1000):\n for j in range(0, 1000):\n x_parameter_value = x[i][j]\n parameter_values[x_parameter_index] = x_parameter_value\n y_parameter_value = y[i][j]\n parameter_values[y_parameter_index] = y_parameter_value\n plottable = 0.\n try:\n plottable = getattr(self.functions, z_plottable)(*parameter_values)\n except ValueError:\n continue\n z[i][j] = plottable\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n surf = ax.plot_surface(x, y, z, linewidth=0, cmap=plt.cm.coolwarm, antialiased=False)\n ax.set_zlim(-1, np.amax(z) + 1)\n fig.colorbar(surf, shrink=0.5, aspect=5)\n plt.xlabel(x_parameter + self.mortgage_parameters[x_parameter][3], labelpad=8)\n plt.xticks(fontsize=7)\n plt.ylabel(y_parameter + self.mortgage_parameters[y_parameter][3], labelpad=8)\n plt.yticks(fontsize=7)\n plt.show()\n\nclass me_calculator_functions:\n def __init__(self, escrow_rate=None, property_value_growth_rate=0.):\n self.escrow_rate = escrow_rate\n self.include_escrow_expenses = self.escrow_rate is not None\n self.property_value_growth_rate = property_value_growth_rate\n\n def mortgage_payment(self, mortgage_duration, mortgage_principal, mortgage_interest_rate):\n escrow_expenses = self.escrow_rate * mortgage_principal if self.include_escrow_expenses else 0.\n return mortgage_interest_rate * mortgage_principal * exp(mortgage_interest_rate * mortgage_duration) / (exp(mortgage_interest_rate * mortgage_duration) - 1.) + escrow_expenses\n\n def mortgage_principal(self, mortgage_payment, mortgage_duration, mortgage_interest_rate):\n escrow_rate = self.escrow_rate if self.include_escrow_expenses else 0.\n return mortgage_payment * (1. - exp(-1 * mortgage_interest_rate * mortgage_duration)) / (mortgage_interest_rate + escrow_rate * (1. 
- exp(-1 * mortgage_interest_rate * mortgage_duration)))\n\n def mortgage_interest_rate(self, mortgage_payment, mortgage_duration, mortgage_principal):\n escrow_expenses = self.escrow_rate * mortgage_principal if self.include_escrow_expenses else 0.\n corrected_payment = mortgage_payment - escrow_expenses\n for interest_step in range(1, 20000):\n interest = interest_step * 0.001\n if exp(interest * mortgage_duration) - (corrected_payment / (corrected_payment - interest * mortgage_principal)) < 0.:\n return interest\n\n def mortgage_duration(self, mortgage_payment, mortgage_principal, mortgage_interest_rate):\n escrow_expenses = self.escrow_rate * mortgage_principal if self.include_escrow_expenses else 0.\n corrected_payment = mortgage_payment - escrow_expenses\n return (1. / mortgage_interest_rate) * log(corrected_payment / (corrected_payment - mortgage_interest_rate * mortgage_principal))\n\n def mortgage_interest(self, mortgage_payment, mortgage_principal, mortgage_interest_rate):\n escrow_expenses = self.escrow_rate * mortgage_principal if self.include_escrow_expenses else 0.\n corrected_payment = mortgage_payment - escrow_expenses\n return (corrected_payment / mortgage_interest_rate) * log(corrected_payment / (corrected_payment - mortgage_interest_rate * mortgage_principal)) - mortgage_principal\n\n def mortgage_escrow(self, mortgage_payment, mortgage_principal, mortgage_interest_rate):\n duration = self.mortgage_duration(mortgage_payment, mortgage_principal, mortgage_interest_rate)\n escrow_expenses = self.escrow_rate * mortgage_principal if self.include_escrow_expenses else 0.\n return duration * escrow_expenses\n\n def mortgage(self, mortgage_payment, mortgage_principal, mortgage_interest_rate):\n duration = self.mortgage_duration(mortgage_payment, mortgage_principal, mortgage_interest_rate)\n return duration * mortgage_payment\n\n def property_value(self, mortgage_principal, property_value_growth_rate, time):\n return mortgage_principal * pow(1. + property_value_growth_rate, time)\n\n def mortgage_principal_residual(self, mortgage_payment, mortgage_principal, mortgage_interest_rate, time):\n duration = self.mortgage_duration(mortgage_payment, mortgage_principal, mortgage_interest_rate)\n if time > duration:\n raise ValueError\n escrow_expenses = self.escrow_rate * mortgage_principal if self.include_escrow_expenses else 0.\n corrected_payment = mortgage_payment - escrow_expenses\n return (corrected_payment / mortgage_interest_rate) * (1. 
- exp(mortgage_interest_rate * time)) + mortgage_principal * exp(mortgage_interest_rate * time)\n\n def mortgage_principal_paid(self, mortgage_payment, mortgage_principal, mortgage_interest_rate, time):\n duration = self.mortgage_duration(mortgage_payment, mortgage_principal, mortgage_interest_rate)\n if time > duration:\n raise ValueError\n residual_principal = self.mortgage_principal_residual(mortgage_payment, mortgage_principal, mortgage_interest_rate, time)\n return mortgage_principal - residual_principal\n\n def mortgage_interest_residual(self, mortgage_payment, mortgage_principal, mortgage_interest_rate, time):\n duration = self.mortgage_duration(mortgage_payment, mortgage_principal, mortgage_interest_rate)\n if time > duration:\n raise ValueError\n residual_principal = self.mortgage_principal_residual(mortgage_payment, mortgage_principal, mortgage_interest_rate, time)\n escrow_expenses = self.escrow_rate * mortgage_principal if self.include_escrow_expenses else 0.\n mortgage_total = duration * (mortgage_payment - escrow_expenses)\n amount_paid_to_date = time * (mortgage_payment - escrow_expenses)\n residual_interest = mortgage_total - amount_paid_to_date - residual_principal\n return residual_interest\n\n def mortgage_interest_paid(self, mortgage_payment, mortgage_principal, mortgage_interest_rate, time):\n duration = self.mortgage_duration(mortgage_payment, mortgage_principal, mortgage_interest_rate)\n if time > duration:\n raise ValueError\n residual_principal = self.mortgage_principal_residual(mortgage_payment, mortgage_principal, mortgage_interest_rate, time)\n escrow_expenses = self.escrow_rate * mortgage_principal if self.include_escrow_expenses else 0.\n amount_paid_to_date = time * (mortgage_payment - escrow_expenses)\n paid_interest = amount_paid_to_date - mortgage_principal + residual_principal\n return paid_interest\n\n def mortgage_escrow_residual(self, mortgage_payment, mortgage_principal, mortgage_interest_rate, time):\n duration = self.mortgage_duration(mortgage_payment, mortgage_principal, mortgage_interest_rate)\n if time > duration:\n raise ValueError\n escrow_expenses = self.escrow_rate * mortgage_principal if self.include_escrow_expenses else 0.\n return (duration - time) * escrow_expenses\n\n def mortgage_escrow_paid(self, mortgage_payment, mortgage_principal, mortgage_interest_rate, time):\n duration = self.mortgage_duration(mortgage_payment, mortgage_principal, mortgage_interest_rate)\n if time > duration:\n raise ValueError\n escrow_expenses = self.escrow_rate * mortgage_principal if self.include_escrow_expenses else 0.\n return time * escrow_expenses\n\n def mortgage_residual(self, mortgage_payment, mortgage_principal, mortgage_interest_rate, time):\n duration = self.mortgage_duration(mortgage_payment, mortgage_principal, mortgage_interest_rate)\n if time > duration:\n raise ValueError\n return (duration - time) * mortgage_payment\n\n def mortgage_paid(self, mortgage_payment, mortgage_principal, mortgage_interest_rate, time):\n duration = self.mortgage_duration(mortgage_payment, mortgage_principal, mortgage_interest_rate)\n if time > duration:\n raise ValueError\n return time * mortgage_payment\n\n#calculator = MeCalculator(mortgage_payment=67899.68888,\n# mortgage_duration=35.,\n# mortgage_principal=347906,\n# mortgage_interest_rate=0.03,\n# escrow_rate=0.0,\n# property_value_growth_rate=0.05)\n#calculator.plot_1d(\"time\", [\"mortgage_principal_paid\", \"mortgage_interest_paid\", \"mortgage_escrow_paid\", 
\"mortgage_paid\"])\n#calculator.plot_1d(\"mortgage_duration\", [\"mortgage_payment\"])\n#calculator.plot_2d(\"mortgage_principal\", \"time\", \"mortgage_interest_paid\")\n", "id": "5065395", "language": "Python", "matching_score": 2.451793670654297, "max_stars_count": 0, "path": "me_calculator/me_calculator.py" }, { "content": "import inspect\nfrom .me_calculator_errors import UnknownParameter, UnknownPlottable, PlottableNotDependentOnParameter\n\n\ndef argument_checker(func):\n def wrapper(self, *args):\n argument_names = inspect.getfullargspec(func).args[1:]\n for i, argument_name in enumerate(argument_names):\n if argument_name == \"x_parameter\":\n _check_parameter(self, args[i])\n if argument_name == \"y_parameter\":\n _check_parameter(self, args[i])\n if argument_name == \"y_plottable\":\n if \"x_parameter\" not in argument_names:\n raise KeyError\n _check_plottable(self, args[argument_names.index(\"x_parameter\")], args[i])\n if argument_name == \"z_plottable\":\n if \"x_parameter\" not in argument_names or \"y_parameter\" not in argument_names:\n raise KeyError\n _check_plottable(self, args[argument_names.index(\"x_parameter\")], args[i])\n _check_plottable(self, args[argument_names.index(\"y_parameter\")], args[i])\n if argument_name == \"y_plottables\":\n if \"x_parameter\" not in argument_names:\n raise KeyError\n _check_plottables(self, args[argument_names.index(\"x_parameter\")], args[i])\n return func(self, *args)\n return wrapper\n\n\ndef _check_parameter(self, parameter):\n if parameter not in self.mortgage_parameters:\n raise UnknownParameter\n\n\ndef _check_plottable(self, parameter, plottable):\n if plottable not in self.mortgage_plottables:\n raise UnknownPlottable\n argument_names = inspect.getfullargspec(getattr(self.functions, plottable)).args[1:]\n if parameter not in argument_names:\n raise PlottableNotDependentOnParameter\n\n\ndef _check_plottables(self, parameter, plottables):\n if not isinstance(plottables, list):\n raise TypeError\n for plottable in plottables:\n _check_plottable(self, parameter, plottable)\n", "id": "11129209", "language": "Python", "matching_score": 3.628591299057007, "max_stars_count": 0, "path": "me_calculator/me_calculator_decorators.py" }, { "content": "class UnknownParameter(Exception):\n pass\n\n\nclass UnknownPlottable(Exception):\n pass\n\n\nclass PlottableNotDependentOnParameter(Exception):\n pass\n", "id": "7044001", "language": "Python", "matching_score": 1.3954139947891235, "max_stars_count": 0, "path": "me_calculator/me_calculator_errors.py" } ]
2.451794
albrazeau
[ { "content": "from geopy.geocoders import Nominatim\nfrom headers import GEOPY_USRNAME\n\ndef add_geolocation(city_and_state: str) -> tuple:\n\n geolocator = Nominatim(user_agent=GEOPY_USRNAME)\n location = geolocator.geocode(city_and_state)\n\n if location:\n\n geoloc = (location.latitude, location.longitude)\n return geoloc\n \n else:\n return \"No location data found.\"", "id": "8997106", "language": "Python", "matching_score": 0.32345259189605713, "max_stars_count": 1, "path": "app/add_geodata.py" }, { "content": "from googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nimport pickle\nimport os.path\nimport base64\nimport email\nfrom email.mime.text import MIMEText\nfrom bs4 import BeautifulSoup\n\n# Define the SCOPES. If modifying it, delete the token.pickle file.\nSCOPES = ['https://mail.google.com/',\n'https://www.googleapis.com/auth/gmail.modify',\n'https://www.googleapis.com/auth/gmail.readonly']\n\ndef connectServer():\n # Variable creds will store the user access token.\n # If no valid token found, we will create one.\n creds = None\n \n # The file token.pickle contains the user access token.\n # Check if it exists\n if os.path.exists('token.pickle'):\n \n # Read the token from the file and store it in the variable creds\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n print(creds)\n\n # If credentials are not available or are invalid, ask the user to log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n \n # Save the access token in token.pickle file for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n # Connect to the Gmail API\n service = build('gmail', 'v1', credentials=creds)\n\n return service\n\n\ndef checkMessages(service, userId = 'me'):\n \n # request a list of all the messages\n result = service.users().messages().list(userId='me').execute()\n\n # We can also pass maxResults to get any number of emails. 
Like this:\n    # result = service.users().messages().list(maxResults=200, userId='me').execute()\n    messages = result.get('messages')\n\n    # messages is a list of dictionaries where each dictionary contains a message id.\n\n    # iterate through all the messages\n    for msg in messages:\n\n        msg_content = service.users().messages().get(userId=userId, id=msg['id']).execute()\n        subject = ''\n        sender = ''\n\n        for part in msg_content['payload']['parts']:\n            if 'attachmentId' in part['body'].keys():\n\n                n = 1\n\n                for element in msg_content['payload']['headers']:\n                    if element['name'] == 'Subject':\n                        subject = element['value']\n                    if element['name'] == 'From':\n                        sender = element['value']\n\n                msg_body = msg_content['snippet']\n                attachmentId = part['body']['attachmentId']\n\n                pipeline_fields = extractMessageBody(msg_body)\n\n                if pipeline_fields['Valid'] == True:\n\n                    attachmentObj = service.users().messages().attachments().get(\n                        userId=userId,\n                        messageId=msg['id'],\n                        id=part['body']['attachmentId']\n                    ).execute()\n\n                    attachment = base64.urlsafe_b64decode(\n                        attachmentObj[\"data\"]\n                    )\n\n                    photo_filepath = f\"./data/raw/{subject}_{n}.JPG\"\n\n                    with open(photo_filepath, \"wb\") as f:\n                        f.write(attachment)\n\n    return photo_filepath, pipeline_fields\n\ndef extractMessageBody(msg_body):\n\n    msg_body = msg_body.lower().split(\",\")\n    fields = {\n        'Location':'',\n        'Caption':'',\n        'Valid': False\n    }\n\n    if 'beepthisjeep' in \"\".join(msg_body).replace(\" \",\"\"):\n        fields['Valid'] = True\n\n    for msg in msg_body:\n        if 'location' in msg:\n            fields['Location'] = msg.replace('location','').capitalize().strip()\n        if 'caption' in msg:\n            fields['Caption'] = msg.replace('caption','').capitalize().strip()\n\n    return fields\n\ndef generateResponse(sender, to, subject, message_text):\n    \"\"\"Create a message for an email.\n\n    Args:\n        sender: Email address of the sender.\n        to: Email address of the receiver.\n        subject: The subject of the email message.\n        message_text: The text of the email message.\n\n    Returns:\n        An object containing a base64url encoded email object.\n    \"\"\"\n    message = MIMEText(message_text)\n    message['to'] = to\n    message['from'] = sender\n    message['subject'] = subject\n    return {'raw': base64.urlsafe_b64encode(message.as_string().encode()).decode()}\n\n\ndef sendMessage(service, user_id, message):\n    \"\"\"Send an email message.\n\n    Args:\n        service: Authorized Gmail API service instance.\n        user_id: User's email address. The special value \"me\"\n            can be used to indicate the authenticated user.\n        message: Message to be sent.\n\n    Returns:\n        Sent Message.\"\"\"\n\n    try:\n        message = service.users().messages().send(userId=user_id, body=message).execute()\n        print('Message Id:', message['id'])\n        return message\n    except Exception as error:\n        print('An error occurred: ', error)", "id": "7520977", "language": "Python", "matching_score": 1.075318455696106, "max_stars_count": 1, "path": "app/gmail.py" }, { "content": "import sys\nimport os\nimport subprocess\nfrom pprint import pprint\nfrom datetime import datetime\n\n\nclass TextLogger:\n    \"\"\"\n    An object to be used to log info, errors and results to a text file.\n\n    When a new instance of this object is made, it will open a text\n    file at the given path and immediately record the python version,\n    the path to the python .exe, the current working directory, and\n    the path to the python file that is being run (NOTE: this will\n    not happen in a jupyter notebook). 
If you are using a conda\n    environment, it will also log the modules and versions of the\n    packages in the environment.\n\n    Example:\n        try:\n            # do something\n        except Exception as e:\n            log.log_error(e)\n    \"\"\"\n\n    def __init__(self, path=False):\n        if not path:\n            self.path = \"log_{}.txt\".format(\n                str(datetime.now()).split(\" \")[0].replace(\"-\", \"\")\n            )\n        else:\n            self.path = path\n        self.executable = sys.executable\n        self.version = sys.version\n        self.cwd = os.getcwd()\n        self.closed = False\n        self.timing = False\n\n        try:\n            self.filepath = os.path.realpath(__file__)\n        except:\n            pass\n\n        if \"Continuum\" in self.executable:\n            try:\n                self._conda_packages = subprocess.check_output(\n                    \"conda list -n {}\".format(sys.executable.split(\"\\\\\")[-2]),\n                    universal_newlines=True,\n                )\n            except:\n                self._conda_packages = subprocess.check_output(\n                    \"conda list\",universal_newlines=True,\n                )\n\n        self.start_log()\n\n    def start_log(self):\n        with open(self.path, \"w\") as f:\n            f.write(\"LOG INITIALIZED AT : {}\\n\\n\".format(str(datetime.now())))\n            f.write(\"Python version: {}\\n\".format(self.version))\n            f.write(\"Python executable path: {}\\n\".format(self.executable))\n            f.write(\"Current working directory: {}\\n\".format(self.cwd))\n            try:\n                f.write(\"File path: {}\\n\".format(self.filepath))\n            except:\n                pass\n            try:\n                f.write(\"\\n{}{}\\n\".format(self._conda_packages, \"-\" * 72))\n            except:\n                pass\n        return\n\n    def conda_pkgs(self):\n        \"\"\"\n        Use this method to pretty print the packages in your conda env.\n        \"\"\"\n        if not self.closed:\n            return pprint(self._conda_packages)\n        else:\n            raise Exception(\n                \"This log has been closed. See {} for the log file.\".format(self.path)\n            )\n\n    def log_error(self, error):\n        \"\"\"\n        Use this method within \"except\" to log errors and move on.\n        \"\"\"\n        if not self.closed:\n            with open(self.path, \"a\") as f:\n                f.write(\"\\n--- {} ---\\n\".format(str(datetime.now())))\n                f.write(\"ERROR: {}\\n\".format(error))\n            print(error)\n            return\n        else:\n            raise Exception(\n                \"This log has been closed. See {} for the log file.\".format(self.path)\n            )\n\n    def log_output(self, output):\n        \"\"\"\n        Use this method to log the output of an operation to your text log.\n        \"\"\"\n        if not self.closed:\n            with open(self.path, \"a\") as f:\n                if isinstance(output, list):\n                    f.write(\"\\n--- {} ---\\nOUTPUT: list\\n\".format(str(datetime.now())))\n                    [f.write(str(x) + \"\\n\") for x in output]\n                else:\n                    f.write(\"\\n--- {} ---\\n\".format(str(datetime.now())))\n                    f.write(\"OUTPUT: {}\\n\".format(str(output)))\n            return\n        else:\n            raise Exception(\n                \"This log has been closed. See {} for the log file.\".format(self.path)\n            )\n\n    def add_message(self, message, printed=False):\n        \"\"\"\n        Use this method to add a message to your log.\n        \"\"\"\n        if not self.closed:\n            if not printed:\n                assert isinstance(\n                    message, str\n                ), \"Error: method add_message expects a string.\"\n                with open(self.path, \"a\") as f:\n                    f.write(\"\\n--- {} ---\\n\".format(str(datetime.now())))\n                    f.write(\"MESSAGE: {}\\n\".format(message))\n                return\n            else:\n                assert isinstance(\n                    message, str\n                ), \"Error: method add_message expects a string.\"\n                with open(self.path, \"a\") as f:\n                    f.write(\"\\n--- {} ---\\n\".format(str(datetime.now())))\n                    f.write(\"MESSAGE: {}\\n\".format(message))\n                print(\"MESSAGE: {}\".format(message))\n        else:\n            raise Exception(\n                \"This log has been closed. 
See {} for the log file.\".format(self.path)\n )\n\n def timestamp(self):\n \"\"\"\n Use this method to insert a timestamp into your log.\n \"\"\"\n if not self.closed:\n with open(self.path, \"a\") as f:\n f.write(\"\\n--------------\\n\")\n f.write(\"TIMESTAMP: {}\".format(str(datetime.now())))\n f.write(\"\\n--------------\\n\")\n return\n else:\n raise Exception(\n \"This log has been closed. See {} for the log file.\".format(self.path)\n )\n\n def start_timer(self):\n \"\"\"\n Use this method to begin a timer.\n \"\"\"\n if not self.closed:\n self.timing = True\n self.start = datetime.now()\n return\n else:\n raise Exception(\n \"This log has been closed. See {} for the log file.\".format(self.path)\n )\n\n def end_timer(self, message=False):\n \"\"\"\n Use this method to end a timer. The processing time will be \n printed and written to the log file.\n \"\"\"\n if not self.closed:\n if self.timing:\n if not message:\n proc_time = datetime.now() - self.start\n print(\"Processing time: {}\".format(proc_time))\n with open(self.path, \"a\") as f:\n f.write(\"\\nProcessing time: {}\\n\".format(proc_time))\n else:\n assert isinstance(\n message, str\n ), \"Error: end_timer expects no arguments or a message string.\"\n proc_time = datetime.now() - self.start\n print(\n \"MESSAGE: {}\\nProcessing time: {}\".format(message, proc_time)\n )\n with open(self.path, \"a\") as f:\n f.write(\n \"\\nMESSAGE: {}\\nProcessing time: {}\\n\".format(\n message, proc_time\n )\n )\n else:\n raise Exception(\n \"No timer has been started. Call the start_timer method to begin timing a process.\"\n )\n return\n else:\n raise Exception(\n \"This log has been closed. See {} for the log file.\".format(self.path)\n )\n\n def close(self):\n \"\"\"\n Use this method to end the log. This will record the time it\n was closed. Making a new log with the same filepath will \n overwrite the old log.\n \"\"\"\n if not self.closed:\n self.closed = True\n with open(self.path, \"a\") as f:\n f.write(\"\\n\\n{}\\n\".format(\"-\" * 72))\n f.write(\"LOG CLOSED AT: {}\".format(str(datetime.now())))\n print(\"Log closed.\")\n return\n else:\n raise Exception(\n \"This log has been closed. 
See {} for the log file.\".format(self.path)\n )\n", "id": "8964765", "language": "Python", "matching_score": 1.2924846410751343, "max_stars_count": 2, "path": "src/textlogger.py" }, { "content": "from setuptools import setup\n\n# This call to setup() does all the work\nsetup(\n name=\"openlocationcode\",\n version=\"1.0.1\",\n description=\"Python library for Open Location Code (Plus Codes)\",\n url=\"https://github.com/google/open-location-code\",\n author=\"Google\",\n author_email=\"<EMAIL>\",\n license=\"Apache 2.0\",\n classifiers=[\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=[\"openlocationcode\"],\n include_package_data=True,\n install_requires=[],\n)", "id": "11760905", "language": "Python", "matching_score": 1, "max_stars_count": 3627, "path": "python/setup.py" }, { "content": "from openlocationcode import *", "id": "3543872", "language": "Python", "matching_score": 0, "max_stars_count": 3627, "path": "python/openlocationcode/__init__.py" }, { "content": "from flask import Flask, render_template, request, jsonify, Markup, url_for\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS, GPSTAGS\nimport folium\nfrom folium import IFrame\nimport folium.plugins as plugins\nimport base64\nimport glob\nimport os\nimport psycopg2\nimport pandas as pd\nfrom contextlib import closing\nfrom headers import DB_CONNECTION_LOCAL, DB_CONNECTION_DOCKER\n\n# local\n# DB_CONNECTION = DB_CONNECTION_LOCAL\n# clean_dir = \"/home/ubuntu/workbench/roadtrip-gate/data/ready\"\n\n# docker\nDB_CONNECTION = DB_CONNECTION_DOCKER\nclean_dir = \"/data/ready\"\n\nUS_CENTER = ('39.009734', '-97.555620')\n\napp = Flask(__name__)\n\n\[email protected](\"/\")\ndef home():\n\n return render_template(\"index.html\")\n\[email protected]('/map')\ndef map():\n\n # fetch_sql = f\"\"\"SELECT\n # attachment_id,\n # filepath,\n # caption,\n # date_taken,\n # ST_X(geom) AS lon_x,\n # ST_Y(geom) AS lat_y\n # FROM\n # roadtrip.images;\"\"\"\n\n # with closing(psycopg2.connect(DB_CONNECTION)) as conn:\n # df = pd.read_sql(fetch_sql, conn)\n\n # folium_map = folium.Map(location=US_CENTER, \n # zoom_start=4, scrollWheelZoom=False, tiles=None)\n\n # folium_map.add_child(plugins.Geocoder())\n # folium.TileLayer('openstreetmap', name = 'Street').add_to(folium_map)\n # folium.TileLayer('Stamen Terrain', name = 'Terrain').add_to(folium_map)\n # folium.TileLayer(\n # tiles = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',\n # attr = 'Esri',\n # name = 'Satellite'\n # ).add_to(folium_map)\n # folium.LayerControl().add_to(folium_map)\n # folium_map.add_child(plugins.Fullscreen(position='topleft', title='Full Screen', title_cancel='Exit Full Screen', force_separate_button=False))\n\n # for idx in range(len(df)):\n\n # img_name = df.iloc[idx]['caption']\n # filepath = df.iloc[idx]['filepath']\n # lon = df.iloc[idx]['lon_x']\n # lat = df.iloc[idx]['lat_y']\n\n # # resize appropriately\n # image = Image.open(filepath)\n # width, height = image.size\n # width = width + 25\n # height = height + 25\n\n # # nesting the image loading and route mapping to a try except-- top candidate for future refactor\n\n # try:\n\n # encoded = base64.b64encode(open(filepath, 'rb').read())\n # html = '<img src=\"data:image/JPG;base64,{}\">'.format(encoded.decode(\"UTF-8\"))\n \n # iframe = IFrame(html, width=width, height=height)\n\n # popup = folium.Popup(iframe, max_width=width+25)\n # tooltip = img_name.replace(\"_\",\" \")\n \n # 
folium.Marker(location=(lat, lon), tooltip=tooltip, popup=popup, icon=folium.Icon(color='blue')).add_to(folium_map)\n\n # except:\n # pass\n\n # # try:\n # # draw_line = list(df.sort_values(by='date_taken')[['lat_y', 'lon_x']].apply(tuple, axis=1))\n # # folium.PolyLine(draw_line, color=\"#c20dff\", weight=2.5, opacity=1).add_to(folium_map)\n # # except:\n # # pass\n\n # folium_map.save('templates/map.html')\n\n return render_template('map.html')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "id": "10741797", "language": "Python", "matching_score": 6.320701599121094, "max_stars_count": 1, "path": "app/main.py" }, { "content": "from googleapiclient.discovery import build\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\n\nimport folium\nfrom folium import IFrame\nimport folium.plugins as plugins\n\nfrom PIL import ImageFile, Image\nImageFile.LOAD_TRUNCATED_IMAGES = True\nfrom PIL.ExifTags import TAGS\nfrom PIL.ExifTags import GPSTAGS\n\nfrom contextlib import closing\nimport os.path\nimport base64\nimport re\nimport psycopg2\nimport random\nimport pandas as pd\nfrom glob import glob\nimport logging\n\nfrom headers import GEOPY_USRNAME, VALID_EMAILS, DB_CONNECTION_DOCKER, DB_CONNECTION_LOCAL\nfrom geopy.geocoders import Nominatim\nfrom datetime import datetime\nfrom time import sleep\n\nfrom functools import partial\n\nprint = partial(print, flush=True)\n\n# local\n# DB_CONNECTION = DB_CONNECTION_LOCAL\n# RAW_PATH = '/home/ubuntu/workbench/roadtrip-gate/data/raw'\n# READY_PATH = '/home/ubuntu/workbench/roadtrip-gate/data/ready'\n\n# docker\nDB_CONNECTION = DB_CONNECTION_DOCKER\nRAW_PATH = '/data/raw'\nREADY_PATH = '/data/ready'\n\nDEFAULT_GEO = ('39.009734', '-97.555620')\n\nlogging.basicConfig(filename='/app/gotmail.log', level=logging.WARNING)\n\ndef get_geotagging(exif):\n if not exif:\n raise ValueError(\"No EXIF metadata found\")\n\n geotagging = {}\n for (idx, tag) in TAGS.items():\n if tag == \"GPSInfo\":\n if idx not in exif:\n raise ValueError(\"No EXIF geotagging found\")\n\n for (key, val) in GPSTAGS.items():\n if key in exif[idx]:\n geotagging[val] = exif[idx][key]\n\n return geotagging\n\n\ndef get_labeled_exif(exif):\n labeled = {}\n for (key, val) in exif.items():\n labeled[TAGS.get(key)] = val\n\n return labeled\n\n\ndef get_exif(filename):\n image = Image.open(filename)\n image.verify()\n return image._getexif()\n\n\ndef get_decimal_from_dms(dms, ref):\n\n degrees = dms[0]\n minutes = dms[1] / 60.0\n seconds = dms[2] / 3600.0\n\n if ref in [\"S\", \"W\"]:\n degrees = -degrees\n minutes = -minutes\n seconds = -seconds\n\n return round(degrees + minutes + seconds, 5)\n\n\ndef get_coordinates(geotags):\n lat = get_decimal_from_dms(geotags[\"GPSLatitude\"], geotags[\"GPSLatitudeRef\"])\n lon = get_decimal_from_dms(geotags[\"GPSLongitude\"], geotags[\"GPSLongitudeRef\"])\n\n return (lat, lon)\n\n\ndef insertImg(sql):\n with closing(psycopg2.connect(DB_CONNECTION)) as conn:\n with conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n conn.commit()\n\ndef resize_and_save(old_filepath, new_filepath, m_data):\n\n image = Image.open(old_filepath)\n\n if m_data.get('Orientation') == 3:\n image=image.rotate(180, expand=True)\n elif m_data.get('Orientation') == 6:\n image=image.rotate(270, expand=True)\n elif m_data.get('Orientation') == 8:\n image=image.rotate(90, expand=True)\n\n width, height = image.size\n\n if width >= height:\n w = 292\n h = 219\n\n if 
height >= width:\n w = 219\n h = 292\n\n image = image.resize((w,h), Image.ANTIALIAS)\n\n data = list(image.getdata())\n\n no_exif = Image.new(image.mode, image.size)\n no_exif.putdata(data)\n\n no_exif.save(new_filepath)\n os.remove(old_filepath)\n\n\ndef extractEmail(text:str):\n match = re.search(r'[\\w.+-]+@[\\w-]+\\.[\\w.-]+', text)\n if match:\n return match.group(0)\n\ndef add_geolocation(city_and_state: str) -> tuple:\n\n geolocator = Nominatim(user_agent=GEOPY_USRNAME)\n location = geolocator.geocode(city_and_state)\n\n if location:\n return (location.latitude, location.longitude)\n \n else:\n return (None, None)\n\ndef imgExists(img_id):\n\n check_img = f\"\"\"\n SELECT * FROM roadtrip.images WHERE attachment_id = '{img_id}';\n \"\"\"\n\n with closing(psycopg2.connect(DB_CONNECTION)) as conn:\n with conn:\n exists = pd.read_sql(check_img, conn)\n\n if len(exists) >= 1:\n return True\n\n return False\n\ndef getImgData(img_path):\n\n check_img = f\"\"\"\n SELECT * FROM roadtrip.images WHERE filepath = '{img_path}';\n \"\"\"\n\n with closing(psycopg2.connect(DB_CONNECTION)) as conn:\n with conn:\n df = pd.read_sql(check_img, conn)\n return df\n\n return False\n\ndef connectGmail(token = '<PASSWORD>'):\n \n print('Establishing connection to GMAIL.')\n\n # Define the SCOPES. If modifying it, delete the token.pickle file.\n SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']\n\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.json'):\n creds = Credentials.from_authorized_user_file('token.json', SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('gmail', 'v1', credentials=creds)\n\n return service\n\ndef fetchEmailData(gmail_connection):\n\n # request a list of all the messages\n result = gmail_connection.users().messages().list(userId='me').execute()\n\n # can also pass maxResults to get any number of emails. 
Like this:\n # result = service.users().messages().list(maxResults=200, userId='me').execute()\n messages = result.get('messages')\n\n # object to hold valid image submission\n image_dictionary = {}\n\n # iterate through all the messages\n for msg in messages:\n \n check_content = f\"\"\"\n SELECT email_id FROM roadtrip.images;\n \"\"\"\n\n with closing(psycopg2.connect(DB_CONNECTION)) as conn:\n with conn:\n df = pd.read_sql(check_content, conn)\n\n if not msg['id'] in df.email_id.to_list():\n\n msg_content = gmail_connection.users().messages().get(userId='me', id=msg['id']).execute()\n photo_information = {}\n n = 1\n\n if msg_content['snippet'] and 'date' in msg_content['snippet'].lower():\n date = msg_content['snippet'].lower().split('date:')[1].split(';')[0].strip().title()\n if not \":\" in date:\n date += ' 0:00 AM'\n photo_information['Date'] = datetime.strptime(date.replace(\",\", \"\"), '%B %d %Y %H:%M %p')\n\n\n\n # parse message object to pull subject data and validate sender in payload HEADERS\n for element in msg_content['payload']['headers']:\n if element['name'] == 'Subject':\n if ';' in element['value']:\n location, caption = element['value'].split(\";\")\n else:\n location, caption = element['value'], element['value']\n photo_information['Location'] = location\n photo_information['Caption'] = caption\n\n if element['name'] == 'From' and extractEmail(element['value']) in VALID_EMAILS:\n photo_information['Valid'] = True\n\n # parse message oject for attachments in payload PARTS\n for part in msg_content['payload']['parts']:\n if 'attachmentId' in part['body'].keys():\n\n if msg['id'] in image_dictionary.keys():\n image_dictionary[msg['id']]['Attachments'].append(part['body']['attachmentId'])\n else:\n photo_information['Attachments'] = []\n photo_information['Attachments'].append(part['body']['attachmentId'])\n image_dictionary[msg['id']] = photo_information\n if 'filename' in part['body'].keys():\n print(part['body']['filename'])\n \n return image_dictionary\n\n\n\ndef insertImg(attachment_ID, email_id, photo_location, caption, filepath, date_taken, lat, lon):\n \n sql = f\"\"\"\n INSERT INTO roadtrip.images VALUES (\n '{attachment_ID}',\n '{email_id}',\n '{photo_location}',\n '{caption}',\n '{filepath}',\n TO_TIMESTAMP('{date_taken}', 'YYYY:MM:DD HH24:MI:SS')::timestamp,\n ST_SetSRID(ST_Point({lon}, {lat}), 4326)\n )\n \"\"\"\n\n with closing(psycopg2.connect(DB_CONNECTION)) as conn:\n with conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n conn.commit()\n\ndef loadRawImages(image_dictionary, service):\n\n for message_key, message_value in image_dictionary.items():\n n = 1\n\n for img_id in message_value['Attachments']:\n\n attachment_ID = img_id[:25]\n\n if not imgExists(attachment_ID):\n\n print('Processing Image- loading to raw folder.')\n\n attachmentObj = service.users().messages().attachments().get(\n userId='me', \n messageId=message_key,\n id=img_id\n ).execute()\n\n img_name = re.sub(\"[^a-zA-Z]+\", \"\", attachment_ID) + '.JPG'\n\n filepath = os.path.join(RAW_PATH, img_name)\n\n attachmentImg = base64.urlsafe_b64decode(\n attachmentObj['data']\n )\n if not os.path.exists(filepath):\n with open(filepath, \"wb\") as f:\n f.write(attachmentImg)\n\n # establish variables to be fed into sql insertion func\n email_id = message_key\n photo_location = message_value.get('Location', None)\n caption = message_value.get('Caption', None)\n filepath = filepath\n\n if \"'\" in caption and caption.count(\"'\")%2:\n idx = caption.index(\"'\")\n caption = caption[:idx] + 
\"'\" + caption[idx:]\n\n if \"'\" in photo_location and photo_location.count(\"'\")%2:\n idx = photo_location.index(\"'\")\n photo_location = photo_location[:idx] + \"'\" + photo_location[idx:]\n\n exif = get_exif(filepath)\n metadata = get_labeled_exif(exif)\n\n try:\n geotags = get_geotagging(exif)\n lat, lon = get_coordinates(geotags)\n date_taken = metadata[\"DateTimeOriginal\"]\n \n except:\n lat, lon = add_geolocation(photo_location)\n date_taken = message_value.get('Date', datetime.now())\n\n if not lat and not lon:\n lon, lat = DEFAULT_GEO\n resize_and_save(filepath, filepath.replace(\"raw\", \"nogeodata\"), metadata)\n insertImg(attachment_ID, email_id, photo_location, caption, filepath.replace(\"raw\", \"nogeodata\"), date_taken, lat, lon)\n\n if lat and lon:\n resize_and_save(filepath, filepath.replace(\"raw\", \"ready\"), metadata)\n insertImg(attachment_ID, email_id, photo_location, caption, filepath.replace(\"raw\", \"ready\"), date_taken, lat, lon)\n\ndef generateMap():\n\n US_CENTER = ('39.009734', '-97.555620')\n\n fetch_sql = f\"\"\"SELECT\n attachment_id,\n filepath,\n caption,\n date_taken,\n ST_X(geom) AS lon_x,\n ST_Y(geom) AS lat_y\n FROM\n roadtrip.images;\"\"\"\n\n with closing(psycopg2.connect(DB_CONNECTION)) as conn:\n df = pd.read_sql(fetch_sql, conn)\n\n folium_map = folium.Map(location=US_CENTER, \n zoom_start=4, scrollWheelZoom=False, tiles=None)\n\n folium_map.add_child(plugins.Geocoder())\n folium.TileLayer('openstreetmap', name = 'Street').add_to(folium_map)\n folium.TileLayer('Stamen Terrain', name = 'Terrain').add_to(folium_map)\n folium.TileLayer(\n tiles = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}',\n attr = 'Esri',\n name = 'Satellite'\n ).add_to(folium_map)\n folium.LayerControl().add_to(folium_map)\n folium_map.add_child(plugins.Fullscreen(position='topleft', title='Full Screen', title_cancel='Exit Full Screen', force_separate_button=False))\n\n for idx in range(len(df)):\n\n img_name = df.iloc[idx]['caption']\n filepath = df.iloc[idx]['filepath']\n lon = df.iloc[idx]['lon_x']\n lat = df.iloc[idx]['lat_y']\n\n # resize appropriately\n image = Image.open(filepath)\n width, height = image.size\n width = width + 25\n height = height + 25\n\n # nesting the image loading and route mapping to a try except-- top candidate for future refactor\n\n try:\n\n encoded = base64.b64encode(open(filepath, 'rb').read())\n html = '<img src=\"data:image/JPG;base64,{}\">'.format(encoded.decode(\"UTF-8\"))\n \n iframe = IFrame(html, width=width, height=height)\n\n popup = folium.Popup(iframe, max_width=width+25)\n tooltip = img_name.replace(\"_\",\" \")\n \n folium.Marker(location=(lat, lon), tooltip=tooltip, popup=popup, icon=folium.Icon(color='blue')).add_to(folium_map)\n\n except:\n pass\n\n # try:\n # draw_line = list(df.sort_values(by='date_taken')[['lat_y', 'lon_x']].apply(tuple, axis=1))\n # folium.PolyLine(draw_line, color=\"#c20dff\", weight=2.5, opacity=1).add_to(folium_map)\n # except:\n # pass\n\n folium_map.save('templates/map.html')\n\nif __name__ == \"__main__\":\n\n print(\"Starting: Generating default map\")\n sleep(60)\n generateMap()\n\n while True:\n\n try:\n print(\"Begin data pull from Gmail\")\n server = connectGmail()\n img_dict = fetchEmailData(server)\n loadRawImages(img_dict, server)\n generateMap()\n print(\"Data pull successful\")\n\n except Exception as e:\n logging.error(\"This is the error: %s\", e, exc_info=1)\n print(e)\n\n print(\"Sleeping\")\n sleep(60*60*4)", "id": "1327167", 
"language": "Python", "matching_score": 6.421755790710449, "max_stars_count": 1, "path": "app/gotmail.py" }, { "content": "from PIL import ImageFile, Image\nImageFile.LOAD_TRUNCATED_IMAGES = True\nfrom PIL.ExifTags import TAGS\nfrom PIL.ExifTags import GPSTAGS\nimport os\nfrom glob import glob\nimport psycopg2\nfrom contextlib import closing\nfrom time import sleep\nfrom headers import DB_CONNECTION_LOCAL, DB_CONNECTION_DOCKER\n\n# local\n# DB_CONNECTION = DB_CONNECTION_LOCAL\n\n# docker\nDB_CONNECTION = DB_CONNECTION_DOCKER\n\ndef get_geotagging(exif):\n if not exif:\n raise ValueError(\"No EXIF metadata found\")\n\n geotagging = {}\n for (idx, tag) in TAGS.items():\n if tag == \"GPSInfo\":\n if idx not in exif:\n raise ValueError(\"No EXIF geotagging found\")\n\n for (key, val) in GPSTAGS.items():\n if key in exif[idx]:\n geotagging[val] = exif[idx][key]\n\n return geotagging\n\n\ndef get_labeled_exif(exif):\n labeled = {}\n for (key, val) in exif.items():\n labeled[TAGS.get(key)] = val\n\n return labeled\n\n\ndef get_exif(filename):\n image = Image.open(filename)\n image.verify()\n return image._getexif()\n\n\ndef get_decimal_from_dms(dms, ref):\n\n degrees = dms[0]\n minutes = dms[1] / 60.0\n seconds = dms[2] / 3600.0\n\n if ref in [\"S\", \"W\"]:\n degrees = -degrees\n minutes = -minutes\n seconds = -seconds\n\n return round(degrees + minutes + seconds, 5)\n\n\ndef get_coordinates(geotags):\n lat = get_decimal_from_dms(geotags[\"GPSLatitude\"], geotags[\"GPSLatitudeRef\"])\n lon = get_decimal_from_dms(geotags[\"GPSLongitude\"], geotags[\"GPSLongitudeRef\"])\n\n return (lat, lon)\n\n\ndef insert_pg(sql):\n with closing(psycopg2.connect(DB_CONNECTION)) as conn:\n with conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n conn.commit()\n\ndef clean_and_resize(img):\n\n image = Image.open(img.replace(\"ready\",\"raw\"))\n width, height = image.size\n\n w, h = int(width/7), int(height/7)\n image = image.resize((w,h), Image.ANTIALIAS)\n\n data = list(image.getdata())\n\n no_exif = Image.new(image.mode, image.size)\n no_exif.putdata(data)\n\n no_exif.save(img)\n\n return image\n\n\nif __name__ == \"__main__\":\n \n while True:\n \n # print(\"Dude, suh!!!!\")\n sleep(5)\n\n# docker\n raw_img_dir = \"/data/raw\"\n clean_dir = \"/data/ready\"\n\n# local\n # raw_img_dir = \"/home/ubuntu/workbench/roadtrip-gate/data/raw\"\n # clean_dir = \"/home/ubuntu/workbench/roadtrip-gate/data/ready\"\n\n images = list(set(glob(os.path.join(raw_img_dir, \"*.JPG\")) + glob(os.path.join(raw_img_dir, \"*.jpg\"))))\n\n for img in images:\n\n exif = get_exif(img)\n metadata = get_labeled_exif(exif)\n\n try:\n\n geotags = get_geotagging(exif)\n lat, lon = get_coordinates(geotags)\n\n date_taken = metadata[\"DateTimeOriginal\"]\n guid_num = (\n float(metadata[\"ApertureValue\"])\n * float(metadata[\"BrightnessValue\"])\n * float(metadata[\"ExposureTime\"])\n * lat\n * lon\n )\n guid = f\"{guid_num}_{date_taken}\"\n\n clean_filename = f\"clean_{os.path.basename(img)}\"\n output_img_path = os.path.join(clean_dir, clean_filename)\n print(clean_dir, clean_filename)\n\n if not os.path.exists(output_img_path):\n\n final_img = clean_and_resize(img)\n\n # clean_img(img, output_img_path)\n\n insert_sql = f\"\"\"\n INSERT INTO roadtrip.images VALUES (\n '{guid}',\n '{clean_filename}',\n TO_TIMESTAMP('{date_taken}', 'YYYY:MM:DD HH24:MI:SS')::timestamp,\n ST_SetSRID(ST_Point({lon}, {lat}), 4326),\n '{orientation}'\n )\n ON CONFLICT (guid) DO UPDATE SET\n file_name = EXCLUDED.file_name,\n date_taken = EXCLUDED.date_taken,\n 
geom = EXCLUDED.geom,\n orientation = EXCLUDED.orientation\n ;\n \"\"\"\n\n insert_pg(insert_sql)\n \n except ValueError:\n\n image = Image.open(img)\n newpath = img.replace(\"raw\",\"nogeodata\")\n image.save(newpath)\n pass\n\n\n", "id": "4678249", "language": "Python", "matching_score": 1.244407296180725, "max_stars_count": 1, "path": "app/pipeline.py" }, { "content": "from flask import Flask, redirect, url_for, send_file, request, flash, render_template, after_this_request\nfrom flask_login import LoginManager, login_required, current_user, logout_user, login_user\nfrom werkzeug.utils import secure_filename\nfrom wtforms import Form, FileField, validators, TextField, StringField, PasswordField, BooleanField, SubmitField\nfrom wtforms.validators import DataRequired\nfrom flask_wtf import FlaskForm\nimport pathlib as pl\nimport os\nimport subprocess\nfrom shutil import make_archive\nimport logging\nfrom datetime import datetime\nfrom modules import mount_bkt, validate_dir_name, make_temp_dir, upload_file, dir_contents, pretty_size\nfrom functools import partial\nfrom db import db_init_app, User\n\n\nMOUNT_POINT = mount_bkt()\nAWS_BUCKET = os.getenv(\"AWS_S3_BUCKET\")\nFLASK_LOG = \"/var/log/nginx/flask.log\"\n\napp = Flask(__name__)\n\nlogging.basicConfig(filename=FLASK_LOG, level=logging.WARN)\n\napp.config[\"SECRET_KEY\"] = \"a super secret key\"\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = f\"sqlite:///{os.getenv('SQLITE_DB')}\"\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\napp.config[\"TEMP_DIR\"] = \"/temp-flask-dir\"\nmake_temp_dir(app.config[\"TEMP_DIR\"])\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = \"login\"\n\ndb = db_init_app(app)\n\nprint = partial(print, flush=True)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(user_id)\n\n\nclass ReusableForm(Form):\n input_file = FileField(\"input_file:\")\n create_dir = TextField(\"create_dir:\")\n\n\nclass LoginForm(FlaskForm):\n email = StringField(\"Email\", validators=[DataRequired()])\n password = PasswordField(\"Password\", validators=[DataRequired()])\n remember_me = BooleanField(\"Remember Me\")\n submit = SubmitField(\"Sign In\")\n\n\[email protected](\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for(\"index\"))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user is None or not user.check_password(form.password.data):\n flash(\"Invalid username or password\", \"error\")\n app.logger.warn(f\" {str(datetime.now())}: {form.email.data} login failed\")\n return redirect(url_for(\"login\"))\n login_user(user, remember=form.remember_me.data)\n app.logger.warn(f\" {str(datetime.now())}: {user.email} successfully logged in\")\n return redirect(url_for(\"index\"))\n return render_template(\"login.html\", title=\"Sign In\", form=form)\n\n\[email protected](\"/\")\n@login_required\ndef index():\n return redirect(f\"/explorer/{AWS_BUCKET}\")\n\n\[email protected](f\"/explorer/{AWS_BUCKET}\", methods=[\"GET\", \"POST\"])\n@login_required\ndef explorer():\n\n form = ReusableForm(request.form)\n # print(form.errors)\n\n dir_path = pl.Path(MOUNT_POINT)\n\n if request.method == \"GET\":\n html_content_list = dir_contents(dir_path)\n nav_parts = [x for x in dir_path.parts if x != \"/\"]\n return render_template(\n \"main.html\",\n form=form,\n bucket_content=html_content_list,\n aws_bucket=AWS_BUCKET,\n 
nav_parts=nav_parts,\n nav_len=len(nav_parts),\n )\n\n else:\n create_dir = request.form[\"create_dir\"]\n if create_dir and validate_dir_name(create_dir):\n new_dir = os.path.join(str(dir_path), create_dir)\n if not os.path.exists(new_dir):\n os.mkdir(new_dir)\n flash(f\"Successfully created {create_dir}!\", \"success\")\n app.logger.warn(f\" {str(datetime.now())}: {current_user.email} successfully created a directory: {create_dir}\")\n return redirect(request.url)\n else:\n flash(f\"{create_dir} already exists!\", \"error\")\n app.logger.warn(f\" {str(datetime.now())}: {current_user.email} failed to create directory: {create_dir} - it already exists\")\n return redirect(request.url)\n elif create_dir and not validate_dir_name(create_dir):\n illegal_chars = r\"\"\"`~!@#$%^&*()=+[{]}\\|:;\"'<,>.?/\"\"\"\n flash(\n f\"Error creating {create_dir}, cannot contain a space or the following characters: {illegal_chars}\",\n \"error\",\n )\n app.logger.warn(\n f\" {str(datetime.now())}: {current_user.email} failed to create directory: {create_dir} - it contains a special character\"\n )\n return redirect(request.url)\n\n input_file = request.files[\"input_file\"]\n filename = secure_filename(input_file.filename)\n if filename:\n tmpfile = os.path.join(app.config[\"TEMP_DIR\"], filename)\n input_file.save(tmpfile)\n uploaded_target_file = os.path.join(str(dir_path), filename)\n if upload_file(tmpfile, AWS_BUCKET, uploaded_target_file.split(MOUNT_POINT + \"/\")[1]):\n flash(f\"Successfully uploaded {filename}\", \"success\")\n app.logger.warn(\n f\" {str(datetime.now())}: {current_user.email} successfully uploaded a {pretty_size(os.stat(tmpfile).st_size)} file: {uploaded_target_file}\"\n )\n else:\n flash(f\"Error uploading {filename}\", \"error\")\n app.logger.error(\n f\" {str(datetime.now())}: {current_user.email} error uploading a {pretty_size(os.stat(tmpfile).st_size)} file to s3: {uploaded_target_file}\"\n )\n os.remove(tmpfile)\n return redirect(request.url)\n return redirect(request.url)\n\n\[email protected](f\"/explorer/<path:dir_path>\", methods=[\"GET\", \"POST\"])\n@login_required\ndef within_dir(dir_path):\n\n form = ReusableForm(request.form)\n # print(form.errors)\n\n dir_path = pl.Path(\"/\" + dir_path)\n\n if request.method == \"GET\":\n html_content_list = dir_contents(dir_path)\n nav_parts = [x for x in dir_path.parts if x != \"/\"]\n return render_template(\n \"main.html\",\n form=form,\n bucket_content=html_content_list,\n aws_bucket=AWS_BUCKET,\n nav_parts=nav_parts,\n nav_len=len(nav_parts),\n )\n\n else:\n create_dir = request.form[\"create_dir\"]\n if create_dir and validate_dir_name(create_dir):\n new_dir = os.path.join(str(dir_path), create_dir)\n if not os.path.exists(new_dir):\n os.mkdir(new_dir)\n flash(f\"Successfully created {create_dir}!\", \"success\")\n app.logger.warn(f\" {str(datetime.now())}: {current_user.email} successfully created a directory: {create_dir}\")\n return redirect(request.url)\n else:\n flash(f\"{create_dir} already exists!\", \"error\")\n app.logger.warn(f\" {str(datetime.now())}: {current_user.email} failed to create directory: {create_dir} - it already exists\")\n return redirect(request.url)\n elif create_dir and not validate_dir_name(create_dir):\n illegal_chars = r\"\"\"`~!@#$%^&*()=+[{]}\\|:;\"'<,>.?/\"\"\"\n flash(\n f\"Error creating {create_dir}, cannot contain a space or the following characters: {illegal_chars}\",\n \"error\",\n )\n app.logger.warn(\n f\" {str(datetime.now())}: {current_user.email} failed to create directory: 
{create_dir} - it contains a special character\"\n )\n return redirect(request.url)\n\n input_file = request.files[\"input_file\"]\n filename = secure_filename(input_file.filename)\n if filename:\n tmpfile = os.path.join(app.config[\"TEMP_DIR\"], filename)\n input_file.save(tmpfile)\n uploaded_target_file = os.path.join(str(dir_path), filename)\n if upload_file(tmpfile, AWS_BUCKET, uploaded_target_file.split(MOUNT_POINT + \"/\")[1]):\n flash(f\"Successfully uploaded {filename}\", \"success\")\n app.logger.warn(\n f\" {str(datetime.now())}: {current_user.email} successfully uploaded a {pretty_size(os.stat(tmpfile).st_size)} file: {uploaded_target_file}\"\n )\n else:\n flash(f\"Error uploading {filename}\", \"error\")\n app.logger.error(\n f\" {str(datetime.now())}: {current_user.email} error uploading a {pretty_size(os.stat(tmpfile).st_size)} file to s3: {uploaded_target_file}\"\n )\n os.remove(tmpfile)\n return redirect(request.url)\n return redirect(request.url)\n\n\[email protected](f\"/download/<path:filepath>\")\n@login_required\ndef download_file(filepath):\n app.logger.warn(f\" {str(datetime.now())}: {current_user.email} downloaded file: {filepath}\")\n return send_file(\"/\" + filepath, as_attachment=True)\n\n\[email protected](f\"/download/dir/<path:dir_path>\")\n@login_required\ndef download_dir(dir_path):\n\n dir_path = pl.Path(\"/\" + dir_path)\n\n output_zipfile = os.path.join(app.config[\"TEMP_DIR\"], dir_path.stem)\n\n make_archive(output_zipfile, \"zip\", dir_path)\n\n @after_this_request\n def remove_file(response):\n os.remove(output_zipfile + \".zip\")\n return response\n\n app.logger.warn(f\" {str(datetime.now())}: {current_user.email} downloaded directory: {str(dir_path) + '.zip'}\")\n return send_file(output_zipfile + \".zip\", as_attachment=True)\n\n\[email protected](\"/logout\")\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for(\"index\"))\n", "id": "10264404", "language": "Python", "matching_score": 4.077126502990723, "max_stars_count": 1, "path": "app/main.py" }, { "content": "import subprocess\nimport os\nfrom functools import partial\nimport pathlib as pl\nimport logging\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom flask import url_for\nfrom datetime import datetime\nimport time\n\n\ndef upload_file(file_name, bucket, object_name):\n s3_client = boto3.client(\"s3\")\n try:\n s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\n\nprint = partial(print, flush=True)\n\n# bytes pretty-printing\nUNITS_MAPPING = [\n (1 << 50, \" PB\"),\n (1 << 40, \" TB\"),\n (1 << 30, \" GB\"),\n (1 << 20, \" MB\"),\n (1 << 10, \" KB\"),\n (1, (\" byte\", \" bytes\")),\n]\n\n\ndef pretty_size(bytes, units=UNITS_MAPPING):\n \"\"\"Get human-readable file sizes.\n simplified version of https://pypi.python.org/pypi/hurry.filesize/\n \"\"\"\n for factor, suffix in units:\n if bytes >= factor:\n break\n amount = int(bytes / factor)\n\n if isinstance(suffix, tuple):\n singular, multiple = suffix\n if amount == 1:\n suffix = singular\n else:\n suffix = multiple\n return str(amount) + suffix\n\n\ndef mount_bkt():\n s3_bkt = os.getenv(\"AWS_S3_BUCKET\")\n mnt_pt = f\"/{s3_bkt}\"\n if not os.path.exists(mnt_pt):\n try:\n os.mkdir(mnt_pt)\n except FileExistsError:\n print(\"fixing the mount point from mount_bkt()...\")\n subprocess.call([\"fusermount\", \"-u\", mnt_pt, \"&&\", \"fusermount\", \"-u\", mnt_pt])\n subprocess.check_output([\"goofys\", s3_bkt, mnt_pt])\n return 
mnt_pt\n if len(list(pl.Path(mnt_pt).glob(\"*\"))) == 0:\n print(f\"mounting bucket {s3_bkt}\")\n subprocess.check_output([\"goofys\", s3_bkt, mnt_pt])\n return mnt_pt\n\n\ndef validate_dir_name(dir_name):\n for char in r\"\"\"`~!@#$%^&*()=+[{]}\\|:;\"'<,>.?/ \"\"\":\n if char in dir_name:\n return False\n return True\n\n\ndef make_temp_dir(tmp_dir):\n if not os.path.exists(tmp_dir):\n os.mkdir(tmp_dir)\n return\n\n\ndef dir_contents(pth: pl.Path):\n try:\n dir_content = pth.glob(\"*\")\n except OSError:\n print(\"fixing the mount mount from dir_contents()...\")\n mount_bkt()\n dir_content = pth.glob(\"*\")\n dir_list = []\n file_list = []\n for x in dir_content:\n stat = x.stat()\n if x.is_file():\n download_url = url_for(\"download_file\", filepath=str(x))\n size = pretty_size(stat.st_size)\n mod = datetime.fromtimestamp(stat.st_mtime).strftime(\"%A, %B %d, %Y %H:%M:%S\") + f\" {time.tzname[0]}\"\n item = {\n \"nav_url\": None,\n \"download_url\": download_url,\n \"link_name\": x.name,\n \"size\": size,\n \"last_modified\": mod,\n \"is_dir\": False,\n }\n file_list.append(item)\n else:\n download_url = url_for(\"download_dir\", dir_path=str(x))\n nav_url = url_for(\"within_dir\", dir_path=str(x))\n item = {\n \"nav_url\": nav_url,\n \"download_url\": download_url,\n \"link_name\": x.name,\n \"size\": \"--\",\n \"last_modified\": \"--\",\n \"is_dir\": True,\n }\n dir_list.append(item)\n\n return sorted(dir_list, key=lambda d: d[\"link_name\"]) + sorted(file_list, key=lambda d: d[\"link_name\"])\n", "id": "10848803", "language": "Python", "matching_score": 0.10672471672296524, "max_stars_count": 1, "path": "app/modules.py" }, { "content": "from main import db, app\nfrom db import User\nfrom email_validator import validate_email\nimport sys\n\nif __name__ == \"__main__\":\n email = validate_email(sys.argv[1]).email\n password = sys.argv[2]\n\n input(\n f\"Do you wish to create a new user with the email: {email} and the password: {password}?\"\n \"\\nPress Enter to continue, or CTRL+C to quit.\"\n )\n\n print(\"Creating user...\")\n\n with app.app_context():\n user = User(email=email)\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n print(f\"Successfully created user: {email} with the password: {password}\")\n", "id": "11229309", "language": "Python", "matching_score": 3.7789266109466553, "max_stars_count": 1, "path": "app/add_user.py" }, { "content": "from main import db, app\nfrom db import User\nfrom email_validator import validate_email\nimport sys\n\nif __name__ == \"__main__\":\n email = validate_email(sys.argv[1]).email\n password = sys.argv[2]\n\n input(f\"Do you wish to create a update the password of: {email}?\" \"\\nPress Enter to continue, or CTRL+C to quit.\")\n\n print(\"Updating password...\")\n\n with app.app_context():\n user = User.query.get(email)\n user.set_password(password)\n db.session.commit()\n\n print(f\"Successfully updated user: {email} with the password: {password}\")\n", "id": "8621811", "language": "Python", "matching_score": 3.9119791984558105, "max_stars_count": 1, "path": "app/update_user.py" }, { "content": "from main import db, app\nfrom db import User\nfrom email_validator import validate_email\nimport sys\n\nif __name__ == \"__main__\":\n email = validate_email(sys.argv[1]).email\n\n input(f\"Do you wish to delete the following user: {email}?\\nPress Enter to continue, or CTRL+C to quit.\")\n\n print(\"Deleting user...\")\n\n with app.app_context():\n User.query.filter(User.email == email).delete()\n db.session.commit()\n\n 
print(f\"Successfully deleted user: {email}\")\n", "id": "511935", "language": "Python", "matching_score": 0.4636666476726532, "max_stars_count": 1, "path": "app/delete_user.py" }, { "content": "from flask_sqlalchemy import SQLAlchemy\nfrom werkzeug.security import generate_password_hash, check_password_hash\n\n\ndb = SQLAlchemy()\n\n\nclass User(db.Model):\n \"\"\"An admin user capable of viewing reports.\n\n :param str email: email address of user\n :param str password: <PASSWORD> the user\n\n \"\"\"\n\n __tablename__ = \"user\"\n\n email = db.Column(db.String, primary_key=True)\n password_hash = db.Column(db.String)\n authenticated = db.Column(db.Boolean, default=False)\n\n def is_active(self):\n \"\"\"True, as all users are active.\"\"\"\n return True\n\n def get_id(self):\n \"\"\"Return the email address to satisfy Flask-Login's requirements.\"\"\"\n return self.email\n\n def is_authenticated(self):\n \"\"\"Return True if the user is authenticated.\"\"\"\n return self.authenticated\n\n def is_anonymous(self):\n \"\"\"False, as anonymous users aren't supported.\"\"\"\n return False\n\n def set_password(self, password):\n self.password_hash = generate_password_hash(password)\n\n def check_password(self, password):\n return check_password_hash(self.password_hash, password)\n\n\ndef db_init_app(app):\n db.init_app(app)\n return db\n\n\nif __name__ == \"__main__\":\n from main import app\n\n db.create_all(app=app)", "id": "11193229", "language": "Python", "matching_score": 0.6749365329742432, "max_stars_count": 1, "path": "app/db.py" }, { "content": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\ndef search_error(error):\n \"\"\"Opens chrome, searchs for the error and opens each stackoverflow answer in a new tab\"\"\"\n driver = webdriver.Chrome()\n driver.get(\"https://www.google.com\")\n driver.maximize_window()\n search = driver.find_element_by_name(\"q\")\n search.send_keys(f\"stackoverflow {error}\")\n search.send_keys(Keys.RETURN)\n elems = driver.find_elements_by_xpath(\"//a[@href]\")\n all_results = []\n for elem in elems:\n link = elem.get_attribute(\"href\")\n if \"stackoverflow.com/questions\" in link:\n if not \"webcache\" in link:\n all_results.append(link)\n for url in all_results:\n driver.execute_script(f\"\"\"window.open(\"{url}\",\"_blank\");\"\"\")\n return\n", "id": "6646794", "language": "Python", "matching_score": 0.24316389858722687, "max_stars_count": 1, "path": "search.py" } ]
1.075318
sami0596
[ { "content": "print('''\r\n _ ___ ____ ___ __ \r\n ___ __ _ _ __ ___ (_)/ _ \\| ___|/ _ \\ / /_ \r\n/ __|/ _` | '_ ` _ \\| | | | |___ \\ (_) | '_ \\ \r\n\\__ \\ (_| | | | | | | | |_| |___) \\__, | (_) |\r\n|___/\\__,_|_| |_| |_|_|\\___/|____/ /_/ \\___/ \r\n''')\r\nimport time\r\nimport pyautogui\r\nusernames = []\r\namounts = 0;\r\npause = 5;\r\n\r\nwhile True:\r\n user_input = input(\"How many do you want to kick?: \")\r\n try:\r\n amounts = int(user_input)\r\n break\r\n except:\r\n print(\"please, enter a number\")\r\n continue\r\n\r\norder = 0;\r\nfor user in range(amounts):\r\n order += 1;\r\n name = str(input(str(order) + \": \"))\r\n\r\n usernames.append(name)\r\nprint(usernames)\r\n\r\n\r\nwhile True:\r\n user_input = input(\"When should the program run (in seconds)?: \")\r\n try:\r\n pause = int(user_input)\r\n break\r\n except:\r\n print(\"please, enter a number\")\r\n continue\r\n\r\n\r\ninput(\"The program will begin to execute in \" + str(pause) + \" seconds, from the moment you press Enter\")\r\n\r\ntime.sleep(pause)\r\nfor x in usernames:\r\n pyautogui.typewrite('/kick ' + x)\r\n pyautogui.press('enter')\r\n pyautogui.press('enter')\r\n \r\ninput(\"Made by: sami0596\")\r\n", "id": "8536169", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "kickem.py" } ]
0
afbaron10
[ { "content": "import sqlite3\r\nimport urllib.request, urllib.parse, urllib.error\r\nfrom bs4 import BeautifulSoup\r\nimport ssl\r\nimport csv, xlsxwriter\r\nimport smtplib\r\nimport os\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.base import MIMEBase\r\nfrom email import encoders\r\n\r\nconn = sqlite3.connect('news.db')\r\ncur = conn.cursor()\r\nemail_user = 'EMAIL'\r\nemail_password = 'PASSWORD'\r\nemail_send = 'EMAIL SENDER'\r\nsubject = 'subject'\r\n\r\nmsg = MIMEMultipart()\r\nmsg['From'] = email_user\r\nmsg['To'] = email_send\r\nmsg['Subject'] = subject\r\n\r\ncur.executescript('''\r\nDROP TABLE IF EXISTS Tiempo;\r\nCREATE TABLE Tiempo (\r\n titulo TEXT,\r\n Noticia TEXT\r\n);\r\n''')\r\n\r\nctx = ssl.create_default_context()\r\nctx.check_hostname = False\r\nctx.verify_mode = ssl.CERT_NONE\r\n\r\nurl = \"https://www.eltiempo.com/\"\r\nhtml = urllib.request.urlopen(url, context=ctx).read()\r\nsopa = BeautifulSoup(html, 'html.parser')\r\n\r\nlst = list()\r\n# Recuperar todas las etiquetas de anclaje\r\nfor article in sopa.find_all(\"h3\", class_=\"listing-title box-title\"):\r\n ##headline = article.h3.a\r\n ##print(headline.text)\r\n headline = (article.text)\r\n lst.append(headline)\r\nfor item in lst:\r\n print(item)\r\nlst2 = list()\r\nfor i in sopa.find_all(\"h3\", class_=\"listing-title box-title\"):\r\n ##headline = article.h3.a\r\n ##print(headline.text)\r\n link = i.find('a',href=True)\r\n respuesta= str(link['href'])\r\n lst2.append(respuesta)\r\nlst3 = list()\r\nfor link in lst2:\r\n url = \"https://www.eltiempo.com/\"+link\r\n html = urllib.request.urlopen(url, context=ctx).read()\r\n sopa = BeautifulSoup(html, 'html.parser')\r\n\r\n article = sopa.find(\"div\", class_=\"articulo-contenido\")\r\n if article is None:\r\n continue\r\n #print(article.text)\r\n lst3.append(article.text)\r\n\r\nlst4 = list()\r\nfor noticia in range(6):\r\n #print(lst[noticia] + lst3[noticia].split('\\n')[4])\r\n lst4.append((str(noticia + 1)+'.'+'Noticia') + lst[noticia] + lst3[noticia].split('\\n')[4])\r\n x = \"\\n\\n\".join(lst4)\r\n\r\n cur.execute('''INSERT INTO Tiempo (titulo ,Noticia)\r\n VALUES (?,?)''', (lst[noticia],lst3[noticia].split('\\n')[4],))\r\n\r\n conn.commit()\r\ncur.close()\r\n\r\nbody = x\r\nmsg.attach(MIMEText(body,'plain'))\r\n\r\ntext = msg.as_string()\r\nserver = smtplib.SMTP('smtp.gmail.com',587)\r\nserver.starttls()\r\nserver.login(email_user,email_password)\r\n\r\n\r\nserver.sendmail(email_user,email_send,text)\r\nserver.quit()\r\n", "id": "629667", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "WebscrappingElTiempo.py" } ]
0
fabiangal
[ { "content": "\"\"\" Adapted GPT-2 model for kg_mask support\n\nThis code is based on the GPT-2 model implementation from the transformers library.\nWe used version 4.5.1\n\nThe original code can be found here:\nhttps://huggingface.co/transformers/v4.5.1/_modules/transformers/models/gpt2/modeling_gpt2.html\n\nChanges marked with additional comments.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom typing import Tuple\n\nfrom transformers.utils import logging\nfrom transformers.activations import ACT2FN\nfrom transformers.models.gpt2 import GPT2PreTrainedModel\nfrom transformers.utils.model_parallel_utils import assert_device_map, get_device_map\nfrom transformers.modeling_outputs import CausalLMOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions\nfrom transformers.modeling_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer\n\nlogger = logging.get_logger(__name__)\n\n\n#######################################################################################################################\n# Attention class #\n#######################################################################################################################\nclass Attention(nn.Module):\n def __init__(self, nx, n_ctx, config, scale=False, is_cross_attention=False):\n super().__init__()\n\n n_state = nx # in Attention: n_state=768 (nx=n_embd)\n # [switch nx => n_state from Block to Attention to keep identical to TF implem]\n assert n_state % config.n_head == 0\n self.register_buffer(\n \"bias\", torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)\n )\n self.register_buffer(\"masked_bias\", torch.tensor(-1e4))\n self.n_head = config.n_head\n self.split_size = n_state\n self.scale = scale\n self.is_cross_attention = is_cross_attention\n if self.is_cross_attention:\n self.c_attn = Conv1D(2 * n_state, nx)\n self.q_attn = Conv1D(n_state, nx)\n else:\n self.c_attn = Conv1D(3 * n_state, nx)\n self.c_proj = Conv1D(n_state, nx)\n self.attn_dropout = nn.Dropout(config.attn_pdrop)\n self.resid_dropout = nn.Dropout(config.resid_pdrop)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n heads, index = find_pruneable_heads_and_indices(\n heads, self.n_head, self.split_size // self.n_head, self.pruned_heads\n )\n index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])\n\n # Prune conv1d layers\n self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)\n self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)\n\n # Update hyper params\n self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))\n self.n_head = self.n_head - len(heads)\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def _attn(self, q, k, v, kg_masks, attention_mask=None, head_mask=None, output_attentions=False):\n w = torch.matmul(q, k)\n if self.scale:\n w = w / (float(v.size(-1)) ** 0.5)\n bs, nd, ns = w.size(0), w.size(-2), w.size(-1) # Changed by authors: Batch size is required.\n\n if not self.is_cross_attention:\n # if only \"normal\" attention layer implements causal mask\n mask = self.bias[:, :, ns - nd : ns, :ns]\n w = torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))\n\n # --- Manipulating the attention weights according to kg_mask -------------------------------------------------\n\n # Create (dynamic) future mask mask\n mask = torch.stack(bs * [self.bias[:, :, ns - nd: ns, :ns]], dim=0).view(-1, 1, ns, ns)\n\n # Replace the 
kg sequence part with the kg_masks\n amd, ams = kg_masks.size(-2), kg_masks.size(-1)\n mask[:, 0, 0:amd, 0:ams] = kg_masks\n\n # Mask zeros out\n w = torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))\n # -------------------------------------------------------------------------------------------------------------\n\n if attention_mask is not None:\n # Apply the attention mask\n w = w + attention_mask\n\n w = nn.Softmax(dim=-1)(w)\n w = self.attn_dropout(w)\n\n # Mask heads if we want to\n if head_mask is not None:\n w = w * head_mask\n\n outputs = (torch.matmul(w, v),)\n if output_attentions:\n outputs += (w,)\n return outputs\n\n def merge_heads(self, x):\n x = x.permute(0, 2, 1, 3).contiguous()\n new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)\n return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states\n\n def split_heads(self, x, k=False):\n new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)\n x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states\n if k:\n return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)\n else:\n return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)\n\n def forward(\n self,\n hidden_states,\n kg_masks, # Added by the authors.\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n use_cache=False,\n output_attentions=False,\n ):\n if encoder_hidden_states is not None:\n assert hasattr(\n self, \"q_attn\"\n ), \"If class is used as cross attention, the weights `q_attn` have to be defined. \" \\\n \"Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`.\"\n query = self.q_attn(hidden_states)\n key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)\n attention_mask = encoder_attention_mask\n else:\n query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)\n\n query = self.split_heads(query)\n key = self.split_heads(key, k=True)\n value = self.split_heads(value)\n if layer_past is not None:\n past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below\n key = torch.cat((past_key, key), dim=-1)\n value = torch.cat((past_value, value), dim=-2)\n\n if use_cache is True:\n present = (key.transpose(-2, -1), value) # transpose to have same shapes\n else:\n present = None\n\n attn_outputs = self._attn(query, key, value, kg_masks, attention_mask, head_mask, output_attentions)\n a = attn_outputs[0]\n\n a = self.merge_heads(a)\n a = self.c_proj(a)\n a = self.resid_dropout(a)\n\n return (a, present) + attn_outputs[1:] # a, present, (attentions)\n\n\n#######################################################################################################################\n# MLP class #\n#######################################################################################################################\nclass MLP(nn.Module):\n def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)\n super().__init__()\n nx = config.n_embd\n self.c_fc = Conv1D(n_state, nx)\n self.c_proj = Conv1D(nx, n_state)\n self.act = ACT2FN[config.activation_function]\n self.dropout = nn.Dropout(config.resid_pdrop)\n\n def forward(self, x):\n h = self.act(self.c_fc(x))\n h2 = self.c_proj(h)\n return self.dropout(h2)\n\n\n#######################################################################################################################\n# Block class 
#\n#######################################################################################################################\nclass Block(nn.Module):\n def __init__(self, n_ctx, config, scale=False):\n super().__init__()\n hidden_size = config.n_embd\n inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size\n self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n self.attn = Attention(hidden_size, n_ctx, config, scale)\n self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n if config.add_cross_attention:\n self.crossattention = Attention(hidden_size, n_ctx, config, scale, is_cross_attention=True)\n self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)\n self.mlp = MLP(inner_dim, config)\n\n def forward(\n self,\n hidden_states,\n kg_masks, # Added by the authors.\n layer_past=None,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n use_cache=False,\n output_attentions=False,\n ):\n attn_outputs = self.attn(\n self.ln_1(hidden_states),\n kg_masks=kg_masks, # Added by the authors.\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n attn_output = attn_outputs[0] # output_attn: a, present, (attentions)\n outputs = attn_outputs[1:]\n # residual connection\n hidden_states = attn_output + hidden_states\n\n if encoder_hidden_states is not None:\n # add one self-attention block for cross-attention\n assert hasattr(\n self, \"crossattention\"\n ), f\"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention \" \\\n f\"layers by setting `config.add_cross_attention=True`\"\n cross_attn_outputs = self.crossattention(\n self.ln_cross_attn(hidden_states),\n attention_mask=attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n output_attentions=output_attentions,\n )\n attn_output = cross_attn_outputs[0]\n # residual connection\n hidden_states = hidden_states + attn_output\n outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights\n\n feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))\n # residual connection\n hidden_states = hidden_states + feed_forward_hidden_states\n\n if use_cache:\n outputs = (hidden_states,) + outputs\n else:\n outputs = (hidden_states,) + outputs[1:]\n\n return outputs # hidden_states, present, (attentions, cross_attentions)\n\n\n#######################################################################################################################\n# GPT2 Model class (with kg_masks support) #\n#######################################################################################################################\nclass GPT2Model(GPT2PreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n self.wpe = nn.Embedding(config.n_positions, config.n_embd)\n self.drop = nn.Dropout(config.embd_pdrop)\n self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])\n self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)\n\n self.init_weights()\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n def parallelize(self, device_map=None):\n # Check validity of device_map\n self.device_map = (\n get_device_map(len(self.h), range(torch.cuda.device_count())) 
if device_map is None else device_map\n )\n assert_device_map(self.device_map, len(self.h))\n self.model_parallel = True\n self.first_device = \"cpu\" if \"cpu\" in self.device_map.keys() else \"cuda:\" + str(min(self.device_map.keys()))\n self.last_device = \"cuda:\" + str(max(self.device_map.keys()))\n self.wte = self.wte.to(self.first_device)\n self.wpe = self.wpe.to(self.first_device)\n # Load onto devices\n for k, v in self.device_map.items():\n for block in v:\n cuda_device = \"cuda:\" + str(k)\n self.h[block] = self.h[block].to(cuda_device)\n # ln_f to last\n self.ln_f = self.ln_f.to(self.last_device)\n\n def deparallelize(self):\n self.model_parallel = False\n self.device_map = None\n self.first_device = \"cpu\"\n self.last_device = \"cpu\"\n self.wte = self.wte.to(\"cpu\")\n self.wpe = self.wpe.to(\"cpu\")\n for index in range(len(self.h)):\n self.h[index] = self.h[index].to(\"cpu\")\n self.ln_f = self.ln_f.to(\"cpu\")\n torch.cuda.empty_cache()\n\n def get_input_embeddings(self):\n return self.wte\n\n def set_input_embeddings(self, new_embeddings):\n self.wte = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.h[layer].attn.prune_heads(heads)\n\n def forward(\n self,\n input_ids=None,\n kg_masks=None, # Required new parameter for the kg masks.\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n batch_size = input_ids.shape[0]\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size = inputs_embeds.shape[0]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, input_shape[-1])\n if position_ids is not None:\n position_ids = position_ids.view(-1, input_shape[-1])\n\n if past_key_values is None:\n past_length = 0\n past_key_values = tuple([None] * len(self.h))\n else:\n past_length = past_key_values[0][0].size(-2)\n if position_ids is None:\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])\n\n # Attention mask.\n if attention_mask is not None:\n assert batch_size > 0, \"batch_size has to be defined and > 0\"\n attention_mask = attention_mask.view(batch_size, -1)\n # We create a 3D attention mask from a 2D tensor mask.\n # Sizes are [batch_size, 1, 1, to_seq_length]\n # So we 
can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n # this attention mask is more simple than the triangular masking of causal attention\n # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n attention_mask = attention_mask[:, None, None, :]\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility\n attention_mask = (1.0 - attention_mask) * -10000.0\n\n # If a 2D ou 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if self.config.add_cross_attention and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # head_mask has shape n_layer x batch x n_heads x N x N\n head_mask = self.get_head_mask(head_mask, self.config.n_layer)\n\n if inputs_embeds is None:\n inputs_embeds = self.wte(input_ids)\n position_embeds = self.wpe(position_ids)\n hidden_states = inputs_embeds + position_embeds\n\n if token_type_ids is not None:\n token_type_embeds = self.wte(token_type_ids)\n hidden_states = hidden_states + token_type_embeds\n\n hidden_states = self.drop(hidden_states)\n\n output_shape = input_shape + (hidden_states.size(-1),)\n\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None\n all_hidden_states = () if output_hidden_states else None\n for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n\n # Model parallel\n if self.model_parallel:\n torch.cuda.set_device(hidden_states.device)\n # Ensure layer_past is on same device as hidden_states (might not be correct)\n if layer_past is not None:\n layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)\n # Ensure that attention_mask is always on the same device as hidden_states\n if attention_mask is not None:\n attention_mask = attention_mask.to(hidden_states.device)\n if isinstance(head_mask, torch.Tensor):\n head_mask = head_mask.to(hidden_states.device)\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if getattr(self.config, \"gradient_checkpointing\", False) and self.training:\n\n if use_cache:\n logger.warn(\n \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. 
Setting \"\n \"`use_cache=False`...\"\n )\n use_cache = False\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, use_cache, output_attentions)\n\n return custom_forward\n\n outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(block),\n hidden_states,\n None,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n outputs = block(\n hidden_states,\n kg_masks=kg_masks,\n layer_past=layer_past,\n attention_mask=attention_mask,\n head_mask=head_mask[i],\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)\n if self.config.add_cross_attention:\n all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)\n\n # Model Parallel: If it's the last layer for that device, put things on the next device\n if self.model_parallel:\n for k, v in self.device_map.items():\n if i == v[-1] and \"cuda:\" + str(k) != self.last_device:\n hidden_states = hidden_states.to(\"cuda:\" + str(k + 1))\n\n hidden_states = self.ln_f(hidden_states)\n\n hidden_states = hidden_states.view(*output_shape)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n return BaseModelOutputWithPastAndCrossAttentions(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n cross_attentions=all_cross_attentions,\n )\n\n\n#######################################################################################################################\n# GPT2 LM Head Class #\n#######################################################################################################################\nclass GPT2LMHeadModel(GPT2PreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.masked_bias\", r\"lm_head\\.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.transformer = GPT2Model(config)\n self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n self.init_weights()\n\n # Model parallel\n self.model_parallel = False\n self.device_map = None\n\n def parallelize(self, device_map=None):\n self.device_map = (\n get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))\n if device_map is None\n else device_map\n )\n assert_device_map(self.device_map, len(self.transformer.h))\n self.transformer.parallelize(self.device_map)\n self.lm_head = self.lm_head.to(self.transformer.first_device)\n self.model_parallel = True\n\n def deparallelize(self):\n self.transformer.deparallelize()\n self.transformer = self.transformer.to(\"cpu\")\n self.lm_head = self.lm_head.to(\"cpu\")\n self.model_parallel = False\n torch.cuda.empty_cache()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n # only last token for inputs_ids if past is defined in kwargs\n if 
past:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n else:\n position_ids = None\n return {\n \"input_ids\": input_ids,\n \"past_key_values\": past,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n\n def forward(\n self,\n input_ids=None,\n kg_masks=None, # Parameter added by authors. Simply forwarded to GPTModel\n past_key_values=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set\n ``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to\n ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n transformer_outputs = self.transformer(\n input_ids,\n kg_masks=kg_masks, # forwarded here\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n\n # Set device for model parallelism\n if self.model_parallel:\n torch.cuda.set_device(self.transformer.first_device)\n hidden_states = hidden_states.to(self.lm_head.weight.device)\n\n lm_logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutputWithCrossAttentions(\n loss=loss,\n logits=lm_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n cross_attentions=transformer_outputs.cross_attentions,\n )\n\n @staticmethod\n def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:\n \"\"\"\n This function is used to re-order the :obj:`past_key_values` cache if\n :meth:`~transformers.PreTrainedModel.beam_search` or 
:meth:`~transformers.PreTrainedModel.beam_sample` is\n called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.\n \"\"\"\n return tuple(\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)\n for layer_past in past\n )\n\n", "id": "3897338", "language": "Python", "matching_score": 3.4652647972106934, "max_stars_count": 3, "path": "model.py" }, { "content": "\"\"\" This class trains the moviecorpus corpus on a GPT-2 model.\n\n\"\"\"\nimport math\nimport torch\nfrom torch.nn.parallel import DataParallel\nimport logging\nimport transformers\n\nfrom ignite.contrib.handlers import PiecewiseLinear\nfrom ignite.engine import Events\nfrom ignite.metrics import Loss, Accuracy, MetricsLambda\nfrom transformers import AdamW\n\nimport train_base\nimport dataset\n\n\nclass KomodisTrainer(train_base.BaseTrainer):\n def __init__(self, path_to_pretrained_model, # A string. The directory of the pretrained model.\n path_to_vocab_file, # A string. The directory of the tokenizer vocabularies file.\n path_to_merges_file, # A string. The directory of the tokenizer merges file.\n hparams, # A dict. The training hyperparameters.\n path_to_encoded_data=None): # A string. The path to the already processed dataset.\n super().__init__(hparams)\n self.path_to_encoded_data = path_to_encoded_data\n\n self.logger = logging.getLogger(__file__)\n\n # create tokenizer and pretrained model\n self.model = transformers.GPT2DoubleHeadsModel.from_pretrained(path_to_pretrained_model)\n self.tokenizer = transformers.GPT2Tokenizer(vocab_file=path_to_vocab_file,\n merges_file=path_to_merges_file)\n self.model.to(self.hps.device)\n\n # prepare dataset\n self.dataset_obj = dataset.KomodisDataset(tokenizer=self.tokenizer, hparams=hparams,\n path_to_data=self.hps.path_to_data,\n debug=self.hps.debug)\n if path_to_encoded_data is None:\n self.dataset_obj.load_txt_dataset()\n self.dataset_obj.tokenize_dataset()\n else:\n self.dataset_obj.load_dataset(path_to_encoded_data)\n\n self.optimizer = AdamW(self.model.parameters(), lr=self.hps.lr, correct_bias=True)\n\n num_of_tokens = len(self.tokenizer.encoder)\n self.model.resize_token_embeddings(new_num_tokens=num_of_tokens + self.dataset_obj.num_added_tokens)\n\n # TODO: Currently this is fixed to the use of 4 gpus. This should be more generic!\n self.model = DataParallel(self.model, device_ids=[0, 1, 2, 3])\n\n def learningrate(self, trainer, args):\n \"\"\" Linear decreasing learning rate. \"\"\"\n scheduler = PiecewiseLinear(self.optimizer, \"lr\",\n [(0, self.hps.lr), (self.hps.n_epochs * len(args[\"loader_train\"]), 0.0)])\n trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\n\n def metrics(self):\n \"\"\" Metrics.\n nll negative log-likelihood\n acc classification accuracy for next-response selection\n ppl perplexity\n \"\"\"\n metrics = {\n \"nll\": Loss(torch.nn.CrossEntropyLoss(ignore_index=-1), output_transform=lambda x: (x[0][0], x[1][0])),\n \"acc\": Accuracy(output_transform=lambda x: (x[0][1], x[1][1]))}\n metrics[\"ppl\"] = MetricsLambda(math.exp, metrics[\"nll\"])\n return metrics\n\n def update(self, engine, batch):\n \"\"\" Update step for PyTorch. 
\"\"\"\n self.model.train()\n batch = tuple(input_tensor.to(self.hps.device) for input_tensor in batch)\n input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids = batch\n (lm_loss), (mc_loss), *_ = self.model(\n input_ids, token_type_ids=token_type_ids, mc_token_ids=mc_token_ids,\n mc_labels=mc_labels, lm_labels=lm_labels)\n loss = (lm_loss * self.hps.lm_coef + mc_loss * self.hps.mc_coef) / self.hps.gradient_accumulation_steps\n loss.sum().backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.hps.max_norm)\n if engine.state.iteration % self.hps.gradient_accumulation_steps == 0:\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n return loss.mean().item()\n\n def inference(self, engine, batch):\n \"\"\" Inference step for PyTorch. \"\"\"\n self.model.eval()\n with torch.no_grad():\n batch = tuple(input_tensor.to(self.hps.device) for input_tensor in batch)\n input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids = batch\n self.logger.info(self.tokenizer.decode(input_ids[0, -1, :].tolist()))\n # if we dont send labels to model, it doesnt return losses\n lm_logits, mc_logits, *_ = self.model(\n input_ids, token_type_ids=token_type_ids, mc_token_ids=mc_token_ids,\n )\n lm_logits_flat_shifted = lm_logits[..., :-1, :].contiguous().view(-1, lm_logits.size(-1))\n lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)\n\n return (lm_logits_flat_shifted, mc_logits), (lm_labels_flat_shifted, mc_labels)\n", "id": "4840655", "language": "Python", "matching_score": 4.408681392669678, "max_stars_count": 4, "path": "model/komodis.py" }, { "content": "\"\"\" train\n\nThis script is supposed to show how the authors trained a GPT-2 model.\n\nPlease use transformers 4.5.1 to train the model. Other versions might work as well.\n\nTo feed the knowledge graph edges and nodes into the transformer model, some changes of the model were needed. They\ncan be found with full explanation in model.py\n\n\"\"\"\n\nimport os\nimport sys\nimport json\nimport math\nimport pickle\n\nfrom pprint import pformat\nfrom datetime import datetime\n\nimport torch\n\nimport transformers\nfrom transformers import AdamW\nfrom argparse import ArgumentParser\n\nfrom komodis import Komodis\nfrom opendialkg import OpenDialKG\nfrom model import GPT2LMHeadModel\n\nfrom ignite.engine import Engine, Events\nfrom ignite.handlers import ModelCheckpoint\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.contrib.handlers import PiecewiseLinear\nfrom ignite.metrics import Loss, MetricsLambda, RunningAverage\n\n\n# --- argument parsing ------------------------------------------------------------------------------------------------\nparser = ArgumentParser()\nparser.add_argument(\"--dataset\", type=str, help=\"Name of the dataset (komodis or opendialkg).\")\nparser.add_argument(\"--depth\", type=int, help=\"Graph depth (0, 1 or 2). 
See paper for more information.\")\nparser.add_argument(\"--encoding\", type=str, help=\"Encoding type (series or parallel) See paper for more information.\")\nparser.add_argument(\"--lr\", type=float, default=6.0e-5, help=\"Learning rate for training.\")\nparser.add_argument(\"--epochs\", type=int, default=3, help=\"Number of epochs for training.\")\nparser.add_argument(\"--batch_size\", type=int, default=4, help=\"Train and valid batch size for training.\")\nparser.add_argument(\"--device\", type=str, default=\"cpu\", help=\"Only cpu support in with this repository.\")\nargs = parser.parse_args()\n\nif args.dataset not in [\"komodis\", \"opendialkg\"]:\n print(\"Argument dataset={} is not valid!\".format(args.dataset))\n sys.exit()\n\nif args.depth not in [0, 1, 2]:\n print(\"Argument depth={} is not valid!\".format(args.dataset))\n sys.exit()\n\nif args.encoding not in [\"series\", \"parallel\"]:\n print(\"Argument encoding={} is not valid!\".format(args.dataset))\n sys.exit()\n\n# --- load data -------------------------------------------------------------------------------------------------------\nif not os.path.exists(\"data/datasets/\"):\n print(\"Please create a datasets directory in ./data, download and unpack the datasets.\")\n sys.exit()\n\nif args.dataset == \"komodis\":\n # --- open original dataset ---\n dataset = {}\n for split in [\"train\", \"valid\", \"test\"]:\n if not os.path.isfile(\"data/datasets/komodis/komodis_dialogues_{}.json\".format(split)):\n print(\"Please unpack the komodis dataset in './data/datasets/'.\")\n sys.exit()\n with open(\"data/datasets/komodis/komodis_dialogues_{}.json\".format(split), \"r\") as f:\n dataset[split] = json.load(f)\nelif args.dataset == \"opendialkg\":\n # --- open original dataset ---\n dataset = {}\n for split in [\"train\", \"valid\", \"test\"]:\n if not os.path.isfile(\"data/datasets/opendialkg/{}_opendialkg.json\".format(split)):\n print(\"Please unpack the opendialkg dataset in './data/datasets/'.\")\n sys.exit()\n with open(\"data/datasets/opendialkg/{}_opendialkg.json\".format(split), \"r\") as f:\n dataset[split] = json.load(f)\nelse:\n print(\"Argument dataset={} is not valid!\".format(args.dataset))\n sys.exit()\n\n# --- create and/or open knowledge graphs ---\nfile = \"data/processed/{}_graphs_d{}_{}-enc.pkl\".format(args.dataset, args.depth, args.encoding)\nif not os.path.isfile(file):\n import process_graphs\n exec(open(\"process_graphs.py\").read())\n\nwith open(file, \"rb\") as f:\n graphs = pickle.load(f)\n\n# --- prepare training ------------------------------------------------------------------------------------------------\ntokenizer = transformers.GPT2Tokenizer.from_pretrained(\"gpt2\")\nif args.dataset == \"komodis\":\n dataset_helper = Komodis(tokenizer=tokenizer)\nelif args.dataset == \"opendialkg\":\n dataset_helper = OpenDialKG(tokenizer=tokenizer)\nelse:\n print(\"Argument dataset={} is not valid!\".format(args.dataset))\n sys.exit()\n\n# data preparation: see komodis.py or opendialkg.py for explanations\ndataset_helper.prepare_dataset(dataset=dataset, graphs=graphs)\ntrain_loader = dataset_helper.get_torch_features(split=\"train\", batch_size=args.batch_size)\nvalid_loader = dataset_helper.get_torch_features(split=\"valid\", batch_size=args.batch_size)\n\n# loads the pretrained gpt-2 model weights and adapts the embedding matrix to the new (bigger) vocab size\nmodel = GPT2LMHeadModel.from_pretrained(\"gpt2\")\nmodel.resize_token_embeddings(len(dataset_helper.tokenizer.encoder) + 
dataset_helper.num_added_tokens)\n\n# simple optimizer and learning rate schedule initialization. Used like this in our experiments.\noptimizer = AdamW(model.parameters(), lr=args.lr, correct_bias=True)\nscheduler = PiecewiseLinear(optimizer, \"lr\", [(0, args.lr), (args.epochs * len(train_loader), 0.0)])\n\n\ndef average_distributed_scalar(scalar, targs):\n \"\"\" Average a scalar over the nodes if we are in distributed training. We use this for distributed evaluation. \"\"\"\n if targs.local_rank == -1:\n return scalar\n scalar_t = torch.tensor(scalar, dtype=torch.float, device=targs.device) / torch.distributed.get_world_size()\n torch.distributed.all_reduce(scalar_t, op=torch.distributed.ReduceOp.SUM)\n return scalar_t.item()\n\n\n# metrics initialization. ignore_index is set to -100 and is used for masking out tokens in the label, that should not\n# contribute to the loss. This needs to be aligned with the data preprocessing.\nmetrics = {\n \"nll\": Loss(torch.nn.CrossEntropyLoss(ignore_index=-100), output_transform=lambda x: (x[0], x[1]))\n}\nmetrics[\"average_nll\"] = MetricsLambda(average_distributed_scalar, metrics[\"nll\"], args)\nmetrics[\"average_ppl\"] = MetricsLambda(math.exp, metrics[\"average_nll\"])\n\n\n# --- update and inference functions ----------------------------------------------------------------------------------\ndef update(engine, batch):\n \"\"\" Pytorch update function.\n\n Decoding one batch needs to be aligned with the get_torch_features processing function!\n \"\"\"\n model.train()\n\n batch = tuple(input_tensor.to(args.device) for input_tensor in batch)\n input_ids, token_type_ids, kg_attn_matrix, lm_labels = batch\n\n output = model(\n input_ids, token_type_ids=token_type_ids,\n labels=lm_labels, kg_masks=kg_attn_matrix\n )\n\n loss = output[\"loss\"]\n loss.sum().backward()\n\n optimizer.step()\n optimizer.zero_grad()\n\n return loss.mean().item()\n\n\ndef inference(engine, batch):\n \"\"\" Pytorch inference function.\n\n \"\"\"\n model.eval()\n\n batch = tuple(input_tensor.to(args.device) for input_tensor in batch)\n input_ids, token_type_ids, kg_attn_matrix, lm_labels = batch\n\n output = model(\n input_ids, token_type_ids=token_type_ids, kg_masks=kg_attn_matrix\n )\n\n lm_logits_flat_shifted = output[\"logits\"][..., :-1, :].contiguous().view(-1, output[\"logits\"].size(-1))\n lm_labels_flat_shifted = lm_labels[..., 1:].contiguous().view(-1)\n\n return lm_logits_flat_shifted, lm_labels_flat_shifted\n# ---------------------------------------------------------------------------------------------------------------------\n\n\n# prepare training with the Ignite package\ntrainer = Engine(update)\nevaluator = Engine(inference)\n\ntrainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(valid_loader))\ntrainer.add_event_handler(Events.ITERATION_STARTED, scheduler)\nRunningAverage(output_transform=lambda x: x).attach(trainer, \"loss\")\nfor name, metric in metrics.items():\n metric.attach(evaluator, name)\n\nvalidation_status = {}\npbar = ProgressBar(persist=True)\npbar.attach(trainer, metric_names=[\"loss\"])\n\nevaluator.add_event_handler(Events.COMPLETED,\n lambda _: pbar.log_message(\"Validation: %s\" % pformat(evaluator.state.metrics)))\n\ncurrent_time = datetime.now().strftime('%b%d_%H-%M-%S')\nlog_dir = os.path.join('runs', current_time + '_gpt2')\n\ncheckpoint_handler = ModelCheckpoint(log_dir, \"checkpoint\", n_saved=3)\n\ntrainer.add_event_handler(Events.EPOCH_COMPLETED,\n checkpoint_handler,\n {\"mymodel\": getattr(model, \"module\", 
model)})\n\ntorch.save(args, log_dir + \"/model_training_args.bin\")\n\nmodel.to(args.device)\n\n# Ignite training\ntrainer.run(train_loader, max_epochs=args.epochs)\n", "id": "6992860", "language": "Python", "matching_score": 5.602876663208008, "max_stars_count": 3, "path": "train.py" }, { "content": "\"\"\" Base class for a trainer.\n\nThis class includes some basic PyTorch code for multi GPU training, metrics, train and validation code.\n\n\n\n\"\"\"\nimport os\nimport torch\nfrom datetime import datetime as dt\nfrom pathlib import Path\nfrom pprint import pformat\nfrom ignite.contrib.handlers import ProgressBar\nfrom ignite.handlers import ModelCheckpoint\nfrom ignite.engine import Engine, Events\nfrom ignite.metrics import RunningAverage\nfrom transformers import CONFIG_NAME, WEIGHTS_NAME\n\nfrom abc import ABCMeta, abstractmethod\n\n\nclass BaseTrainer(metaclass=ABCMeta):\n def __init__(self, hparams):\n self.hps = hparams # A dict. Model parameters, see train.py for more information.\n self.dataset_obj = None # An instance of class KomodisDataset.\n self.model = None # The PyTorch model.\n self.tokenizer = None # The transformers tokenizer.\n self.optimizer = None # A PyTorch optimizer.\n\n def train(self):\n \"\"\" \"\"\"\n loader_train, sampler_train = self.dataset_obj.get_torch_features(split=\"train\",\n batch_size=self.hps.train_batch_size,\n num_wrong_utts=self.hps.num_candidates - 1)\n loader_valid, sampler_valid = self.dataset_obj.get_torch_features(split=\"valid\",\n batch_size=self.hps.valid_batch_size,\n num_wrong_utts=self.hps.num_candidates - 1)\n\n # Print (readable) values of the first three samples, if debug mode.\n if self.hps.debug:\n print(\"*** SAMPLES ***\")\n input_ids = loader_train.dataset.tensors[0].tolist()\n token_type_ids = loader_train.dataset.tensors[4].tolist()\n mc_labels = loader_train.dataset.tensors[3].tolist()\n\n for sample in range(3):\n print(\"SAMPLE {}:\".format(sample))\n input_ids_txt = [self.tokenizer.decode(input_ids[sample][i],\n clean_up_tokenization_spaces=False)\n for i in range(self.hps.num_candidates)]\n token_type_ids_txt = [self.tokenizer.decode(token_type_ids[sample][i])\n for i in range(self.hps.num_candidates)]\n\n for num in range(self.hps.num_candidates):\n print(\"{}. 
candidate:\".format(num+1))\n print(\"INPUT IDS: {}\".format(input_ids_txt[num]))\n print(\"TOKEN TYPE IDS: {}\".format(token_type_ids_txt[num]))\n print(\"Correct utterance: {}\".format(mc_labels[sample] + 1))\n\n trainer = Engine(self.update)\n evaluator = Engine(self.inference)\n\n # Evaluation at the end of each epoch, as well as at the beginning of the training.\n trainer.add_event_handler(Events.EPOCH_COMPLETED, lambda _: evaluator.run(loader_valid))\n if self.hps.n_epochs < 1:\n trainer.add_event_handler(Events.COMPLETED, lambda _: evaluator.run(loader_valid))\n if self.hps.eval_before_start:\n trainer.add_event_handler(Events.STARTED, lambda _: evaluator.run(loader_valid))\n\n # Adding learning rate.\n args = {\n \"loader_train\": loader_train\n }\n self.learningrate(trainer=trainer, args=args)\n\n # Adding metrics to the trainer\n RunningAverage(output_transform=lambda x: x).attach(trainer, \"loss\")\n metrics = self.metrics()\n for name, metric in metrics.items():\n metric.attach(evaluator, name)\n\n # Printing evaluation results at the end of each epoch.\n pbar = ProgressBar(persist=True)\n pbar.attach(trainer, metric_names=[\"loss\"])\n evaluator.add_event_handler(Events.COMPLETED,\n lambda _: pbar.log_message(\"Validation: %s\" % pformat(evaluator.state.metrics)))\n\n # Save model weights after each epoch.\n log_dir = self.mk_logdir(self.hps.model_checkpoint)\n checkpoint_handler = ModelCheckpoint(log_dir, \"checkpoint\", save_interval=1, n_saved=3)\n trainer.add_event_handler(Events.EPOCH_COMPLETED,\n checkpoint_handler,\n {\"mymodel\": getattr(self.model, \"module\", self.model)})\n torch.save(self.hps, log_dir / \"model_training_args.bin\")\n getattr(self.model, \"module\", self.model).config.to_json_file(os.path.join(log_dir, CONFIG_NAME))\n self.tokenizer.save_pretrained(log_dir)\n\n # Run the training\n trainer.run(loader_train, max_epochs=self.hps.n_epochs)\n\n os.rename(checkpoint_handler._saved[-1][1][-1], os.path.join(log_dir, WEIGHTS_NAME))\n\n @staticmethod\n def mk_logdir(name_model):\n \"\"\" \"\"\"\n timestamp = dt.now().strftime(\"%b%d_%H-%M-%S\")\n logdir = Path(\"results\", timestamp + \"_\" + name_model)\n return logdir\n\n @abstractmethod\n def update(self, engine, batch):\n raise NotImplementedError\n\n @abstractmethod\n def inference(self, engine, batch):\n raise NotImplementedError\n\n @abstractmethod\n def learningrate(self, trainer, args):\n raise NotImplementedError\n\n @abstractmethod\n def metrics(self):\n return {}\n", "id": "4327075", "language": "Python", "matching_score": 0.7225735187530518, "max_stars_count": 4, "path": "model/train_base.py" }, { "content": "\"\"\"\n\n\"\"\"\nimport json\nimport copy\nimport numpy as np\nfrom numpy import random\nfrom itertools import chain\n\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\n\nSPECIAL_TOKENS = [\"<SST>\", \"<END>\", \"<PAD>\", \"<SPK:S>\", \"<SPK:O>\"]\n\nATTR_TO_SPECIAL_TOKENS = {\"bos_token\": \"<SST>\", \"eos_token\": \"<END>\", \"pad_token\": \"<PAD>\",\n \"additional_special_tokens\": [\"<SPK:S>\", \"<SPK:O>\", \"<DEL:MOVIE>\",\"<DEL:ACTOR0>\",\n \"<DEL:ACTOR1>\", \"<DEL:WRITER>\", \"<DEL:DIRECTOR>\",\n \"<FACT:ACTOR0>\", \"<DEL:CERTIFICATE>\", \"<DEL:BUDGET>\",\n \"<DEL:COUNTRY>\", \"<DEL:YEAR>\", \"<DEL:GENRE0>\", \"<FACT:PLOT>\",\n \"<FACT:OBJECT>\", \"<OPINION:MOVIE>\", \"<OPINION:MOVIE>\"]}\n\nKG_ENCODING_MAPPING = {\n \"movie\": \"<DEL:MOVIE>\",\n \"actor\": \"<DEL:ACTOR0>\",\n \"person\": \"<DEL:ACTOR1>\",\n \"writer\": \"<DEL:WRITER>\",\n \"director\": 
\"<DEL:DIRECTOR>\",\n \"role\": \"<FACT:ACTOR0>\",\n \"age restriction\": \"<DEL:CERTIFICATE>\",\n \"certificate\": \"<DEL:CERTIFICATE>\",\n \"budget\": \"<DEL:BUDGET>\",\n \"shot location\": \"<DEL:COUNTRY>\",\n \"release year\": \"<DEL:YEAR>\",\n \"genre\": \"<DEL:GENRE0>\",\n \"plot\": \"<FACT:PLOT>\",\n \"trivia\": \"<FACT:OBJECT>\",\n \"attitude\": \"<OPINION:MOVIE>\",\n \"random_attitude\": \"<OPINION:MOVIE>\"\n}\n\n\nclass Komodis:\n def __init__(self, tokenizer):\n self.tokenizer = tokenizer\n self.kg_encoding_mapping = {}\n self.num_added_tokens = self.tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKENS)\n for key, value in KG_ENCODING_MAPPING.items():\n value_id = self.tokenizer.convert_tokens_to_ids(value)\n self.kg_encoding_mapping[key] = value_id\n self._dataset = None\n\n self.spk_s, self.spk_o = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[3:5])\n\n def prepare_dataset(self, dataset, graphs):\n \"\"\" \"\"\"\n self._dataset = {}\n for split in [\"train\", \"valid\", \"test\"]:\n self._dataset[split] = []\n for dialogue_id, dialogue in dataset[split].items():\n graph = graphs[split][dialogue_id]\n dialogue = [d.lower() for d in dialogue[\"dialogue\"]]\n dialogue = Komodis._replace_special_moviecorpus_tokens(dialogue)\n dialogue = [self.tokenizer.encode(d) for d in dialogue]\n self._dataset[split].append({\n \"dialogue\": dialogue,\n \"sequence\": graph[\"sequence\"],\n \"attn_matrix\": graph[\"attn_matrix\"]\n })\n\n def _convert_dialogue_to_samples(self, dialogue, history_length, max_length):\n \"\"\" \"\"\"\n samples = []\n\n for num in range(len(dialogue[\"dialogue\"]) - 1):\n # determine which speaker is system for current sample\n if num % 2 == 0:\n speaker = \"second_speaker\"\n else:\n speaker = \"first_speaker\"\n\n # number of previous utterances\n lower = num + 1 - history_length\n if lower < 0:\n lower = 0\n\n # check for max length\n t = 0\n skip = False\n len_context = len(list(chain(*[x[0] for x in dialogue[\"sequence\"][speaker]])))\n while True:\n len_hist = len(list(chain(*dialogue[\"dialogue\"][lower + t:num + 1])))\n len_label = len(dialogue[\"dialogue\"][num + 1])\n\n # 3 tokens: start token, end token, token for reply\n # (num + 1 - lower -t): plus one token per utterance in the history\n num_special_tokens = 3 + (num + 1 - lower - t)\n\n if (len_hist + len_label + len_context + num_special_tokens) <= max_length:\n break\n\n t += 1\n\n if lower + t == num + 1:\n skip = True\n break\n\n # --- knowledge graph encoding ---\n nodes_shuffled, indices = Komodis._shuffle_nodes(dialogue[\"sequence\"][speaker])\n node_lengths = [len(x[0]) for x in nodes_shuffled]\n matrix_shuffled = Komodis._shuffle_matrix(dialogue[\"attn_matrix\"][speaker], indices)\n kg_nodes = nodes_shuffled\n kg_attn_matrix = Komodis._expand_matrix(matrix_shuffled, node_lengths)\n\n if not skip:\n samples.append({\n \"label\": [dialogue[\"dialogue\"][num + 1]],\n \"history\": dialogue[\"dialogue\"][lower + t:num + 1],\n \"kg_sequence\": kg_nodes,\n \"kg_attn_matrix\": kg_attn_matrix,\n \"len_hist\": len_hist,\n \"len_context\": len_context\n })\n\n return samples\n\n def get_torch_features(self, split, batch_size):\n \"\"\" \"\"\"\n samples = []\n for dialogue in self._dataset[split]:\n samples += self._convert_dialogue_to_samples(dialogue, history_length=3, max_length=256)\n\n features = {\n \"input_ids\": [],\n \"token_type_ids\": [],\n \"kg_attn_matrix\": [],\n \"lm_labels\": []\n }\n\n for sample in samples:\n seqs = self._convert_sample_to_sequences(history=sample[\"history\"],\n 
reply=sample[\"label\"][0],\n kg_nodes=sample[\"kg_sequence\"],\n kg_attn_matrix=sample[\"kg_attn_matrix\"])\n\n features[\"input_ids\"].append(seqs[\"input_ids\"])\n features[\"token_type_ids\"].append(seqs[\"token_type_ids\"])\n features[\"kg_attn_matrix\"].append(seqs[\"kg_attn_matrix\"])\n features[\"lm_labels\"].append(seqs[\"lm_labels\"])\n\n features_padded = Komodis._pad_features(features=features, padding=self.tokenizer.pad_token_id)\n\n torch_features = []\n for key, value in features_padded.items():\n torch_features.append(torch.tensor(value))\n dataset = TensorDataset(*torch_features)\n\n loader = DataLoader(dataset, sampler=None, batch_size=batch_size, shuffle=True, drop_last=True)\n\n return loader\n\n def process_subgraph(self, subgraph, encoding, inference=False, max_clen=-1):\n \"\"\" \"\"\"\n\n def process_series():\n \"\"\" Uses the specific encoding for komodis, where relations are not explicitly encoded.\n kg encoding (1) in context_length_evaluation.xlsx\n \"\"\"\n ss = self.tokenizer.encode(node[\"content\"])\n subgraph_sequence[speaker].append((\n ss,\n len(ss) * [self.kg_encoding_mapping[node[\"type\"]]],\n node[\"content\"],\n node[\"type\"]\n ))\n\n def process_parallel():\n \"\"\" Converts each node into a type-content sequence, where the type is added on the token-type dimension.\n kg encoding (4) in context_length_evaluation.xlsx\n \"\"\"\n c_node = self.tokenizer.encode(node[\"content\"])\n c_type = self.tokenizer.encode(node[\"type\"])\n node_length = max(len(c_node), len(c_type))\n\n c_node = c_node + [self.tokenizer.pad_token_id] * (node_length - len(c_node))\n c_type = c_type + [self.tokenizer.pad_token_id] * (node_length - len(c_type))\n\n subgraph_sequence[speaker].append((\n c_node,\n c_type,\n node[\"content\"],\n node[\"type\"]\n ))\n\n def sequence_length(seq):\n \"\"\" Returns the length of the whole sequence. 
\"\"\"\n full_length = 0\n for item in seq:\n full_length += len(item[0])\n return full_length\n\n def shorten_context(sequence, matrix):\n \"\"\" \"\"\"\n while sequence_length(sequence) > max_clen:\n remove_candidates = []\n for idx, item in enumerate(sequence):\n if item[3] not in [\"movie\", \"attitude\"]:\n remove_candidates.append(idx)\n remove_candidate = random.choice(remove_candidates)\n matrix = np.delete(matrix, remove_candidate, axis=0)\n matrix = np.delete(matrix, remove_candidate, axis=1)\n sequence.pop(remove_candidate)\n\n for idx, item in enumerate(sequence):\n if item[3] == \"attitude\":\n att_rels = 0\n for att_rel in matrix[idx]:\n att_rels += att_rel\n if att_rels == 1:\n matrix = np.delete(matrix, idx, axis=0)\n matrix = np.delete(matrix, idx, axis=1)\n sequence.pop(idx)\n break\n\n return sequence, matrix\n\n if inference:\n subgraph_sequence = {\"inference\": []}\n subgraph_attn_mask = {\"inference\": []}\n speakers = [\"inference\"]\n else:\n subgraph_sequence = {\"first_speaker\": [], \"second_speaker\": []}\n subgraph_attn_mask = {\"first_speaker\": [], \"second_speaker\": []}\n speakers = [\"first_speaker\", \"second_speaker\"]\n\n for speaker in speakers:\n for node in subgraph[speaker][\"nodes\"]:\n if str(node[\"content\"]) == \"-1\" and node[\"type\"] == \"age restriction\":\n node[\"content\"] = \"unknown\"\n if encoding == \"series\":\n process_series()\n elif encoding == \"parallel\":\n process_parallel()\n else:\n raise ValueError(\"Could not find a kg_encoding_type \"\n \"with name: {}\".format(encoding))\n subgraph_attn_mask[speaker] = np.array(json.loads(subgraph[speaker][\"matrix\"]))\n\n # make knowledge graphs shorter, if needed\n if max_clen > -1:\n for speaker in speakers:\n subgraph_sequence[speaker], subgraph_attn_mask[speaker] = \\\n shorten_context(subgraph_sequence[speaker], subgraph_attn_mask[speaker])\n\n return {\"sequence\": subgraph_sequence, \"attn_matrix\": subgraph_attn_mask}\n\n def _convert_sample_to_sequences(self, history, reply, kg_nodes, kg_attn_matrix):\n \"\"\" \"\"\"\n context_input_ids = list(chain(*[x[0] for x in kg_nodes]))\n context_token_type_ids = list(chain(*[x[1] for x in kg_nodes]))\n\n hist_length = len(history)\n if hist_length % 2 == 0:\n first_utt_type = self.spk_s\n second_utt_type = self.spk_o\n else:\n first_utt_type = self.spk_o\n second_utt_type = self.spk_s\n\n sequence = copy.deepcopy([[self.tokenizer.bos_token_id] + context_input_ids] + history + [reply])\n\n sequence[-1] += [self.tokenizer.eos_token_id]\n sequence = [sequence[0]] + [[second_utt_type if i % 2\n else first_utt_type] + s\n for i, s in enumerate(sequence[1:])]\n\n seqs = {\n \"input_ids\": list(chain(*sequence)),\n \"lm_labels\": ([-100] * sum(len(s) for s in sequence[:-1])) + [-100] + sequence[-1][1:],\n \"kg_attn_matrix\": kg_attn_matrix\n }\n\n def cond(i):\n if i % 2:\n return second_utt_type\n return first_utt_type\n\n seqs[\"token_type_ids\"] = [self.tokenizer.bos_token_id] + context_token_type_ids + \\\n [cond(i) for i, s in enumerate(sequence[1:]) for _ in s]\n\n return seqs\n\n @staticmethod\n def _pad_features(features, padding):\n \"\"\" \"\"\"\n keys = [\"input_ids\", \"token_type_ids\", \"lm_labels\"]\n max_l = max(len(feature) for feature in features[\"input_ids\"])\n for name in keys:\n features[name] = [x + [padding if name != \"lm_labels\" else -100] * (max_l - len(x)) for x in\n features[name]]\n\n max_l = max(m.shape[0] for m in features[\"kg_attn_matrix\"])\n for num, matrix in enumerate(features[\"kg_attn_matrix\"]):\n 
back = np.tril(max_l * [1])\n d1, d2 = matrix.shape\n back[:d1, :d2] = matrix\n features[\"kg_attn_matrix\"][num] = back\n\n return features\n\n @staticmethod\n def _replace_special_moviecorpus_tokens(dialogue):\n \"\"\" Replaces [eou] tokens and add [end] tokens.\n \"\"\"\n new_dialogue = []\n for utterance in dialogue:\n tokens = utterance.split(\" \")\n new_tokens = []\n for i in range(len(tokens)):\n if i == 0:\n new_tokens.append(tokens[i])\n else:\n if tokens[i] in [\"[eou]\", \"[EOU]\"]:\n if tokens[i - 1] in [\"?\", \".\", \",\", \"!\", \";\", \":\"]:\n continue\n else:\n new_tokens.append(\".\")\n else:\n new_tokens.append(tokens[i])\n new_dialogue.append(\" \".join(new_tokens))\n return new_dialogue\n\n @staticmethod\n def _shuffle_nodes(nodes):\n \"\"\" Shuffles nodes and returns shuffle-indices. \"\"\"\n indices = list(range(len(nodes)))\n nodes_indices = list(zip(nodes, indices))\n random.shuffle(nodes_indices)\n return zip(*nodes_indices)\n\n @staticmethod\n def _shuffle_matrix(matrix, indices):\n \"\"\" Shuffles an attention-matrix based on shuffled indices. \"\"\"\n temp_matrix_1 = np.ndarray((matrix.shape[0], matrix.shape[1]))\n temp_matrix_2 = np.ndarray((matrix.shape[0], matrix.shape[1]))\n\n for n, i in enumerate(indices):\n temp_matrix_1[:, n] = matrix[:, i]\n for n, i in enumerate(indices):\n temp_matrix_2[n, :] = temp_matrix_1[i, :]\n\n return temp_matrix_2\n\n @staticmethod\n def _expand_matrix(matrix, lengths, fix_length=None):\n \"\"\" Expands the attention-matrix based on the node-lengths \"\"\"\n temp_matrix_1 = np.ndarray((sum(lengths), matrix.shape[0]))\n temp_matrix_2 = np.ndarray((sum(lengths), sum(lengths)))\n\n curr = 0\n for n, i in enumerate(lengths):\n temp_matrix_1[curr:curr+i, :] = np.repeat([matrix[n, :]], repeats=i, axis=0)\n curr += i\n curr = 0\n for n, i in enumerate(lengths):\n temp_matrix_2[:, curr:curr+i] = np.transpose(np.repeat([temp_matrix_1[:, n]], repeats=i, axis=0))\n curr += i\n\n if fix_length is not None:\n background_matrix = np.zeros((fix_length, fix_length), dtype=int)\n background_matrix[0:temp_matrix_2.shape[0], 0:temp_matrix_2.shape[1]] = temp_matrix_2\n temp_matrix_2 = background_matrix\n\n return temp_matrix_2\n", "id": "2709835", "language": "Python", "matching_score": 6.553938865661621, "max_stars_count": 3, "path": "komodis.py" }, { "content": "\"\"\" Reading, processing and converting the KOMODIS dataset into torch features\n\n\"\"\"\nimport os\nimport json\nimport copy\nimport pickle\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom itertools import chain\n\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\n\n\nSPECIAL_TOKENS = [\"<SST>\", \"<END>\", \"<PAD>\", \"<SPK:S>\", \"<SPK:O>\", \"<DEL:MOVIE>\", \"<DEL:ACTOR>\", \"<DEL:DIRECTOR>\",\n \"<DEL:WRITER>\", \"<DEL:YEAR>\", \"<DEL:BUDGET>\", \"<DEL:CERTIFICATE>\", \"<DEL:COUNTRY>\", \"<DEL:GENRE0>\",\n \"<DEL:GEMRE1>\", \"<FACT:MOVIE>\", \"<FACT:ACTOR>\", \"<FACT:DIRECTOR>\", \"<FACT:WRITER>\", \"<FACT:PLOT>\",\n \"<OPINION:MOVIE>\", \"<OPINION:ACTOR>\", \"<OPINION:DIRECTOR>\", \"<OPINION:WRITER>\", \"<OPINION:COUNTRY>\",\n \"<OPINION:GENRE>\", \"<OPINION:BUDGET>\", \"<OPINION:CERTIFICATE>\", \"<OPRATE:0>\", \"<OPRATE:1>\",\n \"<OPRATE:2>\", \"<OPRATE:3>\", \"<OPRATE:4>\", \"<OPRATE:5>\"]\nATTR_TO_SPECIAL_TOKENS = {\"bos_token\": \"<SST>\", \"eos_token\": \"<END>\", \"pad_token\": \"<PAD>\",\n \"additional_special_tokens\": (\"<SPK:S>\",\n \"<SPK:O>\",\n \"<DEL:MOVIE>\",\n \"<DEL:ACTOR0>\",\n \"<DEL:ACTOR1>\",\n \"<DEL:DIRECTOR>\",\n 
\"<DEL:WRITER>\",\n \"<DEL:YEAR>\",\n \"<DEL:BUDGET>\",\n \"<DEL:CERTIFICATE>\",\n \"<DEL:COUNTRY>\",\n \"<DEL:GENRE0>\",\n \"<DEL:GEMRE1>\",\n \"<FACT:MOVIE>\",\n \"<FACT:ACTOR0>\",\n \"<FACT:ACTOR1>\",\n \"<FACT:DIRECTOR>\",\n \"<FACT:WRITER>\",\n \"<FACT:PLOT>\",\n \"<OPINION:MOVIE>\",\n \"<OPINION:ACTOR0>\",\n \"<OPINION:ACTOR1>\",\n \"<OPINION:DIRECTOR>\",\n \"<OPINION:WRITER>\",\n \"<OPINION:COUNTRY>\",\n \"<OPINION:GENRE>\",\n \"<OPINION:BUDGET>\",\n \"<OPINION:CERTIFICATE>\",\n \"<OPRATE:0>\",\n \"<OPRATE:1>\",\n \"<OPRATE:2>\",\n \"<OPRATE:3>\",\n \"<OPRATE:4>\",\n \"<OPRATE:5>\")}\n\nTOKEN_ENCODING_MAPPING = {\n \"movie#0\": \"<DEL:MOVIE>\",\n \"actor#0\": \"<DEL:ACTOR0>\",\n \"actor#1\": \"<DEL:ACTOR1>\",\n \"director#0\": \"<DEL:DIRECTOR>\",\n \"writer#0\": \"<DEL:WRITER>\",\n \"year#0\": \"<DEL:YEAR>\",\n \"budget#0\": \"<DEL:BUDGET>\",\n \"certificate#0\": \"<DEL:CERTIFICATE>\",\n \"country#0\": \"<DEL:COUNTRY>\",\n \"genre#0\": \"<DEL:GENRE0>\",\n \"genre#1\": \"<DEL:GEMRE1>\",\n}\n\n\nclass KomodisDataset:\n \"\"\" The KOMODIS dataset class.\n\n INFO: At some point of creating the scripts, we accidently used the term \"to binarize\" instead of \"to tokenize\".\n If some functions use that term (\"binarize\") we always mean \"tokenize\". TODO: Fix the \"binarize\"-names :-)\n\n \"\"\"\n def __init__(self,\n tokenizer, # A tokenizer like the GPT-2 tokenizer from the transformers package.\n hparams, # A dict of training properties. See train.py!\n path_to_data, # A Path object or string to the current directory of the dataset.\n debug=False): # A Boolean, if True only a slice of the data is loaded.\n self.path_to_data = os.path.join(path_to_data, \"dataset.json\")\n self.debug = debug\n self.tokenizer = tokenizer\n self.hparams = hparams\n self.num_added_tokens = self.tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKENS)\n ids = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[3:])\n self.special_tokens_dict = dict(zip(SPECIAL_TOKENS[3:], ids))\n self.dataset = None\n\n def load_txt_dataset(self):\n \"\"\" Loads the raw dataset.\n\n \"\"\"\n with open(self.path_to_data, \"r\") as f:\n data = json.load(f)\n\n # split into train, validation and test sets.\n self.dataset = KomodisDataset._split_binarized_corpus(data)\n\n def tokenize_dataset(self, path_to_save=None):\n \"\"\" Preprocessing of the raw data.\n\n This includes:\n - tokenization\n - fact encoding and preparation\n - attitude encoding and preparation\n\n Args:\n path_to_save A Boolean. If True the processed data will be saved on hard disc. Authors note: The idea is\n to save some time between trainings, e. g. for hyperparameter search. 
Don't use the pre-\n processed data with different properties, that require different preprocessing!\n \"\"\"\n assert self.dataset is not None # Dataset needs to be loaded first!\n\n # iterates over all dialogues separated in train, valid and test sets\n for split, dialogues in self.dataset.items():\n print(\"Processing {} data ...\".format(split))\n for num, dialogue in enumerate(tqdm(dialogues)):\n # tokenize dialogue\n utterances = [d[\"utterance\"] for d in dialogue[\"dialogue\"]]\n dialogue[\"dialogue_processed\"] = KomodisDataset._replace_special_tokens(utterances)\n dialogue[\"dialogue_processed\"] = \\\n KomodisDataset._replace_special_moviecorpus_tokens(dialogue[\"dialogue_processed\"])\n dialogue[\"dialogue_binarized\"] = [self.tokenizer.encode(d) for d in dialogue[\"dialogue_processed\"]]\n\n # process facts\n dialogue[\"facts_binarized\"] = self._process_and_binarize_facts(dialogue[\"facts\"])\n\n # process attitudes\n dialogue[\"attitudes_binarized\"] = \\\n self._process_and_binarize_attitudes(dialogue[\"attitudes\"],\n attitude_sentences=self.hparams.attitude_sentences)\n\n if self.debug and num > 32:\n break\n\n # --- saving data ---\n if path_to_save is not None:\n path = os.path.join(self.path_to_data, path_to_save)\n if not os.path.exists(os.path.dirname(path)):\n os.mkdir(os.path.dirname(path))\n with open(path, \"wb\") as f:\n pickle.dump(self.dataset, f)\n\n def get_torch_features(self, split, batch_size, num_wrong_utts=1, distributed=False):\n \"\"\" Creates torch features for training.\n\n Args:\n split A string. Determins which split (train, valid or test) you want to use.\n batch_size An Integer. Simply feed the batchsize of your model.\n num_wrong_utts An Integer. The number of random distractors for the classification loss.\n distributed A Boolean. 
Not implemented yet.\n\n \"\"\"\n if distributed:\n print(\"WARNING: Not implemented yet!\")\n\n # load preprocessed data (or preprocess if needed)\n if \"dialogue_binarized\" not in self.dataset[split][0]:\n self.tokenize_dataset()\n samples = self._convert_dialogues_to_samples(split=split, num_wrong_utts=num_wrong_utts)\n\n # create features\n features = {\n \"input_ids\": [],\n \"mc_token_ids\": [],\n \"lm_labels\": [],\n \"mc_labels\": [],\n \"token_type_ids\": []\n }\n\n # Converts all samples into processed sequences for the gpt-2 model.\n for sample in samples:\n num_wrongs = 0\n # Randomly choose the position of the correct answer.\n true_answer_id = int(np.random.rand() * (1 + num_wrong_utts))\n # Generate all sequences (the correct one and all wrong ones).\n for num in range(1 + num_wrong_utts):\n if num == true_answer_id:\n seqs = self._convert_sample_to_sequences(facts=sample[\"facts\"],\n attitudes=sample[\"attitudes\"],\n history=sample[\"dialogue_history\"],\n reply=sample[\"label_utterance\"][0],\n lm_labels=True)\n else:\n seqs = self._convert_sample_to_sequences(facts=sample[\"facts\"],\n attitudes=sample[\"attitudes\"],\n history=sample[\"dialogue_history\"],\n reply=sample[\"wrong_utterances\"][num_wrongs],\n lm_labels=False)\n num_wrongs += 1\n\n features[\"input_ids\"].append(seqs[\"input_ids\"])\n features[\"token_type_ids\"].append(seqs[\"token_type_ids\"])\n features[\"mc_token_ids\"].append(seqs[\"mc_token_ids\"])\n features[\"lm_labels\"].append(seqs[\"lm_labels\"])\n features[\"mc_labels\"].append(true_answer_id)\n\n # padding\n features_padded = self._pad_features(features, padding=self.tokenizer.pad_token_id)\n\n features_combined = {\n \"input_ids\": [],\n \"mc_token_ids\": [],\n \"lm_labels\": [],\n \"mc_labels\": [],\n \"token_type_ids\": []\n }\n\n # Reformat the sequences\n for num in tqdm(range(int(len(features[\"input_ids\"])/(1 + num_wrong_utts)))):\n sst = num * (1 + num_wrong_utts)\n end = sst + 1 + num_wrong_utts\n input_ids = features_padded[\"input_ids\"][sst:end]\n token_type_ids = features_padded[\"token_type_ids\"][sst:end]\n mc_token_ids = features_padded[\"mc_token_ids\"][sst:end]\n lm_labels = features_padded[\"lm_labels\"][sst:end]\n mc_labels = features_padded[\"mc_labels\"][sst]\n features_combined[\"input_ids\"].append(input_ids)\n features_combined[\"token_type_ids\"].append(token_type_ids)\n features_combined[\"mc_token_ids\"].append(mc_token_ids)\n features_combined[\"lm_labels\"].append(lm_labels)\n features_combined[\"mc_labels\"].append(mc_labels)\n\n # PyTorch conversion\n torch_features = []\n for key, value in features_combined.items():\n torch_features.append(torch.tensor(value))\n dataset = TensorDataset(*torch_features)\n\n if distributed:\n sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n else:\n sampler = None\n\n # Only shuffle the train set. 
Ignore the distributed flag here.\n if not distributed or split != \"train\":\n shuffle = False\n else:\n shuffle = True\n\n loader = DataLoader(dataset, sampler=sampler, batch_size=batch_size, shuffle=shuffle)\n\n return loader, sampler\n\n def load_dataset(self, path):\n \"\"\" Loads a (already processed) dataset into the object.\n \"\"\"\n with open(path, \"rb\") as f:\n ds = pickle.load(f)\n for split in [\"train\", \"valid\", \"test\"]:\n if split not in ds:\n raise Exception(\"Dataset does not contain {} data.\".format(split))\n if \"dialogue_binarized\" not in ds[split][0]:\n raise Exception(\"Data is not binarized!\")\n self.dataset = ds\n\n def _process_and_binarize_facts(self, facts, inference=False):\n \"\"\" Processes and tokenizes the facts from the dataset.\n \"\"\"\n def create_seqs_with_delex():\n \"\"\" The sequences if delexicalisation is used \"\"\"\n if fact[\"relation\"] in [\"has_trivia\", \"has_plot\"]:\n facts_processed[speaker].append((\n fact_tokenized,\n len_fact * self.tokenizer.encode(token)\n ))\n\n # For inference we don't need first- and second-speaker separation.\n if inference:\n facts_processed = {\"inference\": []}\n speakers = [\"inference\"]\n facts = {\"inference\": facts}\n else:\n facts_processed = {\"first_speaker\": [], \"second_speaker\": []}\n speakers = [\"first_speaker\", \"second_speaker\"]\n\n for speaker in speakers:\n ss_facts = facts[speaker]\n for fact in ss_facts:\n fact_tokenized = self.tokenizer.encode(fact[\"object\"])\n len_fact = len(fact_tokenized)\n token = KomodisDataset._get_correct_fact_token(subject=fact[\"subject\"], relation=fact[\"relation\"])\n create_seqs_with_delex()\n\n if inference:\n facts_processed = facts_processed[\"inference\"]\n\n return facts_processed\n\n def _process_and_binarize_attitudes(self, attitudes, attitude_sentences, inference=False):\n \"\"\" Processes and tokenizes the attitudes from the dataset.\n \"\"\"\n # For inference we don't need first- and second-speaker separation.\n if inference:\n attitudes_processed = {\"inference\": []}\n speakers = [\"inference\"]\n attitudes = {\"inference\": attitudes}\n else:\n attitudes_processed = {\"first_speaker\": [], \"second_speaker\": []}\n speakers = [\"first_speaker\", \"second_speaker\"]\n\n for speaker in speakers:\n ss_atts = attitudes[speaker]\n for num, attitude in enumerate(ss_atts):\n token = KomodisDataset._get_correct_att_token(subject=attitude[\"subject\"],\n relation=attitude[\"relation\"])\n if attitude_sentences:\n repl_att_sent = KomodisDataset._replace_special_tokens([attitude[\"source\"]])\n ss = self.tokenizer.encode(repl_att_sent[0])\n attitudes_processed[speaker].append((\n ss,\n len(ss) * self.tokenizer.encode(token)\n ))\n else:\n attitudes_processed[speaker].append((\n self.tokenizer.encode(\"<OPRATE:{}>\".format(attitude[\"object\"])),\n self.tokenizer.encode(token)\n ))\n\n if inference:\n attitudes_processed = attitudes_processed[\"inference\"]\n\n return attitudes_processed\n\n @staticmethod\n def _get_correct_fact_token(subject, relation):\n if relation == \"has_plot\":\n return \"<FACT:PLOT>\"\n if subject == \"movie#0\":\n return \"<FACT:MOVIE>\"\n if subject == \"actor#0\":\n return \"<FACT:ACTOR0>\"\n if subject in [\"actor#1\", \"actor#2\"]:\n return \"<FACT:ACTOR1>\"\n if subject == \"writer#0\":\n return \"<FACT:WRITER>\"\n if subject == \"director#0\":\n return \"<FACT:DIRECTOR>\"\n\n @staticmethod\n def _get_correct_att_token(subject, relation):\n if relation == \"has_bot_certificate_attitude\":\n return 
\"<OPINION:CERTIFICATE>\"\n if relation == \"has_bot_budget_attitude\":\n return \"<OPINION:BUDGET>\"\n if subject == \"movie#0\":\n return \"<OPINION:MOVIE>\"\n if subject == \"actor#0\":\n return \"<OPINION:ACTOR0>\"\n if subject in [\"actor#1\", \"actor#2\"]:\n return \"<OPINION:ACTOR1>\"\n if subject == \"writer#0\":\n return \"<OPINION:WRITER>\"\n if subject == \"director#0\":\n return \"<OPINION:DIRECTOR>\"\n if subject in [\"genre#0\", \"genre#1\"]:\n return \"<OPINION:GENRE>\"\n if subject == \"country#0\":\n return \"<OPINION:COUNTRY>\"\n\n @staticmethod\n def _replace_special_tokens(utterances):\n \"\"\" In the original dataset, the special tokens differ from the ones defined here. This function replaces\n the original tokens with the ones from the tokenizer.\n \"\"\"\n utterances_fixed = []\n for utterance in utterances:\n for original, new in TOKEN_ENCODING_MAPPING.items():\n utterance = utterance.replace(original, new)\n utterances_fixed.append(utterance)\n return utterances_fixed\n\n @staticmethod\n def _replace_special_moviecorpus_tokens(dialogue):\n \"\"\" Replaces [eou] tokens and add [end] tokens.\n \"\"\"\n new_dialogue = []\n for utterance in dialogue:\n tokens = utterance.split(\" \")\n new_tokens = []\n for i in range(len(tokens)):\n if i == 0:\n new_tokens.append(tokens[i])\n else:\n if tokens[i] in [\"[eou]\", \"[EOU]\"]:\n if tokens[i - 1] in [\"?\", \".\", \",\", \"!\", \";\", \":\"]:\n continue\n else:\n new_tokens.append(\".\")\n else:\n new_tokens.append(tokens[i])\n new_dialogue.append(\" \".join(new_tokens))\n return new_dialogue\n\n def convert_clear_txt_to_sequences(self, facts, attitudes, history, reply=None):\n \"\"\" For inference only! \"\"\"\n facts = self._process_and_binarize_facts(facts, inference=True)\n attitudes = self._process_and_binarize_attitudes(attitudes,\n attitude_sentences=self.hparams.attitude_sentences,\n inference=True)\n history = self._replace_special_tokens(history)\n history = [self.tokenizer.encode(x) for x in history]\n if reply is None:\n reply = []\n else:\n reply = [reply]\n seqs = self._convert_sample_to_sequences(facts, attitudes, history, reply, lm_labels=False, inference=True)\n return seqs\n\n def _convert_sample_to_sequences(self,\n facts,\n attitudes,\n history,\n reply,\n lm_labels=True,\n inference=False):\n facts_input_ids = list(chain(*[x[0] for x in facts]))\n facts_token_type_ids = list(chain(*[x[1] for x in facts]))\n atts_input_ids = list(chain(*[x[0] for x in attitudes]))\n atts_token_type_ids = list(chain(*[x[1] for x in attitudes]))\n\n sequence = copy.deepcopy([[self.tokenizer.bos_token_id] + facts_input_ids + atts_input_ids] + history + [reply])\n if not inference:\n sequence[-1] += [self.tokenizer.eos_token_id]\n sequence = [sequence[0]] + [[self.special_tokens_dict[\"<SPK:S>\"] if (len(sequence) - i) % 2\n else self.special_tokens_dict[\"<SPK:O>\"]] + s\n for i, s in enumerate(sequence[1:])]\n seqs = {\n \"input_ids\": list(chain(*sequence))\n }\n\n def cond(i):\n if i % 2:\n return self.special_tokens_dict[\"<SPK:O>\"]\n return self.special_tokens_dict[\"<SPK:S>\"]\n\n seqs[\"token_type_ids\"] = [self.tokenizer.bos_token_id] + facts_token_type_ids + atts_token_type_ids + \\\n [cond(i) for i, s in enumerate(sequence[1:]) for _ in s]\n\n seqs[\"mc_token_ids\"] = len(seqs[\"input_ids\"]) - 1\n if lm_labels:\n seqs[\"lm_labels\"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]\n else:\n seqs[\"lm_labels\"] = [-1] * len(seqs[\"input_ids\"])\n\n return seqs\n\n def 
_convert_dialogue_to_samples(self,\n dialogue,\n split,\n num_wrong_utts=1,\n history_length=(1, 15),\n max_length=32):\n \"\"\" Converts one dialogue in all possible samples, given some settings.\n\n history_length (a, b) Number of history utterances between a and b, if possible.\n\n \"\"\"\n # create wrong utterances\n num_needed_wrong_utts = (len(dialogue[\"dialogue_binarized\"]) - 1) * num_wrong_utts\n wrong_utts = self._compute_wrong_dialogues(split=split,\n num=num_needed_wrong_utts,\n only_movie=dialogue[\"movie_title\"],\n exclude_id=dialogue[\"dialogue_id\"])\n\n samples = []\n for num in range(len(dialogue[\"dialogue_binarized\"]) - 1):\n # determine which speaker is system for current sample\n if num % 2 == 0:\n speaker = \"second_speaker\"\n else:\n speaker = \"first_speaker\"\n\n # boundaries for wrong utterances\n sst = num_wrong_utts * num\n end = sst + num_wrong_utts\n\n # number of previous utterances\n r = np.random.randint(history_length[0], history_length[1] + 1)\n lower = num + 1 - r\n if lower < 0:\n lower = 0\n\n # check for max length\n t = 0\n skip = False\n while True:\n len_hist = len(list(chain(*dialogue[\"dialogue_binarized\"][lower + t:num + 1])))\n len_label = len(dialogue[\"dialogue_binarized\"][num + 1])\n len_facts = len(list(chain(*[x[0] for x in dialogue[\"facts_binarized\"][speaker]])))\n len_atts = len(list(chain(*[x[0] for x in dialogue[\"attitudes_binarized\"][speaker]])))\n len_wut = max([len(x) for x in wrong_utts[sst:end]])\n\n # 3 tokens: start token, end token, token for reply\n # (num + 1 - lower -t): plus one token per utterance in the history\n num_special_tokens = 3 + (num + 1 - lower - t)\n\n # check both length: correct utterance and the longest wrong utterance\n if (len_hist + len_facts + len_atts + len_wut + num_special_tokens) <= max_length and \\\n (len_hist + len_label + len_facts + len_atts + num_special_tokens) <= max_length:\n break\n\n t += 1\n\n if lower + t == num + 1:\n skip = True\n break\n\n if not skip:\n samples.append({\n \"label_utterance\": [dialogue[\"dialogue_binarized\"][num + 1]],\n \"dialogue_history\": dialogue[\"dialogue_binarized\"][lower + t:num + 1],\n \"facts\": dialogue[\"facts_binarized\"][speaker],\n \"attitudes\": dialogue[\"attitudes_binarized\"][speaker],\n \"wrong_utterances\": wrong_utts[sst:end],\n })\n\n return samples\n\n def _convert_dialogues_to_samples(self, split, num_wrong_utts=1):\n samples = []\n print(\"Processing {}-data.\".format(split))\n for dialogue in tqdm(self.dataset[split]):\n if self.debug and (\"dialogue_binarized\" not in dialogue or len(samples) > 64):\n break\n samples += self._convert_dialogue_to_samples(dialogue, split, num_wrong_utts, (3, 5),\n self.hparams.max_input_length)\n return samples\n\n def _compute_wrong_dialogues(self, split, num, only_movie=None, exclude_id=None):\n \"\"\" Returns random wrong utterances\n\n Args:\n split A string. One of: \"train\", \"valid\", \"test\".\n num An integer. Number of wrong utterances.\n only_movie A string. If given, only utterances of the given movie are returned.\n exclude_id An integer. 
If given, the movie with that prepared_id is ignored.\n\n \"\"\"\n if only_movie is not None:\n candidates = [movie for movie in self.dataset[split] if movie['movie_title'] == only_movie]\n else:\n candidates = self.dataset[split]\n\n utterances = []\n for movie in candidates:\n if exclude_id is not None:\n if movie[\"dialogue_id\"] == exclude_id:\n continue\n # --- if not processed (can happen in debug mode), process: ---\n if \"dialogue_binarized\" not in movie:\n new_utterances = [d[\"utterance\"] for d in movie[\"dialogue\"]]\n movie[\"dialogue_processed\"] = KomodisDataset._replace_special_tokens(new_utterances)\n movie[\"dialogue_processed\"] = \\\n KomodisDataset._replace_special_moviecorpus_tokens(movie[\"dialogue_processed\"])\n movie[\"dialogue_binarized\"] = [self.tokenizer.encode(d) for d in movie[\"dialogue_processed\"]]\n # --------------------------------------------------------------\n utterances += movie['dialogue_binarized']\n np.random.shuffle(utterances)\n\n return utterances[:num]\n\n @staticmethod\n def _split_binarized_corpus(dataset):\n \"\"\" Splits the corpus in train, eval and test. \"\"\"\n\n # collect movie titles\n titles = []\n for dialogue in dataset:\n if dialogue['movie_title'] not in titles:\n titles.append(dialogue['movie_title'])\n num_train = int(len(titles) * 0.8)\n num_eval = int(len(titles) * 0.1)\n np.random.shuffle(titles)\n titles_split = {\n \"train\": titles[0:num_train],\n \"valid\": titles[num_train:num_train + num_eval],\n \"test\": titles[num_train + num_eval:]\n }\n dataset_splitted = {\n \"train\": [],\n \"valid\": [],\n \"test\": []\n }\n for dialogue in dataset:\n if dialogue['movie_title'] in titles_split['train']:\n dataset_splitted[\"train\"].append(dialogue)\n elif dialogue['movie_title'] in titles_split['valid']:\n dataset_splitted[\"valid\"].append(dialogue)\n elif dialogue['movie_title'] in titles_split['test']:\n dataset_splitted[\"test\"].append(dialogue)\n\n return dataset_splitted\n\n def _pad_features(self, features, padding):\n max_l = max(len(feature) for feature in features[\"input_ids\"])\n if self.debug:\n # If debug mode, we want to see if the maximum size of input data fits into the vram for training.\n max_l = self.hparams.max_input_length\n for name in [\"input_ids\", \"token_type_ids\", \"lm_labels\"]:\n features[name] = [x + [padding if name != \"lm_labels\" else -1] * (max_l - len(x)) for x in features[name]]\n return features\n", "id": "11023908", "language": "Python", "matching_score": 1.1771223545074463, "max_stars_count": 4, "path": "model/dataset.py" }, { "content": "\"\"\" process graphs\n\nThis script is supposed to show how the authors encoded the knowledge graphs.\n\nProcessed data is stored in ./data/processed/ for each set of (dataset, depth, encoding).\n\n\"\"\"\nimport os\nimport sys\nimport json\nimport pickle\nimport transformers\nfrom argparse import ArgumentParser\n\nfrom komodis import Komodis\nfrom opendialkg import OpenDialKG\n\n\nparser = ArgumentParser()\nparser.add_argument(\"--dataset\", type=str, help=\"Name of the dataset (komodis or opendialkg).\")\nparser.add_argument(\"--depth\", type=int, help=\"Graph depth (0, 1 or 2). 
See paper for more information.\")\nparser.add_argument(\"--encoding\", type=str, help=\"Encoding type (series or parallel) See paper for more information.\")\nparser.add_argument(\"--lr\", type=float, default=6.0e-5, help=\"Learning rate for training.\")\nparser.add_argument(\"--epochs\", type=int, default=3, help=\"Number of epochs for training.\")\nparser.add_argument(\"--batch_size\", type=int, default=4, help=\"Train and valid batch size for training.\")\nparser.add_argument(\"--device\", type=str, default=\"cpu\", help=\"Only cpu support in with this repository.\")\nargs = parser.parse_args()\n\n\ndef run():\n if args.dataset not in [\"komodis\", \"opendialkg\"]:\n print(\"Argument dataset={} is not valid!\".format(args.dataset))\n sys.exit()\n\n if args.depth not in [0, 1, 2]:\n print(\"Argument depth={} is not valid!\".format(args.dataset))\n sys.exit()\n\n if args.encoding not in [\"series\", \"parallel\"]:\n print(\"Argument encoding={} is not valid!\".format(args.dataset))\n sys.exit()\n\n # read knowledge graphs\n graphs_raw = {}\n for split in [\"train\", \"valid\", \"test\"]:\n file_path = \"data/knowledge_graphs/{}_graphs_d{}_{}.json\".format(args.dataset, args.depth, split)\n\n if not os.path.isfile(file_path):\n print(\"{} does not exist. Please unzip data first or ask authors for graphs with depth > 1.\")\n sys.exit()\n\n graphs_raw[split] = json.load(open(file_path, \"r\"))\n\n # process graphs\n tokenizer = transformers.GPT2Tokenizer.from_pretrained(\"gpt2\")\n\n if args.dataset == \"komodis\":\n dataset_helper = Komodis(tokenizer=tokenizer)\n elif args.dataset == \"opendialkg\":\n dataset_helper = OpenDialKG(tokenizer=tokenizer)\n else:\n print(\"Argument dataset={} is not valid!\".format(args.dataset))\n sys.exit()\n\n graphs_processed = {}\n for split in [\"train\", \"valid\", \"test\"]:\n graphs_processed[split] = {k: dataset_helper.process_subgraph(subgraph=v, encoding=args.encoding)\n for k, v in graphs_raw[split].items()}\n\n # save graphs\n if not os.path.exists(\"data/processed\"):\n os.mkdir(\"data/processed/\")\n with open(\"data/processed/{}_graphs_d{}_{}-enc.pkl\".format(args.dataset, args.depth, args.encoding), \"wb\") as f:\n pickle.dump(graphs_processed, f)\n\n\nif __name__ == \"__main__\":\n run()\n", "id": "10821370", "language": "Python", "matching_score": 2.112048864364624, "max_stars_count": 3, "path": "process_graphs.py" }, { "content": "\"\"\" Script to run the training.\n\"\"\"\nimport os\nimport sys\nimport torch\nfrom argparse import ArgumentParser\n\nfrom model import komodis\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--dataset\", type=str)\n parser.add_argument(\"--path_to_data\", type=str, default=\"data/\", help=\"Directory of the dataset\")\n parser.add_argument(\"--model_checkpoint\", type=str, default=\"gpt\",\n help=\"Short name of the model\")\n parser.add_argument(\"--num_candidates\", type=int, default=2, help=\"Number of candidates for training\")\n parser.add_argument(\"--max_history\", type=int, default=2, help=\"Number of previous exchanges to keep in history\")\n parser.add_argument(\"--train_batch_size\", type=int, default=4, help=\"Batch size for training\")\n parser.add_argument(\"--valid_batch_size\", type=int, default=4, help=\"Batch size for validation\")\n parser.add_argument(\"--gradient_accumulation_steps\", type=int, default=8,\n help=\"Accumulate gradients on several steps\")\n parser.add_argument(\"--lr\", type=float, default=6.25e-5, help=\"Learning rate\")\n 
parser.add_argument(\"--lm_coef\", type=float, default=1.0, help=\"LM loss coefficient\")\n parser.add_argument(\"--mc_coef\", type=float, default=1.0, help=\"Multiple-choice loss coefficient\")\n parser.add_argument(\"--max_norm\", type=float, default=1.0, help=\"Clipping gradient norm\")\n parser.add_argument(\"--n_epochs\", type=int, default=3, help=\"Number of training epochs\")\n parser.add_argument(\"--eval_before_start\", action='store_true',\n help=\"If true start with a first evaluation before training\")\n parser.add_argument(\"--device\", type=str, default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n help=\"Device (cuda or cpu)\")\n parser.add_argument(\"--max_input_length\", type=int, default=256, help=\"The maximum length of sequences for \"\n \"training. All samples are padded to that \"\n \"length.\")\n parser.add_argument(\"--attitude_sentences\", action=\"store_true\", help=\"If set, the attitudes are generated as \"\n \"real sentences instead of single tokens.\")\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"If true only a slice of the data is processed and \"\n \"some samples are displayed on console.\")\n args = parser.parse_args()\n\n # Make sure that the current working directory equals the directory of this script.\n os.chdir(os.path.dirname(__file__))\n\n if args.dataset == \"komodis\":\n trainer = komodis.KomodisTrainer(path_to_pretrained_model=\"data/pretrained_models/gpt2/\",\n path_to_vocab_file=\"data/tokenizers/gpt2-vocab.json\",\n path_to_merges_file=\"data/tokenizers/gpt2-merges.txt\",\n hparams=args)\n else:\n print(\"{} not implemented.\".format(args.dataset))\n sys.exit()\n\n trainer.train()\n", "id": "866100", "language": "Python", "matching_score": 1.996037244796753, "max_stars_count": 4, "path": "train.py" } ]
2.788657
hse-econ-data-science
[ { "content": "import telebot\nfrom telebot import types\nimport requests\n\ntoken = \"<KEY>\"\n\nbot = telebot.TeleBot(token)\n\n# напишем, что делать нашему боту при команде старт\[email protected]_handler(commands=['start'])\ndef send_keyboard(message, text=\"Привет, чем я могу тебе помочь?\"):\n \n keyboard = types.ReplyKeyboardMarkup(row_width=1) # наша клавиатура\n itembtn1 = types.KeyboardButton('Сделать красиво') # создадим кнопку\n itembtn2 = types.KeyboardButton('Пока все!')\n keyboard.add(itembtn1, itembtn2) \n\n # пришлем это все сообщением и запишем выбранный вариант\n msg = bot.send_message(message.from_user.id,\n text=text, reply_markup=keyboard)\n\n # отправим этот вариант в функцию, которая его обработает\n bot.register_next_step_handler(msg, callback_worker)\n \n\ndef get_infa(num):\n url = f\"https://api.isevenapi.xyz/api/iseven/{num}\"\n res = requests.get(url)\n infa = res.json()\n return infa['ad'], infa['iseven']\n\n\ndef send_number(msg):\n try:\n ad, res = get_infa(msg.text)\n except:\n ad, res = 'error', 'error'\n \n bot.send_message(msg.chat.id, ad)\n bot.send_message(msg.chat.id, res)\n bot.send_message(msg.chat.id, 'Покупай премиум: https://isevenapi.xyz/')\n \n send_keyboard(msg, \"Чем еще могу помочь?\")\n\n# привязываем функции к кнопкам на клавиатуре\ndef callback_worker(call):\n \n if call.text == 'Сделать красиво':\n msg = bot.send_message(call.chat.id, 'Напиши число между 0 и 999 999 в чат.')\n bot.register_next_step_handler(msg, send_number)\n\n\n elif call.text == \"Пока все!\":\n bot.send_message(call.chat.id, 'Хорошего дня! Когда захотите продолжнить нажмите на команду /start')\n \n \[email protected]_handler(content_types=['text'])\ndef handle_docs_audio(message):\n send_keyboard(message, text=\"Я не понимаю :-( Выберите один из пунктов меню:\")\n \n \nbot.polling(none_stop=True)\n\n", "id": "5459310", "language": "Python", "matching_score": 0, "max_stars_count": 9, "path": "sem14_bot/main_201&204.py" } ]
0
ItokawaK
[ { "content": "import re\n\nclass Alignment():\n def __init__(self, sam_line):\n sam_line_split = sam_line.split('\\t')\n self.read_name = sam_line_split[0]\n self.flag = int(sam_line_split[1])\n self.ref_name = sam_line_split[2]\n self.ref_start = int(sam_line_split[3]) - 1 # SAM has 1-based coordinate\n self.mapq = int(sam_line_split[4])\n self.cigar = sam_line_split[5]\n self.next_ref_name = sam_line_split[6]\n self.next_ref_start = int(sam_line_split[7]) - 1\n self.fragment_len = int(sam_line_split[8])\n self.read_seq = sam_line_split[9]\n self.read_q = sam_line_split[10]\n self.opt_tags = sam_line_split[11:]\n\n cigar_sub = [\n (int(tmp[:-1]), tmp[-1])\n for tmp in re.findall(r'\\d+\\w', self.cigar)\n ]\n self.cigar_expand = \"\".join([\n tmp[1] * tmp[0]\n for tmp in cigar_sub\n ])\n\n if self.flag & 64:\n self.read_order = 0\n if self.flag & 128:\n self.read_order = 1\n if self.flag & 16:\n self.strand = '-'\n else:\n self.strand = '+'\n\n self.read2refcorr = []\n\n walk = 0\n for cigletter in self.cigar_expand:\n if cigletter in \"S\":\n self.read2refcorr.append(\"S\")\n if cigletter in 'MN=X':\n self.read2refcorr.append(self.ref_start + walk)\n walk += 1\n if cigletter in 'D':\n walk += 1\n if cigletter in 'I':\n self.read2refcorr.append(self.ref_start + walk)\n\n self.ref_end = self.ref_start + walk\n\n def get_samline():\n pass\n", "id": "8866554", "language": "Python", "matching_score": 2.268031120300293, "max_stars_count": 5, "path": "tools/trim_primers/Alignment.py" }, { "content": "import Alignment\n\nclass Fragment():\n '''\n This object represents a molecule inserted between the adapters.\n '''\n def __init__(self, alignment_l, alignment_r):\n\n self.alignments = (alignment_l, alignment_r)\n self.read_name = alignment_l.read_name\n\n self.ref_start = alignment_l.ref_start\n self.ref_end = alignment_r.ref_end\n self.size = self.ref_end - self.ref_start\n self.left_trimmed = 0\n self.right_trimmed = 0\n\n def get_fastqlines(self):\n '''\n Returns a list of sequences and\n qualities of both reads.\n '''\n\n def revcomp_dna(DNA_SEQ):\n comp_base_dict = {\"A\":\"T\", \"T\":\"A\",\n \"G\":\"C\", \"C\":\"G\",\n \"N\":\"N\"}\n return \"\".join([comp_base_dict[letter] for letter in DNA_SEQ[::-1]])\n\n out_list = [None, None]\n\n for ali in self.alignments:\n if ali.strand == \"+\":\n seq = ali.read_seq\n qual = ali.read_q\n else:\n seq = revcomp_dna(ali.read_seq)\n qual = ali.read_q[::-1]\n\n out_list[ali.read_order] = [\"@\" + self.read_name,\n seq,\n \"+\",\n qual]\n return out_list\n\n def slice(self, range):\n '''\n Slices the fragment at the pos.\n cut_pos value represents the 0-based index of the left most base of the\n right part.\n Returns tuple for clipped lengths on read1 and 2.\n '''\n\n if range == None:\n return 0\n\n direction = range[2]\n\n if direction == \"+\":\n cut_pos = range[1]\n else:\n cut_pos = range[0]\n\n if not (self.ref_start < cut_pos and cut_pos <= self.ref_end):\n return 0\n\n def walk_along(alignment, cut_pos):\n walk = 0\n\n for pos in alignment.read2refcorr:\n if pos == \"S\":\n walk += 1\n elif pos >= cut_pos:\n break\n else:\n walk += 1\n return walk\n\n walk0 = walk_along(self.alignments[0], cut_pos)\n walk1 = walk_along(self.alignments[1], cut_pos)\n\n if direction == \"+\":\n self.left_trimmed = (walk0, walk1)\n if direction == \"-\":\n self.right_trimmed = (len(self.alignments[0].read_seq) - walk0,\n len(self.alignments[1].read_seq) - walk1)\n\n if direction == \"+\":\n self.alignments[0].read_seq = self.alignments[0].read_seq[walk0:]\n 
self.alignments[0].read_q = self.alignments[0].read_q[walk0:]\n self.alignments[0].read2refcorr = self.alignments[0].read2refcorr[walk0:]\n self.alignments[1].read_seq = self.alignments[1].read_seq[walk1:]\n self.alignments[1].read_q = self.alignments[1].read_q[walk1:]\n self.alignments[1].read2refcorr = self.alignments[1].read2refcorr[walk1:]\n\n if direction == \"-\":\n self.alignments[0].read_seq = self.alignments[0].read_seq[:walk0]\n self.alignments[0].read_q = self.alignments[0].read_q[:walk0]\n self.alignments[0].read2refcorr = self.alignments[0].read2refcorr[:walk0]\n self.alignments[1].read_seq = self.alignments[1].read_seq[:walk1]\n self.alignments[1].read_q = self.alignments[1].read_q[:walk1]\n self.alignments[1].read2refcorr = self.alignments[1].read2refcorr[:walk1]\n", "id": "9478710", "language": "Python", "matching_score": 1.8735973834991455, "max_stars_count": 5, "path": "tools/trim_primers/Fragment.py" }, { "content": "#!/usr/bin/env python3\n\nimport sys\nimport argparse\nimport re\nimport gzip\n\nfrom Alignment import Alignment\nfrom Fragment import Fragment\nfrom Primer_range import Primer_range\n\nparser = argparse.ArgumentParser(description='This tool trims portion of primer from aligned reads. '\n 'Input name sorted sam or output of the bwa mem directry by PIPE.')\nparser.add_argument('primer_bed', help='bed file describing primer coorinates')\nparser.add_argument('fastq_1', help='name of output fastq file 1')\nparser.add_argument('fastq_2', help='name of output fastq file 2')\nparser.add_argument('--gzip', action='store_true', help='gzip output fastq files. Adds .gz extention automatically.')\nparser.add_argument('--verbose', action='store_true', help='output detail infomation for debugging')\n\n\nargs = parser.parse_args()\n\nBED_FILE = args.primer_bed\n\nprimer_range = Primer_range(BED_FILE)\n\n# [+strand alignment, -strand alignment]\nalignment_bucket = [None, None]\n\nout_buffer1 = []\nout_buffer2 = []\ncnt = 0\ncurrent_read = 'default' # Current read name\nfor sam_line in sys.stdin:\n # Skip header\n if sam_line.startswith('@'):\n continue\n\n alignment = Alignment(sam_line.rstrip())\n\n # Initialize alignment backet\n if current_read != alignment.read_name:\n alignment_bucket = [None, None]\n current_read = alignment.read_name\n\n # Skip non-primary and supplemental alignments\n if alignment.flag & (256 + 2048):\n continue\n\n if alignment.strand == '+':\n alignment_bucket[0] = alignment\n elif alignment.strand == '-':\n alignment_bucket[1] = alignment\n\n # process if the bucket is full\n if alignment_bucket[0] and alignment_bucket[1]:\n\n fragment = Fragment(alignment_bucket[0], alignment_bucket[1])\n\n # Check if fragment ends are conteined in a primer region\n # Return range if True, None otherwise.\n range_left = primer_range.is_contained(fragment.ref_start, 'left')\n range_right = primer_range.is_contained(fragment.ref_end, 'right')\n\n # Chop ends of the fragment overlapping to primer\n fragment.slice(range_left)\n fragment.slice(range_right)\n\n if args.verbose:\n sys.stderr.write(current_read + ':')\n sys.stderr.write(' Fragment interval: {}-{}\\n'.format(fragment.ref_start, fragment.ref_end))\n sys.stderr.write(' Left part overlapped with: {}\\n'.format(range_left))\n sys.stderr.write(' Right part overlapped with: {}\\n'.format(range_right))\n sys.stderr.write(' Left clipped: {}\\n'.format(fragment.left_trimmed))\n sys.stderr.write(' Right clipped: {}\\n'.format(fragment.right_trimmed))\n\n # Get fasta string list\n fastq_lines = 
fragment.get_fastqlines()\n\n # Create fasta string\n out_read1 = \"\\n\".join(fastq_lines[0]) + \"\\n\"\n out_read2 = \"\\n\".join(fastq_lines[1]) + \"\\n\"\n\n # Push fasta string to output buffer\n if args.gzip:\n out_read1 = out_read1.encode()\n out_read2 = out_read2.encode()\n\n out_buffer1.append(out_read1)\n out_buffer2.append(out_read2)\n\n# Write fasta\nif args.gzip:\n f1 = gzip.open(args.fastq_1 + '.gz', 'wb', compresslevel=3)\n f2 = gzip.open(args.fastq_2 + '.gz', 'wb', compresslevel=3)\n f1.write(b\"\".join(out_buffer1))\n f2.write(b\"\".join(out_buffer2))\nelse:\n f1 = open(args.fastq_1, 'w')\n f2 = open(args.fastq_2, 'w')\n f1.write(\"\".join(out_buffer1))\n f2.write(\"\".join(out_buffer2))\n\nf1.close()\nf2.close()\n", "id": "1250532", "language": "Python", "matching_score": 3.125544786453247, "max_stars_count": 5, "path": "tools/trim_primers/trim_primer_parts.py" }, { "content": "#!/usr/bin/env python\n\ntry:\n import matplotlib\n matplotlib.use('Agg')\nfinally:\n import matplotlib.pyplot as plt\n import matplotlib.lines as lines\n import matplotlib.patches as patches\n from matplotlib.ticker import NullFormatter\n\nimport subprocess\nimport sys\nimport os\nimport pandas as pd\nimport numpy as np\nimport re\nfrom collections import defaultdict\nfrom concurrent.futures import ProcessPoolExecutor\nimport signal\nimport matplotlib.transforms as transforms\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\ncolor_scheme = {'plot_fc': 'gray',\n 'primerP1_fc':'#ff5353',\n 'primerP2_fc':'#5fefc7',\n 'gene_fc':'#ffe17f',\n 'mismatch_normal':'#ffe17f',\n 'mismatch_primer':'red',\n }\n\nCDS_DATA = (\n (\"orf1a\", 265, 13468), # (product, start offset, end offset)\n (\"orf1b\", 13467, 21555),\n (\"S\" , 21562, 25384),\n (\"ORF3a\", 25392, 26220),\n (\"E\" , 26244, 26472),\n (\"M\" , 26522, 27191),\n (\"ORF6\" , 27201, 27387),\n (\"ORF7a\", 27393, 27759),\n (\"ORF8\" , 27893, 28259),\n (\"N\" , 28273, 29533),\n (\"ORF10\", 29557, 29674)\n)\n\n\ndef translate(seq):\n TRANS_CODE = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',\n 'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W'\n }\n\n aa_seq = ''\n if len(seq) < 3:\n return ''\n while len(seq) >= 3:\n aa_seq += TRANS_CODE[seq[0:3]]\n seq = seq[3:]\n return aa_seq\n\n\n# samtools depth analysis runner\ndef filter_softclipped(bam_file):\n p0 = subprocess.Popen(['samtools','view','-h', bam_file],\n stdout = subprocess.PIPE)\n p1 = subprocess.Popen(['awk', '/^@/ || $6!~/S/'],\n stdin = p0.stdout,\n stdout = subprocess.PIPE)\n\n return p1\n\ndef samtools_depth(bam_file, remove_softclipped=False, min_readlen=0):\n\n if remove_softclipped:\n p1 = filter_softclipped(bam_file)\n p2 = subprocess.Popen(['samtools','depth','-a', '-l', str(min_readlen),'-'],\n stdin = p1.stdout,\n stdout = subprocess.PIPE)\n else:\n p2 = subprocess.Popen(['samtools','depth','-a','-l', 
str(min_readlen), bam_file],\n stdout = subprocess.PIPE)\n\n out = p2.communicate()[0]\n out = [l.split('\\t') for l in out.decode().rstrip().split('\\n')]\n try:\n out_tbl = pd.DataFrame({'POS': [int(i[1]) for i in out],\n 'DEPTH': [int(i[2]) if int(i[2]) > 0 else 0.9 for i in out]\n })\n except:\n print(f'No mapped read in {bam_file}', file=sys.stderr)\n return None\n\n return out_tbl\n\n# samtools stats analysis runner\ndef samtools_stats(bam_file):\n p1 = subprocess.Popen(['samtools','stats', bam_file],\n stdout = subprocess.PIPE)\n\n out= p1.communicate()[0]\n total_l = 0\n\n for l in out.decode().rstrip().split(\"\\n\"):\n l = l.split('\\t')\n if l[0] != 'SN':\n continue\n if l[1] == 'total length:':\n total_l = int(l[2])\n elif l[1] == 'percentage of properly paired reads (%):':\n per_paired = float(l[2]) / 100\n\n return((total_l, per_paired))\n\nclass Mismatch:\n\n cds = CDS_DATA\n refseq_str = None\n\n def __init__(self, pos, refbase, altbase, count, type):\n self.pos = pos\n self.refbase = refbase\n self.altbase = altbase\n self.count = count\n self.type = type # snp, del. ins\n\n @property\n def show_mismatch(self):\n\n MAX_CHAR = 20\n if self.type == 'snp':\n return f'{self.refbase}{self.pos}{self.altbase}'\n elif self.type == 'del':\n if len(self.altbase) < MAX_CHAR:\n _alt = self.altbase\n else:\n _alt = self.altbase[0:MAX_CHAR] + '...'\n return f'del_{self.pos}{_alt}'\n elif self.type == 'ins':\n if len(self.altbase) < MAX_CHAR:\n _alt = self.altbase\n else:\n _alt = self.altbase[0:MAX_CHAR] + '...'\n return f'ins_{self.pos}{_alt}'\n @property\n def length(self):\n return len(self.altbase)\n\n @property\n def annotation(self):\n\n gene = None\n aa_pos = None\n\n offset_v = self.pos - 1\n if self.type == 'snp':\n for g, s, e in __class__.cds:\n if s < self.pos < e:\n gene = g\n gene_seq = __class__.refseq_str[s:e]\n q, mod = divmod(offset_v - s, 3)\n aa_pos = q + 1\n codon_offset = s + q*3\n triplet = list(gene_seq[(q*3):(q*3 + 3)])\n ref_codon = ''.join(triplet)\n ref_aa = translate(ref_codon)\n triplet[mod] = self.altbase\n sample_codon = ''.join(triplet)\n sample_aa = translate(sample_codon)\n if ref_aa != sample_aa:\n return f'{gene}:{ref_aa}{aa_pos}{sample_aa}'\n\n if self.type == 'del':\n for g, s, e in __class__.cds:\n if s < self.pos < e:\n gene = g\n gene_seq = __class__.refseq_str[s:e]\n q1, mod1 = divmod(offset_v - s, 3)\n aa_pos = q1 + 1\n q2, mod2 = divmod(offset_v + self.length - s, 3)\n if mod2 == 0:\n add = 0\n else:\n add = 3\n triplets = list(gene_seq[(q1*3):(q2*3 + add)])\n ref_codon = ''.join(triplets)\n deleted_aa = translate(ref_codon)\n if (self.length % 3) != 0:\n return 'FrameShifting'\n else:\n l_remained = triplets[:mod1]\n if mod2 == 0:\n r_remained = ['']\n else:\n r_remained = triplets[-(3-mod2):]\n\n remained = ''.join(l_remained + r_remained)\n\n remained_aa = translate(remained)\n\n\n return f'{gene}:{deleted_aa}{aa_pos}{remained_aa}'\n\n if self.type == 'ins':\n for g, s, e in __class__.cds:\n if s <= self.pos < e:\n gene = g\n gene_seq = __class__.refseq_str[s:e]\n q, mod = divmod(offset_v - s, 3)\n aa_pos = q + 1\n if (self.length % 3) != 0:\n return 'FrameShifting'\n\n if mod == 0:\n ref_aa = ''\n sample_aa = translate(self.altbase)\n aa_pos = f'{aa_pos}^{aa_pos+1}'\n else:\n triplet = list(gene_seq[(q*3):(q*3 + 3)])\n ref_codon = ''.join(triplet)\n ref_aa = translate(ref_codon)\n triplet[mod:mod] = list(self.altbase)\n sample_codon = ''.join(triplet)\n sample_aa = translate(sample_codon)\n\n return 
f'{gene}:{ref_aa}{aa_pos}{sample_aa}'\n\n return None\n\n# samtools mpileup runner\ndef samtools_mpileup(bam_file, ref_fa, threashold=0.8):\n\n def readbase_parser(mpileup_line):\n read_base_str = mpileup_line[4]\n pos = int(mpileup_line[1])\n refbase = mpileup_line[2]\n depth = int(mpileup_line[3])\n MIN_NUM_MISMATH = 2\n MIN_MISMATH_RATIO = threashold\n MIN_INDEL_RATIO = threashold * 0.9\n\n i = 0\n # split_bases = []\n mismatches = defaultdict(int)\n indels = defaultdict(int)\n while i < len(read_base_str):\n first_char = read_base_str[i]\n if first_char in 'n.,*$':\n i += 1\n elif first_char in 'ATGCatgc':\n # split_bases.append(first_char.upper())\n mismatches[first_char.upper()] += 1\n i += 1\n elif first_char == '^':\n i += 2\n # split_bases.append(read_base_str[i])\n # elif read_base_str[i] == '$':\n # i += 1\n elif read_base_str[i] in '+-':\n indel_type, indel_len_str = re.findall('^([\\+-])(\\d+)', read_base_str[i:])[0]\n i += 1\n i += len(indel_len_str)\n indel_seq = read_base_str[i:(i + int(indel_len_str))]\n if indel_type == '-':\n indel_str = 'del_' + indel_seq.upper()\n elif indel_type == '+':\n indel_str = 'ins_' + indel_seq.upper()\n indels[indel_str] += 1\n i += int(indel_len_str)\n else:\n print('Unknown char: ' + read_base_str[i], file = sys.stderr)\n print('Unknown char: ' + read_base_str, file = sys.stderr)\n # return split_bases\n\n # return split_bases\n\n out_mismatch = None\n out_indel = None\n if mismatches:\n max_mismatch = max([(cnt, base) for base, cnt in mismatches.items()])\n mismatch_cnt = max_mismatch[0]\n altbase = max_mismatch[1]\n if (mismatch_cnt >= MIN_NUM_MISMATH) and (mismatch_cnt > (depth * MIN_MISMATH_RATIO)):\n out_mismatch = Mismatch(pos, refbase, altbase, mismatch_cnt, 'snp')\n # else:\n # out_mismatch = None\n\n if indels:\n max_indel = max([(cnt, base) for base, cnt in indels.items()])\n indel_cnt = max_indel[0]\n indel_str = max_indel[1]\n if (indel_cnt >= MIN_NUM_MISMATH) and (indel_cnt > (depth * MIN_INDEL_RATIO)):\n indel_type, indel_seq = indel_str.split(\"_\")\n out_indel = Mismatch(pos+1, refbase, indel_seq, indel_cnt, indel_type)\n\n return (out_mismatch, out_indel)\n\n\n p1 = subprocess.Popen(['samtools','mpileup', '-B', '-f', ref_fa, '-ax', bam_file],\n stdout = subprocess.PIPE)\n out = p1.communicate()[0]\n out = [l.split('\\t') for l in out.decode().rstrip().split('\\n')]\n\n # Return none if there is no output from mpileup\n if not out:\n print(f'No mapped read in {bam_file}', file=sys.stderr)\n return None\n #return out\n try:\n out_tbl = pd.DataFrame({'POS': [int(row[1]) for row in out],\n 'REF_BASE': [row[2] for row in out],\n 'DEPTH': [int(row[3]) if int(row[3]) > 0 else 0.9 for row in out],\n 'READ_BASE': [row[4] for row in out],\n 'MISMATCHES': [readbase_parser(row) for row in out]\n })\n except:\n print(f'No mapped read in {bam_file}', file=sys.stderr)\n return None\n\n return out_tbl\n\n# Adding lines for mismatches in plot\n# Detecting mismatches contained in primer region\ndef add_mismatch(tbl,\n ax,\n threashold=0.8,\n primer_bed=None,\n seq_name=None,\n refseq_vector=None,\n only_primer_mismatch=False):\n\n # MIN_NUM_MISMATH = 2\n\n if primer_bed:\n df = pd.read_csv(primer_bed, sep='\\t', header = None)\n def is_contained(pos, df):\n for index, row in df.iterrows():\n start = row[1]\n end = row[2]\n # if pos -1 < start:\n # return False\n if pos -1 >= start and pos <= end:\n return True\n\n return False\n\n xy = []\n count = 0\n labe_y_bin = 10\n for index, row in tbl.iterrows():\n mismatch_obj = 
row['MISMATCHES'][0]\n indel_obj = row['MISMATCHES'][1]\n\n if mismatch_obj:\n x = mismatch_obj.pos\n y = mismatch_obj.count\n mismatch_str = mismatch_obj.show_mismatch\n ano = mismatch_obj.annotation\n if ano:\n mismatch_str += f'({ano})'\n\n col = color_scheme['mismatch_normal']\n mismatch_on_primer = False\n if primer_bed and is_contained(x, df):\n col = color_scheme['mismatch_primer']\n mismatch_on_primer = True\n\n if only_primer_mismatch and not mismatch_on_primer:\n pass\n else:\n ax.plot([x,x],[1,y],\n color = col,\n linewidth=0.5,\n zorder= 120)\n # Adding a label for the mismatch base and position\n mismatch_label_y = 10**((np.log10(y)/labe_y_bin) * (count % labe_y_bin + 1))\n ax.text(x=x,\n y=mismatch_label_y,\n s=mismatch_str,\n fontsize=2,\n zorder=120,\n alpha=0.8,\n color='0')\n count += 1\n\n if indel_obj:\n x = indel_obj.pos\n y = indel_obj.count\n mismatch_str = indel_obj.show_mismatch\n ano = indel_obj.annotation\n if ano:\n mismatch_str += f'({ano})'\n\n col = 'blue'\n mismatch_on_primer = False\n if primer_bed and is_contained(x, df):\n col = color_scheme['mismatch_primer']\n mismatch_on_primer = True\n\n if only_primer_mismatch and not mismatch_on_primer:\n pass\n else:\n r = patches.Rectangle(xy=(x, 1),\n width=indel_obj.length,\n height = y,\n fc=col,\n ec=col,\n linewidth=0.8,\n zorder=120)\n ax.add_patch(r)\n\n # Adding a label for the mismatch base and position\n mismatch_label_y = 10**((np.log10(y)/labe_y_bin) * (count % labe_y_bin + 1))\n ax.text(x=x,\n y=mismatch_label_y,\n s=mismatch_str,\n fontsize=2,\n zorder=120,\n alpha=0.7,\n color='0')\n count += 1\n\ndef mutate_genome(seq_name, refseq, tbl, min_depth=10):\n MIN_DEPTH_CONSENSUS = min_depth\n\n refseq_vector = list(refseq)\n\n _tbl = tbl.sort_values(by='POS', ascending=False)\n for index, row in _tbl.iterrows():\n mismatch_obj = row['MISMATCHES'][0]\n indel_obj = row['MISMATCHES'][1]\n\n if row.DEPTH < MIN_DEPTH_CONSENSUS:\n try:\n refseq_vector[row.POS - 1] = 'N'\n except:\n pass\n\n continue\n\n if mismatch_obj:\n idx = (mismatch_obj.pos - 1)\n refseq_vector[idx] = mismatch_obj.altbase\n if indel_obj:\n idx = (indel_obj.pos - 1)\n if indel_obj.type == 'del':\n del refseq_vector[idx:(idx + len(indel_obj.altbase))]\n if indel_obj.type == 'ins':\n refseq_vector[idx:idx] = indel_obj.altbase\n\n fasta_writer(seq_name,''.join(refseq_vector))\n # return ''.join(refseq_vector)\n\n\n# Adding gene boxes\ndef add_genes(ax):\n gene_tbl = pd.DataFrame(\n {\n 'Name':[\"ORF1ab\", \"S\", \"ORF3a\" , \"E\", \"M\", \"ORF6\", \"ORF7a\" , \"ORF8\", \"N\", \"ORF10\"],\n 'Start':[265,21562,25392,26244,26522,27201,27393,27893,28273,29557],\n 'End':[21555,25384,26220,26472,27191,27387,27759,28259,29533,29674]\n }\n )\n\n for index, row in gene_tbl.iterrows():\n g_start = row['Start']\n g_end = row['End']\n g_name = row['Name']\n ymin = 1/6 * 0.2\n height = 1/6 * 0.15\n if index % 2 == 1:\n ymin += 1/6 *0.05\n text_y = ymin + height\n text_va = 'bottom'\n else:\n text_y = ymin - 1/6 * 0.02\n text_va = 'top'\n\n trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)\n\n r = patches.Rectangle(xy=(g_start, ymin),\n width= g_end - g_start,\n height = height,\n fc=color_scheme['gene_fc'],\n ec='k',\n linewidth=0.5,\n zorder=101,\n transform=trans)\n ax.add_patch(r)\n ax.text(x=(g_start + g_end)/2,\n y=text_y,\n s=g_name,\n ha='center',\n va=text_va,\n fontsize=3,\n weight='bold',\n zorder=102,\n transform=trans\n )\n\ndef add_amplicons(primer_bed, ax, highlights=[], ymax=1/6 *0.92, ymin=1/6*0.55):\n\n df = 
pd.read_csv(primer_bed, sep='\\t', header = None)\n\n n_primer = len(df)\n\n # Primer regions\n amplicon_dict = {}\n for index, row in df.iterrows():\n start = row[1]\n end = row[2]\n primer_name = row[3]\n prefix, amplicom_id, primer_direction, *_ = primer_name.split('_')\n if amplicom_id not in amplicon_dict:\n amplicon_dict[amplicom_id] = {}\n amplicon_dict[amplicom_id]['start'] = 0\n amplicon_dict[amplicom_id]['end'] = 0\n if 'LEFT' in primer_direction:\n amplicon_dict[amplicom_id]['start'] = start\n else:\n amplicon_dict[amplicom_id]['end'] = end\n\n amplicon_ids = amplicon_dict.keys()\n\n '''\n ______ __ top2\n | 2 |\n |____| __ base2\n ______ __ top1\n | 3 |\n |____| __ base1\n '''\n\n base1 = ymin\n top1 = (ymax + ymin)/2\n base2 = (ymax + ymin)/2\n top2 = ymax\n\n trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)\n\n for id in amplicon_ids:\n id_num = int(id)\n\n if id_num % 2 == 1:\n base = base1\n top = top1\n h = top1 - base1\n fc = color_scheme['primerP1_fc']\n else:\n base = base2\n top = top2\n h = top2 - base2\n fc = color_scheme['primerP2_fc']\n\n x1 = amplicon_dict[id]['start']\n x2 = amplicon_dict[id]['end']\n\n r = patches.Rectangle(xy=(x1, base),\n width=x2-x1,\n height=h,\n linewidth = 0.5,\n fc=fc,\n ec='k',\n zorder=101,\n transform = trans)\n ax.add_patch(r)\n ax.text(x=(x1+x2)/2,\n y=(base+top)/2,\n s=id,\n ha='center',\n va='center',\n fontsize=3.5,\n weight='bold',\n zorder=102,\n transform= trans)\n\n if id in highlights:\n\n r = patches.Rectangle(xy=(x1, 1/6),\n width=x2-x1,\n height=5/6,\n linewidth = 0,\n fc=fc,\n alpha=0.5,\n zorder=50,\n transform = trans)\n ax.add_patch(r)\n\n if id_num%2==1:\n y = 0.97\n else:\n y = 1.0\n\n ax.text(\n x=(x1+x2)/2,\n y=y,\n s=id,\n ha='center',\n va='top',\n fontsize=5,\n weight='bold',\n zorder=55,\n transform=trans\n )\n\ndef set_plot_area(ax, max_hight=10000):\n\n # Setting x and y labels\n ax.set_ylabel('Depth')\n ax.set_xlabel('Genome position nt')\n\n ax.axhline(1, color='k', linewidth=0.5, zorder=102) # line at depth=1\n\n # Ticks\n #ymax = np.ceil(tbl['DEPTH'].max() / 10000) * 10000\n\n ax.set_xlim(0, 30000)\n ax.set_xticks([i * 1000 for i in range(1,30)], minor=True)\n ax.set_xticks([i * 5000 for i in range(7)])\n ax.set_xticklabels([str(i * 5000) for i in range(7)], fontsize='8')\n\n ax.set_yscale('log')\n ymin = 10**-(np.log10(max_hight)/5)\n # For linear scale\n # ymin = -(max_hight)/5\n\n ax.set_ylim(ymin, max_hight)\n ax.yaxis.set_major_formatter(NullFormatter())\n ax.yaxis.set_minor_formatter(NullFormatter())\n\n y_major_ticks = [el for el in ax.get_yticks() if el >=1 and el <= max_hight]\n y_minor_ticks = [el for el in ax.get_yticks(minor=True) if el >1 and el < max_hight]\n ax.set_yticks(y_major_ticks)\n ax.set_yticks(y_minor_ticks, minor=True)\n ax.set_yticklabels([str(int(el)) for el in y_major_ticks])\n\n # ax.set_ylim(ymin, max_hight)\n #\n # ax.set_yticks([i * ii for ii in (1,10,100,1000) for i in range(1,11)],\n # minor=True)\n # ax.set_yticks([1, 10, 100, 1000, 10000])\n # ax.set_yticklabels([str(i) for i in (1,10,100,1000,10000)], fontsize='8')\n\ndef fasta_parser(file_path):\n seq = ''\n with open (file_path) as f:\n header = next(f)\n if not header.startswith('>'):\n exit(f'Error: {file_path} does not have a heder line')\n\n for l in f:\n if not l.startswith('>'):\n seq += l.rstrip()\n else:\n break\n return seq\n\ndef fasta_writer(name, inseq):\n print(f'>{name}')\n while inseq:\n print(inseq[0:80])\n inseq = inseq[80:]\n\ndef main(bam_files,\n outpdf,\n 
primer_bed=None,\n highlight_arg=None,\n fa_file=None,\n num_cpu=1,\n plot_reduction_level=10,\n mismatches_thresh=0.8,\n min_concensus_depth=10,\n remove_softclipped=False,\n min_readlen=0,\n out_consensus=False,\n only_primer_mismatch=False):\n\n if fa_file == None:\n with ProcessPoolExecutor(max_workers = num_cpu) as executor:\n executed1 = [executor.submit(samtools_depth, bam, remove_softclipped, min_readlen) for bam in bam_files]\n else:\n with ProcessPoolExecutor(max_workers = num_cpu) as executor:\n executed1 = [executor.submit(samtools_mpileup, bam, fa_file, threashold=mismatches_thresh) for bam in bam_files]\n\n depth_tbls = [ex.result() for ex in executed1]\n\n with ProcessPoolExecutor(max_workers = num_cpu) as executor:\n executed2 = [executor.submit(samtools_stats, bam) for bam in bam_files]\n\n stats = [ex.result() for ex in executed2]\n\n if highlight_arg:\n highlights=highlight_arg.split(',')\n else:\n highlights=[]\n\n n_sample = len(bam_files)\n\n with PdfPages(outpdf) as pdf:\n if fa_file:\n refseq_str = fasta_parser(fa_file)\n Mismatch.refseq_str = refseq_str\n\n for i in range(n_sample):\n progress_bar = f' [{i+1}/{n_sample}] samples were plotted.'\n sys.stderr.write(f'\\r{progress_bar}')\n sys.stderr.flush()\n\n fig, ax = plt.subplots(1,1, figsize=(8, 3))\n plt.subplots_adjust(right=0.85,\n hspace=0.5,\n bottom=0.5/3,\n top=1-0.5/3)\n\n align_stats = stats[i]\n meta_data = ['Total Seq: {:.1f} Mb'.format(align_stats[0]/1e6),\n 'Paired properly: {:.1%}'.format(align_stats[1])]\n title = os.path.basename(bam_files[i]).rstrip('.bam')\n ax.set_title(title)\n set_plot_area(ax, max_hight=10000)\n tbl = depth_tbls[i]\n\n if tbl is None:\n pdf.savefig()\n plt.close()\n continue\n\n x = list(tbl['POS'])\n y = list(tbl['DEPTH'])\n\n # Data reduction\n x = [_x for i, _x in enumerate(x) if i % plot_reduction_level == 0]\n y = [_y for i, _y in enumerate(y) if i % plot_reduction_level == 0]\n\n x = [np.min(x)] + x + [np.max(x)]\n y = [0.9] + y + [0.9]\n\n ax.fill(x,\n y,\n color_scheme['plot_fc'],\n zorder=52)\n\n # Horizontal line at 10\n ax.axhline(10,\n color='k',\n linestyle=':',\n linewidth=0.8,\n zorder=103)\n\n # Adding supportive data\n ax.text(1.01, 0.7,\n '\\n'.join(meta_data),\n fontsize=5,\n ha='left',\n transform=ax.transAxes)\n\n if primer_bed != None:\n add_amplicons(primer_bed, ax, highlights=highlights)\n add_genes(ax)\n if fa_file != None:\n # refseq_vector = None\n if out_consensus:\n # refseq_vector = list(refseq_str)\n mutate_genome(seq_name=title,\n refseq=refseq_str,\n tbl=tbl,\n min_depth=min_concensus_depth)\n\n add_mismatch(tbl,\n ax,\n primer_bed=primer_bed,\n only_primer_mismatch=only_primer_mismatch)\n\n labels = [item.get_text() for item in ax.get_yticklabels()]\n\n # plt.savefig(outpdf, format='pdf')\n pdf.savefig()\n plt.close()\n\n sys.stderr.write('\\n')\n\nif __name__=='__main__':\n import argparse\n import sys\n import os\n\n _version = 0.12\n\n parser = argparse.ArgumentParser(description='Output depth plot in PDF. Ver: {}'.format(_version))\n parser.add_argument('-i',\n '--bams',\n nargs='*',\n help='Paths for input BAMs')\n parser.add_argument('-o',\n '--out',\n help='Output PDF file name')\n parser.add_argument('-p',\n '--primer', default=None,\n help='Primer regions in BED format [optional]')\n parser.add_argument('-l',\n '--highlights', default=None,\n help='Add highlights on selected amplicons. '\n 'Give amplicon numbers delimited by comma (e.g. 18,76,...) '\n 'Can only be used with the -p --primer option. 
[optional]')\n parser.add_argument('-r',\n '--ref_fa', default=None,\n help='Reference fasta file [optional]')\n parser.add_argument('-t',\n '--threads', default=1, type=int,\n help='Num tasks to process concurrently [optional]')\n parser.add_argument('-m',\n '--mismatches_thresh', default=0.8, type=float,\n help='Show mismatches higher than this ratio (default=0.8). '\n 'Only effective with the -r option [optional]')\n parser.add_argument('-s',\n '--ignore_softclipped', action='store_true',\n help='Ignore softclipped reads (default=False). [optional]')\n parser.add_argument('--min_readlen', default=0, type=int,\n help='Minumum length of read (default=0). [optional]')\n parser.add_argument('--skip_level', default=10, type=int,\n help='Plot depths at every n (1-50) bases. (default=10). '\n 'Setting this a larger value makes file size smaller '\n 'with reduced resolution [optional]')\n parser.add_argument('--dump_consensus', action='store_true',\n help='Output consensus to STDOUT. Experimental.')\n parser.add_argument('--min_concensus_depth', default=10, type=int,\n help='Min depth to show consensus (default=10).')\n parser.add_argument('--only_primer_mismatch', action='store_true',\n help='Show only primer mismatch')\n\n args = parser.parse_args()\n\n if not args.out:\n sys.exit('-o (--out) option is mandate')\n\n if not args.bams:\n sys.exit('-i (--bams) option is mandate')\n\n if args.highlights and not args.primer:\n sys.exit('-l can be used only with the -p (--primer) option')\n\n for file in args.bams:\n if not os.path.isfile(file):\n sys.exit('{} was not found'.format(file))\n\n if args.primer and not os.path.isfile(args.primer):\n sys.exit('{} was not found'.format(args.primer))\n\n if args.ref_fa and not os.path.isfile(args.ref_fa):\n sys.exit('{} was not found'.format(args.ref_fa))\n\n if args.skip_level < 1 or args.skip_level > 50:\n sys.exit('skip_level should be between 1 and 50')\n\n if args.dump_consensus:\n warinig_msg = ' '*20\n warinig_msg += 'WARNIG!!!!: Consensus generation is an experimental function.'\n print('', file=sys.stderr)\n print(warinig_msg, file=sys.stderr)\n print('',file=sys.stderr)\n if not args.ref_fa:\n sys.exit('--dump_consensus can be used only with the -r (--ref_fa) option')\n\n main(args.bams,\n args.out,\n primer_bed=args.primer,\n highlight_arg=args.highlights,\n fa_file=args.ref_fa,\n num_cpu=args.threads,\n mismatches_thresh=args.mismatches_thresh,\n min_concensus_depth=args.min_concensus_depth,\n remove_softclipped=args.ignore_softclipped,\n min_readlen=args.min_readlen,\n plot_reduction_level=args.skip_level,\n out_consensus=args.dump_consensus,\n only_primer_mismatch=args.only_primer_mismatch)\n", "id": "1444020", "language": "Python", "matching_score": 2.38435435295105, "max_stars_count": 5, "path": "tools/plot_depth.py" }, { "content": "class Primer_range():\n def __init__(self, BED_FILE):\n self.ranges_fwd = []\n self.ranges_rev = []\n with open(BED_FILE, 'r') as bed_h:\n for bed_line in bed_h:\n bed_line = bed_line.rstrip()\n bed_fields = bed_line.split('\\t')\n start = int(bed_fields[1])\n end = int(bed_fields[2])\n strand = bed_fields[5]\n if strand == \"+\":\n self.ranges_fwd.append((start, end, strand))\n if strand == \"-\":\n self.ranges_rev.append((start, end, strand))\n\n def is_contained(self, end_pos, end_type):\n '''\n This function return range of primer region containing a given position.\n Returns None if there is no containing interval.\n '''\n if end_type == \"left\":\n for range in self.ranges_fwd:\n\n if range[0] > 
end_pos:\n return None\n if range[0] <= end_pos and end_pos < range[1]:\n return range\n if end_type == \"right\":\n for range in self.ranges_rev:\n if range[0] > end_pos:\n return None\n if range[0] < end_pos and end_pos <= range[1]:\n return range\n", "id": "2934194", "language": "Python", "matching_score": 0.931438684463501, "max_stars_count": 5, "path": "tools/trim_primers/Primer_range.py" } ]
2.268031
AmanTyagi2432
[ { "content": "from distutils.core import setup, Extension\nfrom Cython.Build import cythonize\nfrom numpy import get_include\n\next_main = Extension('mtLeastR', ['mtLeastR/mtLeastR.pyx', 'mtLeastR/ep21R.c', 'mtLeastR/eppMatrix.c', 'mtLeastR/epph.c'], include_dirs=['.', 'mtLeastR', get_include()])\n\nsetup(ext_modules=cythonize([ext_main]))\n", "id": "5890064", "language": "Python", "matching_score": 0, "max_stars_count": 3, "path": "setup.py" } ]
0